index | package | name | docstring | code | signature
---|---|---|---|---|---
55,399 |
distrax._src.distributions.beta
|
variance
|
Calculates the variance.
|
def variance(self) -> Array:
"""Calculates the variance."""
sum_alpha_beta = self._alpha + self._beta
return self._alpha * self._beta / (
jnp.square(sum_alpha_beta) * (sum_alpha_beta + 1.))
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
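The method above is the standard closed form for the Beta variance, `alpha * beta / ((alpha + beta)^2 * (alpha + beta + 1))`. A minimal sketch checking it numerically, assuming distrax is installed and exports `Beta` at the top level:

```python
import jax.numpy as jnp
import distrax  # assumed available

alpha, beta = 2.0, 5.0
dist = distrax.Beta(alpha=alpha, beta=beta)

# Closed form from the method above: alpha*beta / ((alpha+beta)^2 * (alpha+beta+1)).
sum_alpha_beta = alpha + beta
expected = alpha * beta / (jnp.square(sum_alpha_beta) * (sum_alpha_beta + 1.0))

print(dist.variance(), expected)  # both ~0.0255
```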
55,400 |
distrax._src.bijectors.bijector
|
Bijector
|
Differentiable bijection that knows how to compute its Jacobian determinant.
A bijector implements a differentiable and bijective transformation `f`, whose
inverse is also differentiable (`f` is called a "diffeomorphism"). A bijector
can be used to transform a continuous random variable `X` to a continuous
random variable `Y = f(X)` in the context of `TransformedDistribution`.
Typically, a bijector subclass will implement the following methods:
- `forward_and_log_det(x)` (required)
- `inverse_and_log_det(y)` (optional)
The remaining methods are defined in terms of the above by default.
Subclass requirements:
- Subclasses must ensure that `f` is differentiable and bijective, and that
their methods correctly implement `f^{-1}`, `J(f)` and `J(f^{-1})`. Distrax
will assume these properties hold, and will make no attempt to verify them.
- Distrax assumes that `f` acts on array-valued variables called "events", and
that the bijector operates on batched events. Specifically, Distrax assumes
the following:
* `f` acts on events of shape [M1, ..., Mn] and returns events of shape
[L1, ..., Lq]. `n` is referred to as `event_ndims_in`, and `q` as
`event_ndims_out`. `event_ndims_in` and `event_ndims_out` must be static
properties of the bijector, and must be known to it at construction time.
* The bijector acts on batched events of shape [N1, ..., Nk, M1, ..., Mn],
where [N1, ..., Nk] are batch dimensions, and returns batched events of
shape [K1, ..., Kp, L1, ..., Lq], where [K1, ..., Kp] are (possibly
different) batch dimensions. Distrax requires that bijectors always
broadcast against batched events, that is, that they apply `f` identically
to each event. Distrax also allows for events to broadcast against batched
bijectors, meaning that multiple instantiations of `f` are applied to the
same event, although this is not a subclass requirement.
|
class Bijector(jittable.Jittable, metaclass=abc.ABCMeta):
"""Differentiable bijection that knows to compute its Jacobian determinant.
A bijector implements a differentiable and bijective transformation `f`, whose
inverse is also differentiable (`f` is called a "diffeomorphism"). A bijector
can be used to transform a continuous random variable `X` to a continuous
random variable `Y = f(X)` in the context of `TransformedDistribution`.
Typically, a bijector subclass will implement the following methods:
- `forward_and_log_det(x)` (required)
- `inverse_and_log_det(y)` (optional)
The remaining methods are defined in terms of the above by default.
Subclass requirements:
- Subclasses must ensure that `f` is differentiable and bijective, and that
their methods correctly implement `f^{-1}`, `J(f)` and `J(f^{-1})`. Distrax
will assume these properties hold, and will make no attempt to verify them.
- Distrax assumes that `f` acts on array-valued variables called "events", and
that the bijector operates on batched events. Specifically, Distrax assumes
the following:
* `f` acts on events of shape [M1, ..., Mn] and returns events of shape
[L1, ..., Lq]. `n` is referred to as `event_ndims_in`, and `q` as
`event_ndims_out`. `event_ndims_in` and `event_ndims_out` must be static
properties of the bijector, and must be known to it at construction time.
* The bijector acts on batched events of shape [N1, ..., Nk, M1, ..., Mn],
where [N1, ..., Nk] are batch dimensions, and returns batched events of
shape [K1, ..., Kp, L1, ..., Lq], where [K1, ..., Kp] are (possibly
different) batch dimensions. Distrax requires that bijectors always
broadcast against batched events, that is, that they apply `f` identically
to each event. Distrax also allows for events to broadcast against batched
bijectors, meaning that multiple instantiations of `f` are applied to the
same event, although this is not a subclass requirement.
"""
def __init__(self,
event_ndims_in: int,
event_ndims_out: Optional[int] = None,
is_constant_jacobian: bool = False,
is_constant_log_det: Optional[bool] = None):
"""Initializes a Bijector.
Args:
event_ndims_in: Number of input event dimensions. The bijector acts on
events of shape [M1, ..., Mn], where `n == event_ndims_in`.
event_ndims_out: Number of output event dimensions. The bijector returns
events of shape [L1, ..., Lq], where `q == event_ndims_out`. If None, it
defaults to `event_ndims_in`.
is_constant_jacobian: Whether the Jacobian is promised to be constant
(which is the case if and only if the bijector is affine). A value of
False will be interpreted as "we don't know whether the Jacobian is
constant", rather than "the Jacobian is definitely not constant". Only
set to True if you're absolutely sure the Jacobian is constant; if
you're not sure, set to False.
is_constant_log_det: Whether the Jacobian determinant is promised to be
constant (which is the case for, e.g., volume-preserving bijectors). If
None, it defaults to `is_constant_jacobian`. Note that the Jacobian
determinant can be constant without the Jacobian itself being constant.
Only set to True if you're absolutely sure the Jacobian determinant is
constant; if you're not sure, set to None.
"""
if event_ndims_out is None:
event_ndims_out = event_ndims_in
if event_ndims_in < 0:
raise ValueError(
f"`event_ndims_in` can't be negative. Got {event_ndims_in}.")
if event_ndims_out < 0:
raise ValueError(
f"`event_ndims_out` can't be negative. Got {event_ndims_out}.")
if is_constant_log_det is None:
is_constant_log_det = is_constant_jacobian
if is_constant_jacobian and not is_constant_log_det:
raise ValueError("The Jacobian is said to be constant, but its "
"determinant is said not to be, which is impossible.")
self._event_ndims_in = event_ndims_in
self._event_ndims_out = event_ndims_out
self._is_constant_jacobian = is_constant_jacobian
self._is_constant_log_det = is_constant_log_det
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
y, _ = self.forward_and_log_det(x)
return y
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
x, _ = self.inverse_and_log_det(y)
return x
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
_, logdet = self.forward_and_log_det(x)
return logdet
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
_, logdet = self.inverse_and_log_det(y)
return logdet
@abc.abstractmethod
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
raise NotImplementedError(
f"Bijector {self.name} does not implement `inverse_and_log_det`.")
@property
def event_ndims_in(self) -> int:
"""Number of input event dimensions."""
return self._event_ndims_in
@property
def event_ndims_out(self) -> int:
"""Number of output event dimensions."""
return self._event_ndims_out
@property
def is_constant_jacobian(self) -> bool:
"""Whether the Jacobian is promised to be constant."""
return self._is_constant_jacobian
@property
def is_constant_log_det(self) -> bool:
"""Whether the Jacobian determinant is promised to be constant."""
return self._is_constant_log_det
@property
def name(self) -> str:
"""Name of the bijector."""
return self.__class__.__name__
def same_as(self, other: "Bijector") -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
del other
return False
def _check_forward_input_shape(self, x: Array) -> None:
"""Checks that the input `x` to a forward method has valid shape."""
x_ndims = len(jnp.shape(x))
if x_ndims < self.event_ndims_in:
raise ValueError(
f"Bijector {self.name} has `event_ndims_in=={self.event_ndims_in}`,"
f" but the input has only {x_ndims} array dimensions.")
def _check_inverse_input_shape(self, y: Array) -> None:
"""Checks that the input `y` to an inverse method has valid shape."""
y_ndims = len(jnp.shape(y))
if y_ndims < self.event_ndims_out:
raise ValueError(
f"Bijector {self.name} has `event_ndims_out=={self.event_ndims_out}`,"
f" but the input has only {y_ndims} array dimensions.")
|
(event_ndims_in: int, event_ndims_out: Optional[int] = None, is_constant_jacobian: bool = False, is_constant_log_det: Optional[bool] = None)
|
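As a concrete illustration of the subclass contract described above, here is a minimal sketch of an affine bijector that implements the required `forward_and_log_det` plus the optional `inverse_and_log_det`. The class name `Scale` and its parameterization are illustrative, not part of distrax:

```python
import jax.numpy as jnp
import distrax


class Scale(distrax.Bijector):
  """y = scale * x, acting on scalar events (event_ndims_in == 0)."""

  def __init__(self, scale: float):
    # Affine, so the Jacobian is constant.
    super().__init__(event_ndims_in=0, is_constant_jacobian=True)
    self._scale = scale

  def forward_and_log_det(self, x):
    y = self._scale * x
    log_det = jnp.full(jnp.shape(x), jnp.log(jnp.abs(self._scale)))
    return y, log_det

  def inverse_and_log_det(self, y):
    x = y / self._scale
    log_det = jnp.full(jnp.shape(y), -jnp.log(jnp.abs(self._scale)))
    return x, log_det


bij = Scale(2.0)
print(bij.forward(jnp.array([1.0, 2.0])))            # [2. 4.]
print(bij.inverse_log_det_jacobian(jnp.array(3.0)))  # -log(2)
```

The remaining methods (`forward`, `inverse`, `forward_log_det_jacobian`, `inverse_log_det_jacobian`) are derived from these two by the base class, as stated in the docstring above.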
55,401 |
distrax._src.bijectors.bijector
|
__init__
|
Initializes a Bijector.
Args:
event_ndims_in: Number of input event dimensions. The bijector acts on
events of shape [M1, ..., Mn], where `n == event_ndims_in`.
event_ndims_out: Number of output event dimensions. The bijector returns
events of shape [L1, ..., Lq], where `q == event_ndims_out`. If None, it
defaults to `event_ndims_in`.
is_constant_jacobian: Whether the Jacobian is promised to be constant
(which is the case if and only if the bijector is affine). A value of
False will be interpreted as "we don't know whether the Jacobian is
constant", rather than "the Jacobian is definitely not constant". Only
set to True if you're absolutely sure the Jacobian is constant; if
you're not sure, set to False.
is_constant_log_det: Whether the Jacobian determinant is promised to be
constant (which is the case for, e.g., volume-preserving bijectors). If
None, it defaults to `is_constant_jacobian`. Note that the Jacobian
determinant can be constant without the Jacobian itself being constant.
Only set to True if you're absolutely sure the Jacobian determinant is
constant; if you're not sure, set to None.
|
def __init__(self,
event_ndims_in: int,
event_ndims_out: Optional[int] = None,
is_constant_jacobian: bool = False,
is_constant_log_det: Optional[bool] = None):
"""Initializes a Bijector.
Args:
event_ndims_in: Number of input event dimensions. The bijector acts on
events of shape [M1, ..., Mn], where `n == event_ndims_in`.
event_ndims_out: Number of output event dimensions. The bijector returns
events of shape [L1, ..., Lq], where `q == event_ndims_out`. If None, it
defaults to `event_ndims_in`.
is_constant_jacobian: Whether the Jacobian is promised to be constant
(which is the case if and only if the bijector is affine). A value of
False will be interpreted as "we don't know whether the Jacobian is
constant", rather than "the Jacobian is definitely not constant". Only
set to True if you're absolutely sure the Jacobian is constant; if
you're not sure, set to False.
is_constant_log_det: Whether the Jacobian determinant is promised to be
constant (which is the case for, e.g., volume-preserving bijectors). If
None, it defaults to `is_constant_jacobian`. Note that the Jacobian
determinant can be constant without the Jacobian itself being constant.
Only set to True if you're absolutely sure the Jacobian determinant is
constant; if you're not sure, set to None.
"""
if event_ndims_out is None:
event_ndims_out = event_ndims_in
if event_ndims_in < 0:
raise ValueError(
f"`event_ndims_in` can't be negative. Got {event_ndims_in}.")
if event_ndims_out < 0:
raise ValueError(
f"`event_ndims_out` can't be negative. Got {event_ndims_out}.")
if is_constant_log_det is None:
is_constant_log_det = is_constant_jacobian
if is_constant_jacobian and not is_constant_log_det:
raise ValueError("The Jacobian is said to be constant, but its "
"determinant is said not to be, which is impossible.")
self._event_ndims_in = event_ndims_in
self._event_ndims_out = event_ndims_out
self._is_constant_jacobian = is_constant_jacobian
self._is_constant_log_det = is_constant_log_det
|
(self, event_ndims_in: int, event_ndims_out: Optional[int] = None, is_constant_jacobian: bool = False, is_constant_log_det: Optional[bool] = None)
|
55,403 |
distrax._src.bijectors.bijector
|
_check_forward_input_shape
|
Checks that the input `x` to a forward method has valid shape.
|
def _check_forward_input_shape(self, x: Array) -> None:
"""Checks that the input `x` to a forward method has valid shape."""
x_ndims = len(jnp.shape(x))
if x_ndims < self.event_ndims_in:
raise ValueError(
f"Bijector {self.name} has `event_ndims_in=={self.event_ndims_in}`,"
f" but the input has only {x_ndims} array dimensions.")
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> NoneType
|
55,404 |
distrax._src.bijectors.bijector
|
_check_inverse_input_shape
|
Checks that the input `y` to an inverse method has valid shape.
|
def _check_inverse_input_shape(self, y: Array) -> None:
"""Checks that the input `y` to an inverse method has valid shape."""
y_ndims = len(jnp.shape(y))
if y_ndims < self.event_ndims_out:
raise ValueError(
f"Bijector {self.name} has `event_ndims_out=={self.event_ndims_out}`,"
f" but the input has only {y_ndims} array dimensions.")
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> NoneType
|
55,405 |
distrax._src.bijectors.bijector
|
forward
|
Computes y = f(x).
|
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
y, _ = self.forward_and_log_det(x)
return y
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,406 |
distrax._src.bijectors.bijector
|
forward_and_log_det
|
Computes y = f(x) and log|det J(f)(x)|.
|
@abc.abstractmethod
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,407 |
distrax._src.bijectors.bijector
|
forward_log_det_jacobian
|
Computes log|det J(f)(x)|.
|
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
_, logdet = self.forward_and_log_det(x)
return logdet
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,408 |
distrax._src.bijectors.bijector
|
inverse
|
Computes x = f^{-1}(y).
|
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
x, _ = self.inverse_and_log_det(y)
return x
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,409 |
distrax._src.bijectors.bijector
|
inverse_and_log_det
|
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
|
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
raise NotImplementedError(
f"Bijector {self.name} does not implement `inverse_and_log_det`.")
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,410 |
distrax._src.bijectors.bijector
|
inverse_log_det_jacobian
|
Computes log|det J(f^{-1})(y)|.
|
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
_, logdet = self.inverse_and_log_det(y)
return logdet
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,411 |
distrax._src.bijectors.bijector
|
same_as
|
Returns True if this bijector is guaranteed to be the same as `other`.
|
def same_as(self, other: "Bijector") -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
del other
return False
|
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
|
55,413 |
distrax._src.bijectors.block
|
Block
|
A wrapper that promotes a bijector to a block bijector.
A block bijector applies a bijector to a k-dimensional array of events, but
considers that array of events to be a single event. In practical terms, this
means that the log det Jacobian will be summed over its last k dimensions.
For example, consider a scalar bijector (such as `Tanh`) that operates on
scalar events. We may want to apply this bijector identically to a 4D array of
shape [N, H, W, C] representing a sequence of N images. Doing so naively will
produce a log det Jacobian of shape [N, H, W, C], because the scalar bijector
will assume scalar events and so all 4 dimensions will be considered as batch.
To promote the scalar bijector to a "block scalar" that operates on the 3D
arrays, use `Block(bijector, ndims=3)`. Then, applying the block
bijector will produce a log det Jacobian of shape [N] as desired.
In general, suppose `bijector` operates on n-dimensional events. Then,
`Block(bijector, k)` will promote `bijector` to a block bijector that
operates on (k + n)-dimensional events, summing the log det Jacobian over its
last k dimensions. In practice, this means that the last k batch dimensions
will be turned into event dimensions.
|
class Block(base.Bijector):
"""A wrapper that promotes a bijector to a block bijector.
A block bijector applies a bijector to a k-dimensional array of events, but
considers that array of events to be a single event. In practical terms, this
means that the log det Jacobian will be summed over its last k dimensions.
For example, consider a scalar bijector (such as `Tanh`) that operates on
scalar events. We may want to apply this bijector identically to a 4D array of
shape [N, H, W, C] representing a sequence of N images. Doing so naively will
produce a log det Jacobian of shape [N, H, W, C], because the scalar bijector
will assume scalar events and so all 4 dimensions will be considered as batch.
To promote the scalar bijector to a "block scalar" that operates on the 3D
arrays, use `Block(bijector, ndims=3)`. Then, applying the block
bijector will produce a log det Jacobian of shape [N] as desired.
In general, suppose `bijector` operates on n-dimensional events. Then,
`Block(bijector, k)` will promote `bijector` to a block bijector that
operates on (k + n)-dimensional events, summing the log det Jacobian over its
last k dimensions. In practice, this means that the last k batch dimensions
will be turned into event dimensions.
"""
def __init__(self, bijector: BijectorLike, ndims: int):
"""Initializes a Block.
Args:
bijector: the bijector to be promoted to a block bijector. It can be a
distrax bijector, a TFP bijector, or a callable to be wrapped by
`Lambda`.
ndims: number of batch dimensions to promote to event dimensions.
"""
if ndims < 0:
raise ValueError(f"`ndims` must be non-negative; got {ndims}.")
self._bijector = conversion.as_bijector(bijector)
self._ndims = ndims
super().__init__(
event_ndims_in=ndims + self._bijector.event_ndims_in,
event_ndims_out=ndims + self._bijector.event_ndims_out,
is_constant_jacobian=self._bijector.is_constant_jacobian,
is_constant_log_det=self._bijector.is_constant_log_det)
@property
def bijector(self) -> BijectorT:
"""The base bijector, without promoting to a block bijector."""
return self._bijector
@property
def ndims(self) -> int:
"""The number of batch dimensions promoted to event dimensions."""
return self._ndims
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
self._check_forward_input_shape(x)
return self._bijector.forward(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
self._check_inverse_input_shape(y)
return self._bijector.inverse(y)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
self._check_forward_input_shape(x)
log_det = self._bijector.forward_log_det_jacobian(x)
return math.sum_last(log_det, self._ndims)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
log_det = self._bijector.inverse_log_det_jacobian(y)
return math.sum_last(log_det, self._ndims)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
self._check_forward_input_shape(x)
y, log_det = self._bijector.forward_and_log_det(x)
return y, math.sum_last(log_det, self._ndims)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
x, log_det = self._bijector.inverse_and_log_det(y)
return x, math.sum_last(log_det, self._ndims)
@property
def name(self) -> str:
"""Name of the bijector."""
return self.__class__.__name__ + self._bijector.name
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Block: # pylint: disable=unidiomatic-typecheck
return self.bijector.same_as(other.bijector) and self.ndims == other.ndims
return False
|
(bijector: Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]], ndims: int)
|
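A short sketch of the [N, H, W, C] example from the docstring above, using `distrax.Tanh` as the scalar bijector (assuming distrax is installed):

```python
import jax
import jax.numpy as jnp
import distrax

x = jax.random.uniform(jax.random.PRNGKey(0), (8, 4, 4, 3))  # [N, H, W, C]

scalar_bij = distrax.Tanh()
block_bij = distrax.Block(scalar_bij, ndims=3)

# The scalar bijector treats all 4 axes as batch: log det has shape [N, H, W, C].
print(scalar_bij.forward_log_det_jacobian(x).shape)  # (8, 4, 4, 3)
# The block bijector sums the log det over the last 3 axes: shape [N].
print(block_bij.forward_log_det_jacobian(x).shape)   # (8,)
```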
55,414 |
distrax._src.bijectors.block
|
__init__
|
Initializes a Block.
Args:
bijector: the bijector to be promoted to a block bijector. It can be a
distrax bijector, a TFP bijector, or a callable to be wrapped by
`Lambda`.
ndims: number of batch dimensions to promote to event dimensions.
|
def __init__(self, bijector: BijectorLike, ndims: int):
"""Initializes a Block.
Args:
bijector: the bijector to be promoted to a block bijector. It can be a
distrax bijector, a TFP bijector, or a callable to be wrapped by
`Lambda`.
ndims: number of batch dimensions to promote to event dimensions.
"""
if ndims < 0:
raise ValueError(f"`ndims` must be non-negative; got {ndims}.")
self._bijector = conversion.as_bijector(bijector)
self._ndims = ndims
super().__init__(
event_ndims_in=ndims + self._bijector.event_ndims_in,
event_ndims_out=ndims + self._bijector.event_ndims_out,
is_constant_jacobian=self._bijector.is_constant_jacobian,
is_constant_log_det=self._bijector.is_constant_log_det)
|
(self, bijector: Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]], ndims: int)
|
55,418 |
distrax._src.bijectors.block
|
forward
|
Computes y = f(x).
|
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
self._check_forward_input_shape(x)
return self._bijector.forward(x)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,419 |
distrax._src.bijectors.block
|
forward_and_log_det
|
Computes y = f(x) and log|det J(f)(x)|.
|
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
self._check_forward_input_shape(x)
y, log_det = self._bijector.forward_and_log_det(x)
return y, math.sum_last(log_det, self._ndims)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,420 |
distrax._src.bijectors.block
|
forward_log_det_jacobian
|
Computes log|det J(f)(x)|.
|
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
self._check_forward_input_shape(x)
log_det = self._bijector.forward_log_det_jacobian(x)
return math.sum_last(log_det, self._ndims)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,421 |
distrax._src.bijectors.block
|
inverse
|
Computes x = f^{-1}(y).
|
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
self._check_inverse_input_shape(y)
return self._bijector.inverse(y)
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,422 |
distrax._src.bijectors.block
|
inverse_and_log_det
|
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
|
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
x, log_det = self._bijector.inverse_and_log_det(y)
return x, math.sum_last(log_det, self._ndims)
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,423 |
distrax._src.bijectors.block
|
inverse_log_det_jacobian
|
Computes log|det J(f^{-1})(y)|.
|
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
log_det = self._bijector.inverse_log_det_jacobian(y)
return math.sum_last(log_det, self._ndims)
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,424 |
distrax._src.bijectors.block
|
same_as
|
Returns True if this bijector is guaranteed to be the same as `other`.
|
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Block: # pylint: disable=unidiomatic-typecheck
return self.bijector.same_as(other.bijector) and self.ndims == other.ndims
return False
|
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
|
55,426 |
distrax._src.distributions.categorical
|
Categorical
|
Categorical distribution.
|
class Categorical(distribution.Distribution):
"""Categorical distribution."""
equiv_tfp_cls = tfd.Categorical
def __init__(self,
logits: Optional[Array] = None,
probs: Optional[Array] = None,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes a Categorical distribution.
Args:
logits: Logit transform of the probability of each category. Only one
of `logits` or `probs` can be specified.
probs: Probability of each category. Only one of `logits` or `probs` can
be specified.
dtype: The type of event samples.
"""
super().__init__()
if (logits is None) == (probs is None):
raise ValueError(
f'One and exactly one of `logits` and `probs` should be `None`, '
f'but `logits` is {logits} and `probs` is {probs}.')
if not (jnp.issubdtype(dtype, jnp.integer) or
jnp.issubdtype(dtype, jnp.floating)):
raise ValueError(
f'The dtype of `{self.name}` must be integer or floating-point, '
f'instead got `{dtype}`.')
self._probs = None if probs is None else math.normalize(probs=probs)
self._logits = None if logits is None else math.normalize(logits=logits)
self._dtype = dtype
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def logits(self) -> Array:
"""The logits for each event."""
if self._logits is not None:
return self._logits
return jnp.log(self._probs)
@property
def probs(self) -> Array:
"""The probabilities for each event."""
if self._probs is not None:
return self._probs
return jax.nn.softmax(self._logits, axis=-1)
@property
def num_categories(self) -> int:
"""Number of categories."""
if self._probs is not None:
return self._probs.shape[-1]
return self._logits.shape[-1]
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
new_shape = (n,) + self.logits.shape[:-1]
is_valid = jnp.logical_and(jnp.all(jnp.isfinite(self.probs), axis=-1),
jnp.all(self.probs >= 0, axis=-1))
draws = jax.random.categorical(key=key, logits=self.logits, axis=-1,
shape=new_shape).astype(self._dtype)
return jnp.where(is_valid, draws, jnp.ones_like(draws) * -1)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
value_one_hot = jax.nn.one_hot(
value, self.num_categories, dtype=self.logits.dtype)
mask_outside_domain = jnp.logical_or(
value < 0, value > self.num_categories - 1)
return jnp.where(
mask_outside_domain, -jnp.inf,
jnp.sum(math.multiply_no_nan(self.logits, value_one_hot), axis=-1))
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
value_one_hot = jax.nn.one_hot(
value, self.num_categories, dtype=self.probs.dtype)
return jnp.sum(math.multiply_no_nan(self.probs, value_one_hot), axis=-1)
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
if self._logits is None:
log_probs = jnp.log(self._probs)
else:
log_probs = jax.nn.log_softmax(self._logits)
return -jnp.sum(math.mul_exp(log_probs, log_probs), axis=-1)
def mode(self) -> Array:
"""See `Distribution.mode`."""
parameter = self._probs if self._logits is None else self._logits
return jnp.argmax(parameter, axis=-1).astype(self._dtype)
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
# For value < 0 the output should be zero because support = {0, ..., K-1}.
should_be_zero = value < 0
# For value >= K-1 the output should be one. Explicitly accounting for this
# case addresses potential numerical issues that may arise when evaluating
# derived methods (mainly, `log_survival_function`) for `value >= K-1`.
should_be_one = value >= self.num_categories - 1
# Will use value as an index below, so clip it to {0, ..., K-1}.
value = jnp.clip(value, 0, self.num_categories - 1)
value_one_hot = jax.nn.one_hot(
value, self.num_categories, dtype=self.probs.dtype)
cdf = jnp.sum(math.multiply_no_nan(
jnp.cumsum(self.probs, axis=-1), value_one_hot), axis=-1)
return jnp.where(should_be_zero, 0., jnp.where(should_be_one, 1., cdf))
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def logits_parameter(self) -> Array:
"""Wrapper for `logits` property, for TFP API compatibility."""
return self.logits
def __getitem__(self, index) -> 'Categorical':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
if self._logits is not None:
return Categorical(logits=self.logits[index], dtype=self._dtype)
return Categorical(probs=self.probs[index], dtype=self._dtype)
|
(logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
|
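A minimal usage sketch for the class above, assuming distrax is installed and exports `Categorical`; exactly one of `logits` or `probs` is given:

```python
import jax
import jax.numpy as jnp
import distrax

dist = distrax.Categorical(logits=jnp.array([0.0, 1.0, 2.0]))  # 3 categories

samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(5,))
print(samples)                      # integer draws in {0, 1, 2}
print(dist.probs)                   # softmax of the logits
print(dist.log_prob(jnp.array(2)))  # log softmax(logits)[2]
print(dist.log_prob(jnp.array(3)))  # -inf: outside the support {0, ..., K-1}
```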
55,427 |
distrax._src.distributions.categorical
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Categorical':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
if self._logits is not None:
return Categorical(logits=self.logits[index], dtype=self._dtype)
return Categorical(probs=self.probs[index], dtype=self._dtype)
|
(self, index) -> distrax._src.distributions.categorical.Categorical
|
55,428 |
distrax._src.distributions.categorical
|
__init__
|
Initializes a Categorical distribution.
Args:
logits: Logit transform of the probability of each category. Only one
of `logits` or `probs` can be specified.
probs: Probability of each category. Only one of `logits` or `probs` can
be specified.
dtype: The type of event samples.
|
def __init__(self,
logits: Optional[Array] = None,
probs: Optional[Array] = None,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes a Categorical distribution.
Args:
logits: Logit transform of the probability of each category. Only one
of `logits` or `probs` can be specified.
probs: Probability of each category. Only one of `logits` or `probs` can
be specified.
dtype: The type of event samples.
"""
super().__init__()
if (logits is None) == (probs is None):
raise ValueError(
f'One and exactly one of `logits` and `probs` should be `None`, '
f'but `logits` is {logits} and `probs` is {probs}.')
if not (jnp.issubdtype(dtype, jnp.integer) or
jnp.issubdtype(dtype, jnp.floating)):
raise ValueError(
f'The dtype of `{self.name}` must be integer or floating-point, '
f'instead got `{dtype}`.')
self._probs = None if probs is None else math.normalize(probs=probs)
self._logits = None if logits is None else math.normalize(logits=logits)
self._dtype = dtype
|
(self, logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
|
55,431 |
distrax._src.distributions.categorical
|
_sample_n
|
See `Distribution._sample_n`.
|
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
new_shape = (n,) + self.logits.shape[:-1]
is_valid = jnp.logical_and(jnp.all(jnp.isfinite(self.probs), axis=-1),
jnp.all(self.probs >= 0, axis=-1))
draws = jax.random.categorical(key=key, logits=self.logits, axis=-1,
shape=new_shape).astype(self._dtype)
return jnp.where(is_valid, draws, jnp.ones_like(draws) * -1)
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,433 |
distrax._src.distributions.categorical
|
cdf
|
See `Distribution.cdf`.
|
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
# For value < 0 the output should be zero because support = {0, ..., K-1}.
should_be_zero = value < 0
# For value >= K-1 the output should be one. Explicitly accounting for this
# case addresses potential numerical issues that may arise when evaluating
# derived methods (mainly, `log_survival_function`) for `value >= K-1`.
should_be_one = value >= self.num_categories - 1
# Will use value as an index below, so clip it to {0, ..., K-1}.
value = jnp.clip(value, 0, self.num_categories - 1)
value_one_hot = jax.nn.one_hot(
value, self.num_categories, dtype=self.probs.dtype)
cdf = jnp.sum(math.multiply_no_nan(
jnp.cumsum(self.probs, axis=-1), value_one_hot), axis=-1)
return jnp.where(should_be_zero, 0., jnp.where(should_be_one, 1., cdf))
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,435 |
distrax._src.distributions.categorical
|
entropy
|
See `Distribution.entropy`.
|
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
if self._logits is None:
log_probs = jnp.log(self._probs)
else:
log_probs = jax.nn.log_softmax(self._logits)
return -jnp.sum(math.mul_exp(log_probs, log_probs), axis=-1)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,438 |
distrax._src.distributions.categorical
|
log_prob
|
See `Distribution.log_prob`.
|
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
value_one_hot = jax.nn.one_hot(
value, self.num_categories, dtype=self.logits.dtype)
mask_outside_domain = jnp.logical_or(
value < 0, value > self.num_categories - 1)
return jnp.where(
mask_outside_domain, -jnp.inf,
jnp.sum(math.multiply_no_nan(self.logits, value_one_hot), axis=-1))
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,440 |
distrax._src.distributions.categorical
|
logits_parameter
|
Wrapper for `logits` property, for TFP API compatibility.
|
def logits_parameter(self) -> Array:
"""Wrapper for `logits` property, for TFP API compatibility."""
return self.logits
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,441 |
distrax._src.distributions.distribution
|
mean
|
Calculates the mean.
|
def mean(self) -> EventT:
"""Calculates the mean."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `mean`.')
|
(self) -> ~EventT
|
55,443 |
distrax._src.distributions.categorical
|
mode
|
See `Distribution.mode`.
|
def mode(self) -> Array:
"""See `Distribution.mode`."""
parameter = self._probs if self._logits is None else self._logits
return jnp.argmax(parameter, axis=-1).astype(self._dtype)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,444 |
distrax._src.distributions.categorical
|
prob
|
See `Distribution.prob`.
|
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
value_one_hot = jax.nn.one_hot(
value, self.num_categories, dtype=self.probs.dtype)
return jnp.sum(math.multiply_no_nan(self.probs, value_one_hot), axis=-1)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,450 |
distrax._src.distributions.distribution
|
variance
|
Calculates the variance.
|
def variance(self) -> EventT:
"""Calculates the variance."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `variance`.')
|
(self) -> ~EventT
|
55,451 |
distrax._src.distributions.categorical_uniform
|
CategoricalUniform
|
Mixture Categorical-Uniform distribution.
Given an interval `[a, b]` and a probability vector `p = [p_1, ..., p_K]`, a
random variable `x` follows a Categorical-Uniform distribution if its PDF
is `p(x) = p_k / C` if `(k-1)C <= x - a < kC` for any `k = 1, ..., K`,
where `C = (b-a) / K`, and `p(x) = 0` otherwise.
Equivalently, the Categorical-Uniform can be understood as a mixture of
Uniform distributions, with mixture probabilities `p_k` and Uniform component
distributions with support in `[a + (k-1)C, a + kC]`.
|
class CategoricalUniform(distribution.Distribution):
"""Mixture Categorical-Uniform distribution.
Given an interval `[a, b]` and a probability vector `p = [p_1, ..., p_K]`, a
random variable `x` follows a Categorical-Uniform distribution if its PDF
is `p(x) = p_k / C` if `(k-1)C <= x - a < kC` for any `k = 1, ..., K`,
where `C = (b-a) / K`, and `p(x) = 0` otherwise.
Equivalently, the Categorical-Uniform can be understood as a mixture of
Uniform distributions, with mixture probabilities `p_k` and Uniform component
distributions with support in `[a + (k-1)C, a + kC]`.
"""
def __init__(
self,
*,
low: Numeric,
high: Numeric,
logits: Array,
) -> None:
"""Initializes a CategoricalUniform distribution.
Args:
low: The lowest value of the support, denoted `a` in the class
docstring. It can also be a batch of values.
high: The highest value of the support, denoted `b` in the class
docstring. It can also be a batch of values.
logits: The unnormalized log-probabilities of the mixture. It must be an
array of length `K`. Additional leading dimensions, if any, index
batches.
"""
super().__init__()
self._low = conversion.as_float_array(low)
self._high = conversion.as_float_array(high)
self._logits = conversion.as_float_array(logits)
if self._logits.ndim < 1:
raise ValueError(
'The parameter `logits` must have at least one dimension.')
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return jax.lax.broadcast_shapes(
self._low.shape, self._high.shape, self._logits.shape[:-1])
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
quantile = jax.random.uniform(key, (n,) + self.batch_shape)
return self._inverse_cdf(quantile)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return self._get_mixture().log_prob(value)
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
# The following holds because the components have non-overlapping domains.
mixture = self._get_mixture()
return mixture.mixture_distribution.entropy() + jnp.log(
(self._high - self._low) / self.num_bins)
def mean(self) -> Array:
"""Calculates the mean."""
return self._get_mixture().mean()
def variance(self) -> Array:
"""Calculates the variance."""
return self._get_mixture().variance()
def __getitem__(self, index) -> 'CategoricalUniform':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return CategoricalUniform(
low=self.low[index], high=self.high[index], logits=self.logits[index])
def _get_category_limits(self) -> Array:
"""Gets limits for each category."""
return jnp.linspace(self.low, self.high, self.num_bins + 1, axis=-1)
def _get_mixture(self) -> mixture_same_family.MixtureSameFamily:
"""Gets a mixture distribution."""
limits = self._get_category_limits()
return mixture_same_family.MixtureSameFamily(
components_distribution=uniform.Uniform(
low=limits[..., :-1], high=limits[..., 1:]),
mixture_distribution=categorical.Categorical(logits=self.logits),
)
def _inverse_cdf(self, quantile):
"""Inverse cumulative density function."""
probs = jax.nn.softmax(self.logits, axis=-1)
cum_probs = jnp.cumsum(probs, axis=-1)
quantile_limits = jnp.concatenate(
[jnp.zeros_like(cum_probs[..., :1]), cum_probs], axis=-1)
limits = self._get_category_limits()
domain_diff = jnp.diff(limits, axis=-1)
quantile_diff = jnp.diff(quantile_limits, axis=-1)
slopes = domain_diff / quantile_diff
quantile_contributions = jnp.minimum(
quantile_diff,
jax.nn.relu(quantile[..., None] - quantile_limits[..., :-1]),
)
return self.low + jnp.sum(slopes * quantile_contributions, axis=-1)
@property
def low(self) -> Array:
# Broadcasted version of the argument passed in the initializer.
return jnp.broadcast_to(self._low, self.batch_shape)
@property
def high(self) -> Array:
# Broadcasted version of the argument passed in the initializer.
return jnp.broadcast_to(self._high, self.batch_shape)
@property
def logits(self) -> Array:
return jnp.broadcast_to(self._logits, self.batch_shape + (self.num_bins,))
@property
def num_bins(self) -> int:
return self._logits.shape[-1]
|
(*, low: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], high: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> None
|
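A small sketch of the PDF described above with `K = 3` bins over `[0, 1]`. The import path follows the module name shown in this table; whether the class is also re-exported at the top level may depend on the distrax version:

```python
import jax
import jax.numpy as jnp
from distrax._src.distributions.categorical_uniform import CategoricalUniform

# softmax([0, 0, log 2]) = [0.25, 0.25, 0.5]
dist = CategoricalUniform(
    low=0.0, high=1.0, logits=jnp.array([0.0, 0.0, jnp.log(2.0)]))

# Bin width C = (b - a) / K = 1/3, so the density inside bin k is p_k / C.
print(dist.log_prob(jnp.array(0.9)))  # 0.9 lies in the last bin: log(0.5 / (1/3)) = log(1.5)
print(dist.mean(), dist.variance())
print(dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(4,)))
```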
55,452 |
distrax._src.distributions.categorical_uniform
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'CategoricalUniform':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return CategoricalUniform(
low=self.low[index], high=self.high[index], logits=self.logits[index])
|
(self, index) -> distrax._src.distributions.categorical_uniform.CategoricalUniform
|
55,453 |
distrax._src.distributions.categorical_uniform
|
__init__
|
Initializes a CategoricalUniform distribution.
Args:
low: The lowest value of the support, denoted `a` in the class
docstring. It can also be a batch of values.
high: The highest value of the support, denoted `b` in the class
docstring. It can also be a batch of values.
logits: The unnormalized log-probabilities of the mixture. It must be an
array of length `K`. Additional leading dimensions, if any, index
batches.
|
def __init__(
self,
*,
low: Numeric,
high: Numeric,
logits: Array,
) -> None:
"""Initializes a CategoricalUniform distribution.
Args:
low: The lowest value of the support, denoted `a` in the class
docstring. It can also be a batch of values.
high: The highest value of the support, denoted `b` in the class
docstring. It can also be a batch of values.
logits: The unnormalized log-probabilities of the mixture. It must be an
array of length `K`. Additional leading dimensions, if any, index
batches.
"""
super().__init__()
self._low = conversion.as_float_array(low)
self._high = conversion.as_float_array(high)
self._logits = conversion.as_float_array(logits)
if self._logits.ndim < 1:
raise ValueError(
'The parameter `logits` must have at least one dimension.')
|
(self, *, low: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], high: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> NoneType
|
55,455 |
distrax._src.distributions.categorical_uniform
|
_get_category_limits
|
Gets limits for each category.
|
def _get_category_limits(self) -> Array:
"""Gets limits for each category."""
return jnp.linspace(self.low, self.high, self.num_bins + 1, axis=-1)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,456 |
distrax._src.distributions.categorical_uniform
|
_get_mixture
|
Gets a mixture distribution.
|
def _get_mixture(self) -> mixture_same_family.MixtureSameFamily:
"""Gets a mixture distribution."""
limits = self._get_category_limits()
return mixture_same_family.MixtureSameFamily(
components_distribution=uniform.Uniform(
low=limits[..., :-1], high=limits[..., 1:]),
mixture_distribution=categorical.Categorical(logits=self.logits),
)
|
(self) -> distrax._src.distributions.mixture_same_family.MixtureSameFamily
|
55,457 |
distrax._src.distributions.categorical_uniform
|
_inverse_cdf
|
Inverse cumulative distribution function.
|
def _inverse_cdf(self, quantile):
"""Inverse cumulative density function."""
probs = jax.nn.softmax(self.logits, axis=-1)
cum_probs = jnp.cumsum(probs, axis=-1)
quantile_limits = jnp.concatenate(
[jnp.zeros_like(cum_probs[..., :1]), cum_probs], axis=-1)
limits = self._get_category_limits()
domain_diff = jnp.diff(limits, axis=-1)
quantile_diff = jnp.diff(quantile_limits, axis=-1)
slopes = domain_diff / quantile_diff
quantile_contributions = jnp.minimum(
quantile_diff,
jax.nn.relu(quantile[..., None] - quantile_limits[..., :-1]),
)
return self.low + jnp.sum(slopes * quantile_contributions, axis=-1)
|
(self, quantile)
|
55,459 |
distrax._src.distributions.categorical_uniform
|
_sample_n
|
See `Distribution._sample_n`.
|
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
quantile = jax.random.uniform(key, (n,) + self.batch_shape)
return self._inverse_cdf(quantile)
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,461 |
distrax._src.distributions.distribution
|
cdf
|
Evaluates the cumulative distribution function at `value`.
Args:
value: An event.
Returns:
The CDF evaluated at value, i.e. P[X <= value].
|
def cdf(self, value: EventT) -> Array:
"""Evaluates the cumulative distribution function at `value`.
Args:
value: An event.
Returns:
The CDF evaluated at value, i.e. P[X <= value].
"""
return jnp.exp(self.log_cdf(value))
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,463 |
distrax._src.distributions.categorical_uniform
|
entropy
|
See `Distribution.entropy`.
|
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
# The following holds because the components have non-overlapping domains.
mixture = self._get_mixture()
return mixture.mixture_distribution.entropy() + jnp.log(
(self._high - self._low) / self.num_bins)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,465 |
distrax._src.distributions.distribution
|
log_cdf
|
Evaluates the log cumulative distribution function at `value`.
Args:
value: An event.
Returns:
The log CDF evaluated at value, i.e. log P[X <= value].
|
def log_cdf(self, value: EventT) -> Array:
"""Evaluates the log cumulative distribution function at `value`.
Args:
value: An event.
Returns:
The log CDF evaluated at value, i.e. log P[X <= value].
"""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `log_cdf`.')
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,466 |
distrax._src.distributions.categorical_uniform
|
log_prob
|
See `Distribution.log_prob`.
|
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return self._get_mixture().log_prob(value)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,468 |
distrax._src.distributions.categorical_uniform
|
mean
|
Calculates the mean.
|
def mean(self) -> Array:
"""Calculates the mean."""
return self._get_mixture().mean()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,470 |
distrax._src.distributions.distribution
|
mode
|
Calculates the mode.
|
def mode(self) -> EventT:
"""Calculates the mode."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `mode`.')
|
(self) -> ~EventT
|
55,477 |
distrax._src.distributions.categorical_uniform
|
variance
|
Calculates the variance.
|
def variance(self) -> Array:
"""Calculates the variance."""
return self._get_mixture().variance()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,478 |
distrax._src.bijectors.chain
|
Chain
|
Composition of a sequence of bijectors into a single bijector.
Bijectors are composable: if `f` and `g` are bijectors, then `g o f` is also
a bijector. Given a sequence of bijectors `[f1, ..., fN]`, this class
implements the bijector defined by `fN o ... o f1`.
NOTE: the bijectors are applied in reverse order from the order they appear in
the sequence. For example, consider the following code where `f` and `g` are
two bijectors:
```
layers = []
layers.append(f)
layers.append(g)
bijector = distrax.Chain(layers)
y = bijector.forward(x)
```
The above code will transform `x` by first applying `g`, then `f`, so that
`y = f(g(x))`.
|
class Chain(base.Bijector):
"""Composition of a sequence of bijectors into a single bijector.
Bijectors are composable: if `f` and `g` are bijectors, then `g o f` is also
a bijector. Given a sequence of bijectors `[f1, ..., fN]`, this class
implements the bijector defined by `fN o ... o f1`.
NOTE: the bijectors are applied in reverse order from the order they appear in
the sequence. For example, consider the following code where `f` and `g` are
two bijectors:
```
layers = []
layers.append(f)
layers.append(g)
bijector = distrax.Chain(layers)
y = bijector.forward(x)
```
The above code will transform `x` by first applying `g`, then `f`, so that
`y = f(g(x))`.
"""
def __init__(self, bijectors: Sequence[BijectorLike]):
"""Initializes a Chain bijector.
Args:
bijectors: a sequence of bijectors to be composed into one. Each bijector
can be a distrax bijector, a TFP bijector, or a callable to be wrapped
by `Lambda`. The sequence must contain at least one bijector.
"""
if not bijectors:
raise ValueError("The sequence of bijectors cannot be empty.")
self._bijectors = [conversion.as_bijector(b) for b in bijectors]
# Check that neighboring bijectors in the chain have compatible dimensions
for i, (outer, inner) in enumerate(zip(self._bijectors[:-1],
self._bijectors[1:])):
if outer.event_ndims_in != inner.event_ndims_out:
raise ValueError(
f"The chain of bijector event shapes are incompatible. Bijector "
f"{i} ({outer.name}) expects events with {outer.event_ndims_in} "
f"dimensions, while Bijector {i+1} ({inner.name}) produces events "
f"with {inner.event_ndims_out} dimensions.")
is_constant_jacobian = all(b.is_constant_jacobian for b in self._bijectors)
is_constant_log_det = all(b.is_constant_log_det for b in self._bijectors)
super().__init__(
event_ndims_in=self._bijectors[-1].event_ndims_in,
event_ndims_out=self._bijectors[0].event_ndims_out,
is_constant_jacobian=is_constant_jacobian,
is_constant_log_det=is_constant_log_det)
@property
def bijectors(self) -> List[BijectorT]:
"""The list of bijectors in the chain."""
return self._bijectors
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
for bijector in reversed(self._bijectors):
x = bijector.forward(x)
return x
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
for bijector in self._bijectors:
y = bijector.inverse(y)
return y
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
x, log_det = self._bijectors[-1].forward_and_log_det(x)
for bijector in reversed(self._bijectors[:-1]):
x, ld = bijector.forward_and_log_det(x)
log_det += ld
return x, log_det
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
y, log_det = self._bijectors[0].inverse_and_log_det(y)
for bijector in self._bijectors[1:]:
y, ld = bijector.inverse_and_log_det(y)
log_det += ld
return y, log_det
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Chain: # pylint: disable=unidiomatic-typecheck
if len(self.bijectors) != len(other.bijectors):
return False
for bij1, bij2 in zip(self.bijectors, other.bijectors):
if not bij1.same_as(bij2):
return False
return True
elif len(self.bijectors) == 1:
return self.bijectors[0].same_as(other)
return False
|
(bijectors: Sequence[Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]]])
|
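A short sketch of the ordering note above: `Chain([f, g])` applies `g` first and then `f` (the callable below is wrapped by `Lambda`, as the `bijectors` argument allows; the affine choice is illustrative):

```python
import jax.numpy as jnp
import distrax

f = distrax.Tanh()
g = distrax.Lambda(lambda x: 2.0 * x)  # illustrative affine callable

chain = distrax.Chain([f, g])
x = jnp.array([0.1, 0.2])

print(chain.forward(x))         # tanh(2 * x)
print(f.forward(g.forward(x)))  # same result: f is applied after g
```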
55,479 |
distrax._src.bijectors.chain
|
__init__
|
Initializes a Chain bijector.
Args:
bijectors: a sequence of bijectors to be composed into one. Each bijector
can be a distrax bijector, a TFP bijector, or a callable to be wrapped
by `Lambda`. The sequence must contain at least one bijector.
|
def __init__(self, bijectors: Sequence[BijectorLike]):
"""Initializes a Chain bijector.
Args:
bijectors: a sequence of bijectors to be composed into one. Each bijector
can be a distrax bijector, a TFP bijector, or a callable to be wrapped
by `Lambda`. The sequence must contain at least one bijector.
"""
if not bijectors:
raise ValueError("The sequence of bijectors cannot be empty.")
self._bijectors = [conversion.as_bijector(b) for b in bijectors]
# Check that neighboring bijectors in the chain have compatible dimensions
for i, (outer, inner) in enumerate(zip(self._bijectors[:-1],
self._bijectors[1:])):
if outer.event_ndims_in != inner.event_ndims_out:
raise ValueError(
f"The chain of bijector event shapes are incompatible. Bijector "
f"{i} ({outer.name}) expects events with {outer.event_ndims_in} "
f"dimensions, while Bijector {i+1} ({inner.name}) produces events "
f"with {inner.event_ndims_out} dimensions.")
is_constant_jacobian = all(b.is_constant_jacobian for b in self._bijectors)
is_constant_log_det = all(b.is_constant_log_det for b in self._bijectors)
super().__init__(
event_ndims_in=self._bijectors[-1].event_ndims_in,
event_ndims_out=self._bijectors[0].event_ndims_out,
is_constant_jacobian=is_constant_jacobian,
is_constant_log_det=is_constant_log_det)
|
(self, bijectors: Sequence[Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]]])
|
55,483 |
distrax._src.bijectors.chain
|
forward
|
Computes y = f(x).
|
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
for bijector in reversed(self._bijectors):
x = bijector.forward(x)
return x
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,484 |
distrax._src.bijectors.chain
|
forward_and_log_det
|
Computes y = f(x) and log|det J(f)(x)|.
|
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
x, log_det = self._bijectors[-1].forward_and_log_det(x)
for bijector in reversed(self._bijectors[:-1]):
x, ld = bijector.forward_and_log_det(x)
log_det += ld
return x, log_det
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,486 |
distrax._src.bijectors.chain
|
inverse
|
Computes x = f^{-1}(y).
|
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
for bijector in self._bijectors:
y = bijector.inverse(y)
return y
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,487 |
distrax._src.bijectors.chain
|
inverse_and_log_det
|
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
|
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
y, log_det = self._bijectors[0].inverse_and_log_det(y)
for bijector in self._bijectors[1:]:
y, ld = bijector.inverse_and_log_det(y)
log_det += ld
return y, log_det
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,489 |
distrax._src.bijectors.chain
|
same_as
|
Returns True if this bijector is guaranteed to be the same as `other`.
|
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Chain: # pylint: disable=unidiomatic-typecheck
if len(self.bijectors) != len(other.bijectors):
return False
for bij1, bij2 in zip(self.bijectors, other.bijectors):
if not bij1.same_as(bij2):
return False
return True
elif len(self.bijectors) == 1:
return self.bijectors[0].same_as(other)
return False
|
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
|
55,491 |
distrax._src.distributions.clipped
|
Clipped
|
A clipped distribution.
|
class Clipped(base_distribution.Distribution):
"""A clipped distribution."""
def __init__(
self,
distribution: DistributionLike,
minimum: Numeric,
maximum: Numeric):
"""Wraps a distribution clipping samples out of `[minimum, maximum]`.
The samples outside of `[minimum, maximum]` are clipped to the boundary.
The log probability of samples outside of this range is `-inf`.
Args:
distribution: a Distrax / TFP distribution to be wrapped.
minimum: can be a `scalar` or `vector`; if a vector, must have fewer dims
than `distribution.batch_shape` and must be broadcastable to it.
maximum: can be a `scalar` or `vector`; if a vector, must have fewer dims
than `distribution.batch_shape` and must be broadcastable to it.
"""
super().__init__()
if distribution.event_shape:
raise ValueError('The wrapped distribution must have event shape ().')
if (jnp.array(minimum).ndim > len(distribution.batch_shape) or
jnp.array(maximum).ndim > len(distribution.batch_shape)):
raise ValueError(
'The minimum and maximum clipping boundaries must be scalars or '
'vectors with fewer dimensions than the batch_shape of the distribution, '
'i.e. we can broadcast min/max to batch_shape but not vice versa.')
self._distribution = conversion.as_distribution(distribution)
self._minimum = jnp.broadcast_to(minimum, self._distribution.batch_shape)
self._maximum = jnp.broadcast_to(maximum, self._distribution.batch_shape)
self._log_prob_minimum = self._distribution.log_cdf(minimum)
self._log_prob_maximum = self._distribution.log_survival_function(maximum)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
raw_sample = self._distribution.sample(seed=key, sample_shape=[n])
return jnp.clip(raw_sample, self._minimum, self._maximum)
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples = self._sample_n(key, n)
return samples, self.log_prob(samples)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
# The log_prob can be used to compute expectations by explicitly integrating
# over the discrete and continuous elements.
# Info about mixed distributions:
# http://www.randomservices.org/random/dist/Mixed.html
log_prob = jnp.where(
jnp.equal(value, self._minimum),
self._log_prob_minimum,
jnp.where(jnp.equal(value, self._maximum),
self._log_prob_maximum,
self._distribution.log_prob(value)))
# Giving -inf log_prob outside the boundaries.
return jnp.where(
jnp.logical_or(value < self._minimum, value > self._maximum),
-jnp.inf,
log_prob)
@property
def minimum(self) -> Array:
return self._minimum
@property
def maximum(self) -> Array:
return self._maximum
@property
def distribution(self) -> DistributionLike:
return self._distribution
@property
def event_shape(self) -> Tuple[int, ...]:
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
return self._distribution.batch_shape
def __getitem__(self, index) -> 'Clipped':
"""See `Distribution.__getitem__`."""
index = base_distribution.to_batch_shape_index(self.batch_shape, index)
return Clipped(
distribution=self.distribution[index],
minimum=self.minimum[index],
maximum=self.maximum[index])
|
(distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], minimum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], maximum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
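To make the clipping behaviour concrete, here is a small hedged sketch; it assumes `Clipped` and `Normal` are available at the top level as `distrax.Clipped` and `distrax.Normal`:
```
import jax
import jax.numpy as jnp
import distrax

base = distrax.Normal(loc=0., scale=1.)
clipped = distrax.Clipped(base, minimum=-1., maximum=1.)

samples = clipped.sample(seed=jax.random.PRNGKey(0), sample_shape=(5,))
# All samples lie inside [-1, 1]; mass outside is moved onto the boundary.
assert jnp.all((samples >= -1.) & (samples <= 1.))

# log P(X = -1) is the log CDF of the base at -1 (an atom, not a density).
assert jnp.allclose(clipped.log_prob(-1.), base.log_cdf(-1.))
# Values outside [minimum, maximum] get -inf log probability.
assert jnp.isinf(clipped.log_prob(2.))
```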
55,492 |
distrax._src.distributions.clipped
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Clipped':
"""See `Distribution.__getitem__`."""
index = base_distribution.to_batch_shape_index(self.batch_shape, index)
return Clipped(
distribution=self.distribution[index],
minimum=self.minimum[index],
maximum=self.maximum[index])
|
(self, index) -> distrax._src.distributions.clipped.Clipped
|
55,493 |
distrax._src.distributions.clipped
|
__init__
|
Wraps a distribution clipping samples out of `[minimum, maximum]`.
The samples outside of `[minimum, maximum]` are clipped to the boundary.
The log probability of samples outside of this range is `-inf`.
Args:
distribution: a Distrax / TFP distribution to be wrapped.
minimum: can be a `scalar` or `vector`; if a vector, must have fewer dims
than `distribution.batch_shape` and must be broadcastable to it.
maximum: can be a `scalar` or `vector`; if a vector, must have fewer dims
than `distribution.batch_shape` and must be broadcastable to it.
|
def __init__(
self,
distribution: DistributionLike,
minimum: Numeric,
maximum: Numeric):
"""Wraps a distribution clipping samples out of `[minimum, maximum]`.
The samples outside of `[minimum, maximum]` are clipped to the boundary.
The log probability of samples outside of this range is `-inf`.
Args:
distribution: a Distrax / TFP distribution to be wrapped.
minimum: can be a `scalar` or `vector`; if a vector, must have fewer dims
than `distribution.batch_shape` and must be broadcastable to it.
maximum: can be a `scalar` or `vector`; if a vector, must have fewer dims
than `distribution.batch_shape` and must be broadcastable to it.
"""
super().__init__()
if distribution.event_shape:
raise ValueError('The wrapped distribution must have event shape ().')
if (jnp.array(minimum).ndim > len(distribution.batch_shape) or
jnp.array(maximum).ndim > len(distribution.batch_shape)):
raise ValueError(
'The minimum and maximum clipping boundaries must be scalars or '
'vectors with fewer dimensions than the batch_shape of the distribution, '
'i.e. we can broadcast min/max to batch_shape but not vice versa.')
self._distribution = conversion.as_distribution(distribution)
self._minimum = jnp.broadcast_to(minimum, self._distribution.batch_shape)
self._maximum = jnp.broadcast_to(maximum, self._distribution.batch_shape)
self._log_prob_minimum = self._distribution.log_cdf(minimum)
self._log_prob_maximum = self._distribution.log_survival_function(maximum)
|
(self, distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], minimum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], maximum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
55,496 |
distrax._src.distributions.clipped
|
_sample_n
|
See `Distribution._sample_n`.
|
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
raw_sample = self._distribution.sample(seed=key, sample_shape=[n])
return jnp.clip(raw_sample, self._minimum, self._maximum)
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,497 |
distrax._src.distributions.clipped
|
_sample_n_and_log_prob
|
See `Distribution._sample_n_and_log_prob`.
|
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples = self._sample_n(key, n)
return samples, self.log_prob(samples)
|
(self, key: jax.Array, n: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,500 |
distrax._src.distributions.distribution
|
entropy
|
Calculates the Shannon entropy (in nats).
|
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `entropy`.')
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,503 |
distrax._src.distributions.clipped
|
log_prob
|
See `Distribution.log_prob`.
|
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
# The log_prob can be used to compute expectations by explicitly integrating
# over the discrete and continuous elements.
# Info about mixed distributions:
# http://www.randomservices.org/random/dist/Mixed.html
log_prob = jnp.where(
jnp.equal(value, self._minimum),
self._log_prob_minimum,
jnp.where(jnp.equal(value, self._maximum),
self._log_prob_maximum,
self._distribution.log_prob(value)))
# Giving -inf log_prob outside the boundaries.
return jnp.where(
jnp.logical_or(value < self._minimum, value > self._maximum),
-jnp.inf,
log_prob)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,515 |
distrax._src.distributions.clipped
|
ClippedLogistic
|
A clipped logistic distribution.
|
class ClippedLogistic(Clipped):
"""A clipped logistic distribution."""
def __init__(
self, loc: Numeric, scale: Numeric, minimum: Numeric, maximum: Numeric):
distribution = logistic.Logistic(loc=loc, scale=scale)
super().__init__(distribution, minimum=minimum, maximum=maximum)
|
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], minimum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], maximum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
55,517 |
distrax._src.distributions.clipped
|
__init__
| null |
def __init__(
self, loc: Numeric, scale: Numeric, minimum: Numeric, maximum: Numeric):
distribution = logistic.Logistic(loc=loc, scale=scale)
super().__init__(distribution, minimum=minimum, maximum=maximum)
|
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], minimum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], maximum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
55,539 |
distrax._src.distributions.clipped
|
ClippedNormal
|
A clipped normal distribution.
|
class ClippedNormal(Clipped):
"""A clipped normal distribution."""
def __init__(
self, loc: Numeric, scale: Numeric, minimum: Numeric, maximum: Numeric):
distribution = normal.Normal(loc=loc, scale=scale)
super().__init__(distribution, minimum=minimum, maximum=maximum)
|
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], minimum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], maximum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
55,541 |
distrax._src.distributions.clipped
|
__init__
| null |
def __init__(
self, loc: Numeric, scale: Numeric, minimum: Numeric, maximum: Numeric):
distribution = normal.Normal(loc=loc, scale=scale)
super().__init__(distribution, minimum=minimum, maximum=maximum)
|
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], minimum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], maximum: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
55,563 |
distrax._src.distributions.deterministic
|
Deterministic
|
Scalar Deterministic distribution on the real line.
|
class Deterministic(distribution.Distribution):
"""Scalar Deterministic distribution on the real line."""
equiv_tfp_cls = tfd.Deterministic
def __init__(self,
loc: Numeric,
atol: Optional[Numeric] = None,
rtol: Optional[Numeric] = None):
"""Initializes a Deterministic distribution.
Args:
loc: Batch of points on which the distribution is supported.
atol: Absolute tolerance for comparing closeness to `loc`. It must be
broadcastable with `loc`, and it must not lead to additional batch
dimensions after broadcasting.
rtol: Relative tolerance for comparing closeness to `loc`. It must be
broadcastable with `loc`, and it must not lead to additional batch
dimensions after broadcasting.
"""
super().__init__()
self._loc = jnp.asarray(loc)
self._atol = jnp.asarray(0. if atol is None else atol)
self._rtol = jnp.asarray(0. if rtol is None else rtol)
if len(self._rtol.shape) > len(self._loc.shape):
raise ValueError(f'The parameter `rtol` cannot have more dimensions than '
f'`loc`, but their shapes are {self._rtol.shape} and '
f'{self._loc.shape}, respectively.')
if len(self._atol.shape) > len(self._loc.shape):
raise ValueError(f'The parameter `atol` cannot have more dimensions than '
f'`loc`, but their shapes are {self._atol.shape} and '
f'{self._loc.shape}, respectively.')
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of the events."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._loc.shape
@property
def loc(self) -> Array:
"""Point(s) on which this distribution is supported."""
return self._loc
@property
def atol(self) -> Array:
"""Absolute tolerance for comparing closeness to `loc`."""
return jnp.broadcast_to(self._atol, self.batch_shape)
@property
def rtol(self) -> Array:
"""Relative tolerance for comparing closeness to `loc`."""
return jnp.broadcast_to(self._rtol, self.batch_shape)
@property
def slack(self) -> Array:
return jnp.where(
self.rtol == 0,
self.atol,
self.atol + self.rtol * jnp.abs(self.loc))
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
del key # unused
loc = jnp.expand_dims(self.loc, axis=0)
return jnp.repeat(loc, n, axis=0)
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples = self._sample_n(key, n)
log_prob = jnp.zeros_like(samples)
return samples, log_prob
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return jnp.log(self.prob(value))
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
return jnp.where(
jnp.abs(value - self.loc) <= self.slack, 1., 0.)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return jnp.zeros(self.batch_shape, jnp.float_)
def mean(self) -> Array:
"""Calculates the mean."""
return self.loc
def mode(self) -> Array:
"""Calculates the mode."""
return self.mean()
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.zeros(self.batch_shape, jnp.float_)
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self.variance()
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jnp.where(value >= self.loc - self.slack, 1., 0.)
def __getitem__(self, index) -> 'Deterministic':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Deterministic(
loc=self.loc[index], atol=self.atol[index], rtol=self.rtol[index])
|
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], atol: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None, rtol: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None)
|
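A short usage sketch of the tolerance-based support check, assuming the class is exported as `distrax.Deterministic`:
```
import jax
import jax.numpy as jnp
import distrax

dist = distrax.Deterministic(loc=jnp.array([0., 1.]), atol=0.1)

# Every sample is exactly `loc`, repeated along the leading sample axis.
samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(3,))
assert samples.shape == (3, 2)
assert jnp.all(samples == jnp.array([0., 1.]))

# `prob` is 1 within the slack |value - loc| <= atol + rtol * |loc|, else 0.
assert jnp.all(dist.prob(jnp.array([0.05, 1.05])) == 1.)
assert jnp.all(dist.prob(jnp.array([0.5, 1.5])) == 0.)
```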
55,564 |
distrax._src.distributions.deterministic
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Deterministic':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Deterministic(
loc=self.loc[index], atol=self.atol[index], rtol=self.rtol[index])
|
(self, index) -> distrax._src.distributions.deterministic.Deterministic
|
55,565 |
distrax._src.distributions.deterministic
|
__init__
|
Initializes a Deterministic distribution.
Args:
loc: Batch of points on which the distribution is supported.
atol: Absolute tolerance for comparing closeness to `loc`. It must be
broadcastable with `loc`, and it must not lead to additional batch
dimensions after broadcasting.
rtol: Relative tolerance for comparing closeness to `loc`. It must be
broadcastable with `loc`, and it must not lead to additional batch
dimensions after broadcasting.
|
def __init__(self,
loc: Numeric,
atol: Optional[Numeric] = None,
rtol: Optional[Numeric] = None):
"""Initializes a Deterministic distribution.
Args:
loc: Batch of points on which the distribution is supported.
atol: Absolute tolerance for comparing closeness to `loc`. It must be
broadcastable with `loc`, and it must not lead to additional batch
dimensions after broadcasting.
rtol: Relative tolerance for comparing closeness to `loc`. It must be
broadcastable with `loc`, and it must not lead to additional batch
dimensions after broadcasting.
"""
super().__init__()
self._loc = jnp.asarray(loc)
self._atol = jnp.asarray(0. if atol is None else atol)
self._rtol = jnp.asarray(0. if rtol is None else rtol)
if len(self._rtol.shape) > len(self._loc.shape):
raise ValueError(f'The parameter `rtol` cannot have more dimensions than '
f'`loc`, but their shapes are {self._rtol.shape} and '
f'{self._loc.shape}, respectively.')
if len(self._atol.shape) > len(self._loc.shape):
raise ValueError(f'The parameter `atol` cannot have more dimensions than '
f'`loc`, but their shapes are {self._atol.shape} and '
f'{self._loc.shape}, respectively.')
|
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], atol: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None, rtol: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None)
|
55,568 |
distrax._src.distributions.deterministic
|
_sample_n
|
See `Distribution._sample_n`.
|
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
del key # unused
loc = jnp.expand_dims(self.loc, axis=0)
return jnp.repeat(loc, n, axis=0)
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,569 |
distrax._src.distributions.deterministic
|
_sample_n_and_log_prob
|
See `Distribution._sample_n_and_log_prob`.
|
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples = self._sample_n(key, n)
log_prob = jnp.zeros_like(samples)
return samples, log_prob
|
(self, key: jax.Array, n: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,570 |
distrax._src.distributions.deterministic
|
cdf
|
See `Distribution.cdf`.
|
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jnp.where(value >= self.loc - self.slack, 1., 0.)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,572 |
distrax._src.distributions.deterministic
|
entropy
|
Calculates the Shannon entropy (in nats).
|
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return jnp.zeros(self.batch_shape, jnp.float_)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,575 |
distrax._src.distributions.deterministic
|
log_prob
|
See `Distribution.log_prob`.
|
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return jnp.log(self.prob(value))
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,577 |
distrax._src.distributions.deterministic
|
mean
|
Calculates the mean.
|
def mean(self) -> Array:
"""Calculates the mean."""
return self.loc
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,579 |
distrax._src.distributions.deterministic
|
mode
|
Calculates the mode.
|
def mode(self) -> Array:
"""Calculates the mode."""
return self.mean()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,580 |
distrax._src.distributions.deterministic
|
prob
|
See `Distribution.prob`.
|
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
return jnp.where(
jnp.abs(value - self.loc) <= self.slack, 1., 0.)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,583 |
distrax._src.distributions.deterministic
|
stddev
|
Calculates the standard deviation.
|
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self.variance()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,586 |
distrax._src.distributions.deterministic
|
variance
|
Calculates the variance.
|
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.zeros(self.batch_shape, jnp.float_)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,587 |
distrax._src.bijectors.diag_linear
|
DiagLinear
|
Linear bijector with a diagonal weight matrix.
The bijector is defined as `f(x) = Ax` where `A` is a `DxD` diagonal matrix.
Additional dimensions, if any, index batches.
The Jacobian determinant is trivially computed by taking the product of the
diagonal entries in `A`. The inverse transformation `x = f^{-1}(y)` is
computed element-wise.
The bijector is invertible if and only if the diagonal entries of `A` are all
non-zero. It is the responsibility of the user to make sure that this is the
case; the class will make no attempt to verify that the bijector is
invertible.
|
class DiagLinear(linear.Linear):
"""Linear bijector with a diagonal weight matrix.
The bijector is defined as `f(x) = Ax` where `A` is a `DxD` diagonal matrix.
Additional dimensions, if any, index batches.
The Jacobian determinant is trivially computed by taking the product of the
diagonal entries in `A`. The inverse transformation `x = f^{-1}(y)` is
computed element-wise.
The bijector is invertible if and only if the diagonal entries of `A` are all
non-zero. It is the responsibility of the user to make sure that this is the
case; the class will make no attempt to verify that the bijector is
invertible.
"""
def __init__(self, diag: Array):
"""Initializes the bijector.
Args:
diag: a vector of length D, the diagonal of matrix `A`. Can also be a
batch of such vectors.
"""
if diag.ndim < 1:
raise ValueError("`diag` must have at least one dimension.")
self._bijector = block.Block(
scalar_affine.ScalarAffine(shift=0., scale=diag), ndims=1)
super().__init__(
event_dims=diag.shape[-1],
batch_shape=diag.shape[:-1],
dtype=diag.dtype)
self._diag = diag
self.forward = self._bijector.forward
self.forward_log_det_jacobian = self._bijector.forward_log_det_jacobian
self.inverse = self._bijector.inverse
self.inverse_log_det_jacobian = self._bijector.inverse_log_det_jacobian
self.inverse_and_log_det = self._bijector.inverse_and_log_det
@property
def diag(self) -> Array:
"""Vector of length D, the diagonal of matrix `A`."""
return self._diag
@property
def matrix(self) -> Array:
"""The full matrix `A`."""
return jnp.vectorize(jnp.diag, signature="(k)->(k,k)")(self.diag)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self._bijector.forward_and_log_det(x)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is DiagLinear: # pylint: disable=unidiomatic-typecheck
return self.diag is other.diag
return False
|
(diag: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
|
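The claim that the Jacobian determinant is just the product of the diagonal entries can be checked directly; a minimal sketch, assuming the bijector is exported as `distrax.DiagLinear`:
```
import jax.numpy as jnp
import distrax

diag = jnp.array([0.5, 2.0, 4.0])
bij = distrax.DiagLinear(diag)

x = jnp.array([1.0, 1.0, 1.0])
y, log_det = bij.forward_and_log_det(x)
assert jnp.allclose(y, diag * x)
# The Jacobian is diagonal, so log|det J| = sum(log|diag|).
assert jnp.allclose(log_det, jnp.sum(jnp.log(jnp.abs(diag))))
```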
55,588 |
distrax._src.bijectors.diag_linear
|
__init__
|
Initializes the bijector.
Args:
diag: a vector of length D, the diagonal of matrix `A`. Can also be a
batch of such vectors.
|
def __init__(self, diag: Array):
"""Initializes the bijector.
Args:
diag: a vector of length D, the diagonal of matrix `A`. Can also be a
batch of such vectors.
"""
if diag.ndim < 1:
raise ValueError("`diag` must have at least one dimension.")
self._bijector = block.Block(
scalar_affine.ScalarAffine(shift=0., scale=diag), ndims=1)
super().__init__(
event_dims=diag.shape[-1],
batch_shape=diag.shape[:-1],
dtype=diag.dtype)
self._diag = diag
self.forward = self._bijector.forward
self.forward_log_det_jacobian = self._bijector.forward_log_det_jacobian
self.inverse = self._bijector.inverse
self.inverse_log_det_jacobian = self._bijector.inverse_log_det_jacobian
self.inverse_and_log_det = self._bijector.inverse_and_log_det
|
(self, diag: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
|
55,593 |
distrax._src.bijectors.diag_linear
|
forward_and_log_det
|
Computes y = f(x) and log|det J(f)(x)|.
|
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self._bijector.forward_and_log_det(x)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,598 |
distrax._src.bijectors.diag_linear
|
same_as
|
Returns True if this bijector is guaranteed to be the same as `other`.
|
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is DiagLinear: # pylint: disable=unidiomatic-typecheck
return self.diag is other.diag
return False
|
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
|
55,600 |
distrax._src.bijectors.diag_plus_low_rank_linear
|
DiagPlusLowRankLinear
|
Linear bijector whose weights are a low-rank perturbation of a diagonal.
The bijector is defined as `f(x) = Ax` where `A = S + UV^T` and:
- `S` is a DxD diagonal matrix,
- `U`, `V` are DxK matrices.
When K < D, this bijector is computationally more efficient than an equivalent
`UnconstrainedAffine` bijector.
The Jacobian determinant is computed using the matrix determinant lemma:
det J(x) = det A = det(S) det(I + V^T S^{-1} U)
The matrix `I + V^T S^{-1} U` is KxK instead of DxD, so for K < D computing
its determinant is faster than computing the determinant of `A`.
The inverse is computed using the Woodbury matrix identity:
A^{-1} = (I - S^{-1} U (I + V^T S^{-1} U)^{-1} V^T) S^{-1}
As above, inverting the KxK matrix `I + V^T S^{-1} U` is faster than inverting
`A` when K < D.
The bijector is invertible if and only if both `S` and `I + V^T S^{-1} U` are
invertible matrices. It is the responsibility of the user to make sure that
this is the case; the class will make no attempt to verify that the bijector
is invertible.
|
class DiagPlusLowRankLinear(linear.Linear):
"""Linear bijector whose weights are a low-rank perturbation of a diagonal.
The bijector is defined as `f(x) = Ax` where `A = S + UV^T` and:
- `S` is a DxD diagonal matrix,
- `U`, `V` are DxK matrices.
When K < D, this bijector is computationally more efficient than an equivalent
`UnconstrainedAffine` bijector.
The Jacobian determinant is computed using the matrix determinant lemma:
det J(x) = det A = det(S) det(I + V^T S^{-1} U)
The matrix `I + V^T S^{-1} U` is KxK instead of DxD, so for K < D computing
its determinant is faster than computing the determinant of `A`.
The inverse is computed using the Woodbury matrix identity:
A^{-1} = (I - S^{-1} U (I + V^T S^{-1} U)^{-1} V^T) S^{-1}
As above, inverting the KxK matrix `I + V^T S^{-1} U` is faster than inverting
`A` when K < D.
The bijector is invertible if and only if both `S` and `I + V^T S^{-1} U` are
invertible matrices. It is the responsibility of the user to make sure that
this is the case; the class will make no attempt to verify that the bijector
is invertible.
"""
def __init__(self, diag: Array, u_matrix: Array, v_matrix: Array):
"""Initializes the bijector.
Args:
diag: a vector of length D, the diagonal of matrix `S`. Can also be a
batch of such vectors.
u_matrix: a DxK matrix, the `U` matrix in `A = S + UV^T`. Can also be a
batch of DxK matrices.
v_matrix: a DxK matrix, the `V` matrix in `A = S + UV^T`. Can also be a
batch of DxK matrices.
"""
_check_shapes_are_valid(diag, u_matrix, v_matrix)
# Since `S + UV^T = S (I + WV^T)` where `W = S^{-1}U`, we can implement this
# bijector by composing `_IdentityPlusLowRankLinear` with `DiagLinear`.
id_plus_low_rank_linear = _IdentityPlusLowRankLinear(
u_matrix=u_matrix / diag[..., None],
v_matrix=v_matrix)
self._bijector = chain.Chain(
[diag_linear.DiagLinear(diag), id_plus_low_rank_linear])
batch_shape = jnp.broadcast_shapes(
diag.shape[:-1], u_matrix.shape[:-2], v_matrix.shape[:-2])
dtype = jnp.result_type(diag, u_matrix, v_matrix)
super().__init__(
event_dims=diag.shape[-1], batch_shape=batch_shape, dtype=dtype)
self._diag = diag
self._u_matrix = u_matrix
self._v_matrix = v_matrix
self.forward = self._bijector.forward
self.forward_log_det_jacobian = self._bijector.forward_log_det_jacobian
self.inverse = self._bijector.inverse
self.inverse_log_det_jacobian = self._bijector.inverse_log_det_jacobian
self.inverse_and_log_det = self._bijector.inverse_and_log_det
@property
def diag(self) -> Array:
"""Vector of length D, the diagonal of matrix `S`."""
return self._diag
@property
def u_matrix(self) -> Array:
"""The `U` matrix in `A = S + UV^T`."""
return self._u_matrix
@property
def v_matrix(self) -> Array:
"""The `V` matrix in `A = S + UV^T`."""
return self._v_matrix
@property
def matrix(self) -> Array:
"""The matrix `A = S + UV^T` of the transformation."""
batched = jnp.vectorize(
lambda s, u, v: jnp.diag(s) + u @ v.T,
signature="(d),(d,k),(d,k)->(d,d)")
return batched(self._diag, self._u_matrix, self._v_matrix)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self._bijector.forward_and_log_det(x)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is DiagPlusLowRankLinear: # pylint: disable=unidiomatic-typecheck
return all((
self.diag is other.diag,
self.u_matrix is other.u_matrix,
self.v_matrix is other.v_matrix,
))
return False
|
(diag: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], u_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], v_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
|
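The matrix determinant lemma used here can be sanity-checked against the dense matrix `A = S + UV^T`; a hedged sketch, assuming the bijector is exported as `distrax.DiagPlusLowRankLinear` (tolerances account for float32):
```
import jax
import jax.numpy as jnp
import distrax

d, k = 4, 2
key1, key2 = jax.random.split(jax.random.PRNGKey(0))
diag = 1.5 * jnp.ones(d)
u = 0.1 * jax.random.normal(key1, (d, k))
v = 0.1 * jax.random.normal(key2, (d, k))

bij = distrax.DiagPlusLowRankLinear(diag=diag, u_matrix=u, v_matrix=v)
x = jnp.ones(d)
y, log_det = bij.forward_and_log_det(x)

# Same result as forming A = diag(S) + U V^T explicitly.
a_matrix = jnp.diag(diag) + u @ v.T
assert jnp.allclose(y, a_matrix @ x, atol=1e-4)
_, full_log_det = jnp.linalg.slogdet(a_matrix)
assert jnp.allclose(log_det, full_log_det, atol=1e-4)
```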
55,601 |
distrax._src.bijectors.diag_plus_low_rank_linear
|
__init__
|
Initializes the bijector.
Args:
diag: a vector of length D, the diagonal of matrix `S`. Can also be a
batch of such vectors.
u_matrix: a DxK matrix, the `U` matrix in `A = S + UV^T`. Can also be a
batch of DxK matrices.
v_matrix: a DxK matrix, the `V` matrix in `A = S + UV^T`. Can also be a
batch of DxK matrices.
|
def __init__(self, diag: Array, u_matrix: Array, v_matrix: Array):
"""Initializes the bijector.
Args:
diag: a vector of length D, the diagonal of matrix `S`. Can also be a
batch of such vectors.
u_matrix: a DxK matrix, the `U` matrix in `A = S + UV^T`. Can also be a
batch of DxK matrices.
v_matrix: a DxK matrix, the `V` matrix in `A = S + UV^T`. Can also be a
batch of DxK matrices.
"""
_check_shapes_are_valid(diag, u_matrix, v_matrix)
# Since `S + UV^T = S (I + WV^T)` where `W = S^{-1}U`, we can implement this
# bijector by composing `_IdentityPlusLowRankLinear` with `DiagLinear`.
id_plus_low_rank_linear = _IdentityPlusLowRankLinear(
u_matrix=u_matrix / diag[..., None],
v_matrix=v_matrix)
self._bijector = chain.Chain(
[diag_linear.DiagLinear(diag), id_plus_low_rank_linear])
batch_shape = jnp.broadcast_shapes(
diag.shape[:-1], u_matrix.shape[:-2], v_matrix.shape[:-2])
dtype = jnp.result_type(diag, u_matrix, v_matrix)
super().__init__(
event_dims=diag.shape[-1], batch_shape=batch_shape, dtype=dtype)
self._diag = diag
self._u_matrix = u_matrix
self._v_matrix = v_matrix
self.forward = self._bijector.forward
self.forward_log_det_jacobian = self._bijector.forward_log_det_jacobian
self.inverse = self._bijector.inverse
self.inverse_log_det_jacobian = self._bijector.inverse_log_det_jacobian
self.inverse_and_log_det = self._bijector.inverse_and_log_det
|
(self, diag: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], u_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], v_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
|
55,611 |
distrax._src.bijectors.diag_plus_low_rank_linear
|
same_as
|
Returns True if this bijector is guaranteed to be the same as `other`.
|
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is DiagPlusLowRankLinear: # pylint: disable=unidiomatic-typecheck
return all((
self.diag is other.diag,
self.u_matrix is other.u_matrix,
self.v_matrix is other.v_matrix,
))
return False
|
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
|
55,613 |
distrax._src.distributions.dirichlet
|
Dirichlet
|
Dirichlet distribution with concentration parameter `alpha`.
The PDF of a Dirichlet distributed random variable `X`, where `X` lives in the
simplex `(0, 1)^K` with `sum_{k=1}^{K} X_k = 1`, is given by,
```
p(x; alpha) = ( prod_{k=1}^{K} x_k ** (alpha_k - 1) ) / B(alpha)
```
where `B(alpha)` is the multivariate beta function, and the concentration
parameters `alpha_k > 0`.
Note that the support of the distribution does not include `x_k = 0` nor
`x_k = 1`.
|
class Dirichlet(distribution.Distribution):
"""Dirichlet distribution with concentration parameter `alpha`.
The PDF of a Dirichlet distributed random variable `X`, where `X` lives in the
simplex `(0, 1)^K` with `sum_{k=1}^{K} X_k = 1`, is given by,
```
p(x; alpha) = ( prod_{k=1}^{K} x_k ** (alpha_k - 1) ) / B(alpha)
```
where `B(alpha)` is the multivariate beta function, and the concentration
parameters `alpha_k > 0`.
Note that the support of the distribution does not include `x_k = 0` nor
`x_k = 1`.
"""
equiv_tfp_cls = tfd.Dirichlet
def __init__(self, concentration: Array):
"""Initializes a Dirichlet distribution.
Args:
concentration: Concentration parameter `alpha` of the distribution. It
must be an array of length `K >= 2` containing positive values
(additional dimensions index batches).
"""
super().__init__()
self._concentration = conversion.as_float_array(concentration)
if self._concentration.ndim < 1:
raise ValueError(
'The concentration parameter must have at least one dimension.')
if self._concentration.shape[-1] < 2:
raise ValueError(
'The last dimension of the concentration parameter must be '
'at least 2.')
self._log_normalization_constant = math.log_beta_multivariate(
self._concentration)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return self._concentration.shape[-1:]
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._concentration.shape[:-1]
@property
def concentration(self) -> Array:
"""Concentration parameter `alpha` of the distribution."""
return self._concentration
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
out_shape = (n,) + self.batch_shape
dtype = self._concentration.dtype
rnd = jax.random.dirichlet(
key, alpha=self._concentration, shape=out_shape, dtype=dtype)
return rnd
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return (jnp.sum((self._concentration - 1.) * jnp.log(value), axis=-1)
- self._log_normalization_constant)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
sum_concentration = jnp.sum(self._concentration, axis=-1)
return (
self._log_normalization_constant
+ ((sum_concentration - self._concentration.shape[-1])
* jax.lax.digamma(sum_concentration))
- jnp.sum((self._concentration - 1.) *
jax.lax.digamma(self._concentration), axis=-1)
)
def mean(self) -> Array:
"""Calculates the mean."""
return self._concentration / jnp.sum(
self._concentration, axis=-1, keepdims=True)
def mode(self) -> Array:
"""Calculates the mode.
Returns:
The mode, an array of shape `batch_shape + event_shape`. If any
`alpha_k <= 1`, the returned value is `jnp.nan`.
"""
result_if_valid = (self._concentration - 1.) / jnp.sum(
self._concentration - 1., axis=-1, keepdims=True)
return jnp.where(
jnp.all(self._concentration > 1., axis=-1, keepdims=True),
result_if_valid,
jnp.nan)
def variance(self) -> Array:
"""Calculates the variance."""
sum_concentration = jnp.sum(self._concentration, axis=-1, keepdims=True)
norm_concentration = self._concentration / sum_concentration
return norm_concentration * (1. - norm_concentration) / (
sum_concentration + 1.)
def covariance(self) -> Array:
"""Calculates the covariance.
Returns:
An array of shape `batch_shape + event_shape + event_shape` with the
covariance of the distribution.
"""
sum_concentration = jnp.sum(self._concentration, axis=-1, keepdims=True)
norm_concentration = self._concentration / sum_concentration
norm_over_sum = norm_concentration / (sum_concentration + 1.)
cov = - jnp.expand_dims(norm_over_sum, axis=-1) * jnp.expand_dims(
norm_concentration, axis=-2)
cov += jnp.vectorize(jnp.diag, signature='(k)->(k,k)')(norm_over_sum)
return cov
def __getitem__(self, index) -> 'Dirichlet':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Dirichlet(concentration=self.concentration[index])
|
(concentration: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
|
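A brief usage sketch, assuming the distribution is exported as `distrax.Dirichlet`: samples live on the simplex, and the mean is the normalised concentration vector.
```
import jax
import jax.numpy as jnp
import distrax

alpha = jnp.array([1.0, 2.0, 3.0])
dist = distrax.Dirichlet(concentration=alpha)

samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(4,))
# Each sample has positive entries summing to 1.
assert samples.shape == (4, 3)
assert jnp.allclose(jnp.sum(samples, axis=-1), 1.)

# The mean is alpha normalised to sum to 1.
assert jnp.allclose(dist.mean(), alpha / jnp.sum(alpha))
```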
55,614 |
distrax._src.distributions.dirichlet
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Dirichlet':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Dirichlet(concentration=self.concentration[index])
|
(self, index) -> distrax._src.distributions.dirichlet.Dirichlet
|
55,615 |
distrax._src.distributions.dirichlet
|
__init__
|
Initializes a Dirichlet distribution.
Args:
concentration: Concentration parameter `alpha` of the distribution. It
must be an array of length `K >= 2` containing positive values
(additional dimensions index batches).
|
def __init__(self, concentration: Array):
"""Initializes a Dirichlet distribution.
Args:
concentration: Concentration parameter `alpha` of the distribution. It
must be an array of length `K >= 2` containing positive values
(additional dimensions index batches).
"""
super().__init__()
self._concentration = conversion.as_float_array(concentration)
if self._concentration.ndim < 1:
raise ValueError(
'The concentration parameter must have at least one dimension.')
if self._concentration.shape[-1] < 2:
raise ValueError(
'The last dimension of the concentration parameter must be '
'at least 2.')
self._log_normalization_constant = math.log_beta_multivariate(
self._concentration)
|
(self, concentration: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
|
55,618 |
distrax._src.distributions.dirichlet
|
_sample_n
|
See `Distribution._sample_n`.
|
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
out_shape = (n,) + self.batch_shape
dtype = self._concentration.dtype
rnd = jax.random.dirichlet(
key, alpha=self._concentration, shape=out_shape, dtype=dtype)
return rnd
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,621 |
distrax._src.distributions.dirichlet
|
covariance
|
Calculates the covariance.
Returns:
An array of shape `batch_shape + event_shape + event_shape` with the
covariance of the distribution.
|
def covariance(self) -> Array:
"""Calculates the covariance.
Returns:
An array of shape `batch_shape + event_shape + event_shape` with the
covariance of the distribution.
"""
sum_concentration = jnp.sum(self._concentration, axis=-1, keepdims=True)
norm_concentration = self._concentration / sum_concentration
norm_over_sum = norm_concentration / (sum_concentration + 1.)
cov = - jnp.expand_dims(norm_over_sum, axis=-1) * jnp.expand_dims(
norm_concentration, axis=-2)
cov += jnp.vectorize(jnp.diag, signature='(k)->(k,k)')(norm_over_sum)
return cov
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
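As a consistency check on the formula above (hedged sketch, assuming `distrax.Dirichlet` as before), the diagonal of the covariance should match the per-component variance:
```
import jax.numpy as jnp
import distrax

dist = distrax.Dirichlet(concentration=jnp.array([1.0, 2.0, 3.0]))
cov = dist.covariance()
# diag(cov) equals the variance of each component.
assert jnp.allclose(jnp.diagonal(cov, axis1=-2, axis2=-1), dist.variance())
```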
55,623 |
distrax._src.distributions.dirichlet
|
entropy
|
Calculates the Shannon entropy (in nats).
|
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
sum_concentration = jnp.sum(self._concentration, axis=-1)
return (
self._log_normalization_constant
+ ((sum_concentration - self._concentration.shape[-1])
* jax.lax.digamma(sum_concentration))
- jnp.sum((self._concentration - 1.) *
jax.lax.digamma(self._concentration), axis=-1)
)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,626 |
distrax._src.distributions.dirichlet
|
log_prob
|
See `Distribution.log_prob`.
|
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return (jnp.sum((self._concentration - 1.) * jnp.log(value), axis=-1)
- self._log_normalization_constant)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,628 |
distrax._src.distributions.dirichlet
|
mean
|
Calculates the mean.
|
def mean(self) -> Array:
"""Calculates the mean."""
return self._concentration / jnp.sum(
self._concentration, axis=-1, keepdims=True)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,630 |
distrax._src.distributions.dirichlet
|
mode
|
Calculates the mode.
Returns:
The mode, an array of shape `batch_shape + event_shape`. If any
`alpha_k <= 1`, the returned value is `jnp.nan`.
|
def mode(self) -> Array:
"""Calculates the mode.
Returns:
The mode, an array of shape `batch_shape + event_shape`. If any
`alpha_k <= 1`, the returned value is `jnp.nan`.
"""
result_if_valid = (self._concentration - 1.) / jnp.sum(
self._concentration - 1., axis=-1, keepdims=True)
return jnp.where(
jnp.all(self._concentration > 1., axis=-1, keepdims=True),
result_if_valid,
jnp.nan)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
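A small illustration of the NaN behaviour described above (hedged sketch, same `distrax.Dirichlet` assumption):
```
import jax.numpy as jnp
import distrax

# The mode is only defined when every concentration exceeds 1.
assert jnp.all(jnp.isnan(
    distrax.Dirichlet(jnp.array([0.5, 2.0, 3.0])).mode()))
assert jnp.allclose(
    distrax.Dirichlet(jnp.array([2.0, 3.0, 4.0])).mode(),
    jnp.array([1.0, 2.0, 3.0]) / 6.0)
```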
55,637 |
distrax._src.distributions.dirichlet
|
variance
|
Calculates the variance.
|
def variance(self) -> Array:
"""Calculates the variance."""
sum_concentration = jnp.sum(self._concentration, axis=-1, keepdims=True)
norm_concentration = self._concentration / sum_concentration
return norm_concentration * (1. - norm_concentration) / (
sum_concentration + 1.)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,638 |
distrax._src.distributions.distribution
|
Distribution
|
Jittable abstract base class for all Distrax distributions.
|
class Distribution(
jittable.Jittable, Generic[EventT, ShapeT, DTypeT], metaclass=abc.ABCMeta):
"""Jittable abstract base class for all Distrax distributions."""
@abc.abstractmethod
def _sample_n(self, key: PRNGKey, n: int) -> EventT:
"""Returns `n` samples."""
def _sample_n_and_log_prob(
self,
key: PRNGKey,
n: int,
) -> Tuple[EventT, Array]:
"""Returns `n` samples and their log probs.
By default, it just calls `log_prob` on the generated samples. However, for
many distributions it's more efficient to compute the log prob of samples
than of arbitrary events (for example, there's no need to check that a
sample is within the distribution's domain). If that's the case, a subclass
may override this method with a more efficient implementation.
Args:
key: PRNG key.
n: Number of samples to generate.
Returns:
A tuple of `n` samples and their log probs.
"""
samples = self._sample_n(key, n)
log_prob = self.log_prob(samples)
return samples, log_prob
@abc.abstractmethod
def log_prob(self, value: EventT) -> Array:
"""Calculates the log probability of an event.
Args:
value: An event.
Returns:
The log probability log P(value).
"""
def prob(self, value: EventT) -> Array:
"""Calculates the probability of an event.
Args:
value: An event.
Returns:
The probability P(value).
"""
return jnp.exp(self.log_prob(value))
@property
@abc.abstractmethod
def event_shape(self) -> ShapeT:
"""Shape of event of distribution samples."""
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
sample_spec = jax.eval_shape(
self.sample, seed=jax.random.PRNGKey(0), sample_shape=())
if not self.event_shape:
batch_shapes = jax.tree_util.tree_map(lambda x: x.shape, sample_spec)
else:
batch_shapes = jax.tree_util.tree_map(
lambda s, e: s.shape[:s.ndim - len(e)], sample_spec, self.event_shape)
# Get flat batch shapes.
batch_shapes = jax.tree_util.tree_structure(sample_spec).flatten_up_to(
batch_shapes)
if not batch_shapes:
return ()
# Ensure batch shapes are consistent.
batch_shape = batch_shapes[0]
for i in range(1, len(batch_shapes)):
np.testing.assert_equal(batch_shape, batch_shapes[i])
return batch_shape
@property
def name(self) -> str:
"""Distribution name."""
return type(self).__name__
@property
def dtype(self) -> DTypeT:
"""The data type of the samples generated by the distribution."""
sample_spec = jax.eval_shape(
self.sample, seed=jax.random.PRNGKey(0), sample_shape=())
return jax.tree_util.tree_map(lambda x: x.dtype, sample_spec)
def sample(self,
*,
seed: Union[IntLike, PRNGKey],
sample_shape: Union[IntLike, Sequence[IntLike]] = ()) -> EventT:
"""Samples an event.
Args:
seed: PRNG key or integer seed.
sample_shape: Additional leading dimensions for sample.
Returns:
A sample of shape `sample_shape + self.batch_shape + self.event_shape`.
"""
rng, sample_shape = convert_seed_and_sample_shape(seed, sample_shape)
num_samples = functools.reduce(operator.mul, sample_shape, 1) # product
samples = self._sample_n(rng, num_samples)
return jax.tree_util.tree_map(
lambda t: t.reshape(sample_shape + t.shape[1:]), samples)
def sample_and_log_prob(
self,
*,
seed: Union[IntLike, PRNGKey],
sample_shape: Union[IntLike, Sequence[IntLike]] = ()
) -> Tuple[EventT, Array]:
"""Returns a sample and associated log probability. See `sample`."""
rng, sample_shape = convert_seed_and_sample_shape(seed, sample_shape)
num_samples = functools.reduce(operator.mul, sample_shape, 1) # product
samples, log_prob = self._sample_n_and_log_prob(rng, num_samples)
samples, log_prob = jax.tree_util.tree_map(
lambda t: t.reshape(sample_shape + t.shape[1:]), (samples, log_prob))
return samples, log_prob
def kl_divergence(self, other_dist, **kwargs) -> Array:
"""Calculates the KL divergence to another distribution.
Args:
other_dist: A compatible Distrax or TFP Distribution.
**kwargs: Additional kwargs.
Returns:
The KL divergence `KL(self || other_dist)`.
"""
return tfd.kullback_leibler.kl_divergence(self, other_dist, **kwargs)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `entropy`.')
def log_cdf(self, value: EventT) -> Array:
"""Evaluates the log cumulative distribution function at `value`.
Args:
value: An event.
Returns:
The log CDF evaluated at value, i.e. log P[X <= value].
"""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `log_cdf`.')
def cdf(self, value: EventT) -> Array:
"""Evaluates the cumulative distribution function at `value`.
Args:
value: An event.
Returns:
The CDF evaluated at value, i.e. P[X <= value].
"""
return jnp.exp(self.log_cdf(value))
def survival_function(self, value: EventT) -> Array:
"""Evaluates the survival function at `value`.
Note that by default we use a definition of the survival function in terms
of the CDF that is not necessarily numerically stable.
More stable definitions should be implemented in subclasses for
distributions for which they exist.
Args:
value: An event.
Returns:
The survival function evaluated at `value`, i.e. P[X > value]
"""
if not self.event_shape:
# Defined for univariate distributions only.
return 1. - self.cdf(value)
else:
raise NotImplementedError('`survival_function` is not defined for '
f'distribution `{self.name}`.')
def log_survival_function(self, value: EventT) -> Array:
"""Evaluates the log of the survival function at `value`.
Note that by default we use a definition of the log of the survival function
in terms of the CDF that is not necessarily numerically stable.
More stable definitions should be implemented in subclasses for
distributions for which they exist.
Args:
value: An event.
Returns:
The log of the survival function evaluated at `value`, i.e.
log P[X > value]
"""
if not self.event_shape:
# Defined for univariate distributions only.
return jnp.log1p(-self.cdf(value))
else:
raise NotImplementedError('`log_survival_function` is not defined for '
f'distribution `{self.name}`.')
def mean(self) -> EventT:
"""Calculates the mean."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `mean`.')
def median(self) -> EventT:
"""Calculates the median."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `median`.')
def variance(self) -> EventT:
"""Calculates the variance."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `variance`.')
def stddev(self) -> EventT:
"""Calculates the standard deviation."""
return jnp.sqrt(self.variance())
def mode(self) -> EventT:
"""Calculates the mode."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `mode`.')
def cross_entropy(self, other_dist, **kwargs) -> Array:
"""Calculates the cross entropy to another distribution.
Args:
other_dist: A compatible Distrax or TFP Distribution.
**kwargs: Additional kwargs.
Returns:
The cross entropy `H(self || other_dist)`.
"""
return self.kl_divergence(other_dist, **kwargs) + self.entropy()
@contextlib.contextmanager
def _name_and_control_scope(self, *unused_a, **unused_k):
yield
def __getitem__(self, index) -> 'Distribution':
"""Returns a matching distribution obtained by indexing the batch shape.
Args:
index: An object, typically int or slice (or a tuple thereof), used for
indexing the distribution.
"""
raise NotImplementedError(f'Indexing not implemented for `{self.name}`.')
|
(*args, **kwargs)
|
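The abstract base class only requires `_sample_n`, `log_prob` and `event_shape`; everything else (sampling with arbitrary `sample_shape`, `sample_and_log_prob`, `prob`, `batch_shape`) is derived. A minimal subclass sketch, assuming the base class is exported as `distrax.Distribution`; `FairCoin` is a hypothetical name used only for illustration:
```
import jax
import jax.numpy as jnp
import distrax


class FairCoin(distrax.Distribution):
  """Toy distribution over {0, 1}; a hypothetical example, not part of distrax."""

  @property
  def event_shape(self):
    return ()

  def _sample_n(self, key, n):
    return jax.random.bernoulli(key, p=0.5, shape=(n,)).astype(jnp.int32)

  def log_prob(self, value):
    return jnp.full(jnp.shape(value), jnp.log(0.5))


coin = FairCoin()
samples, log_probs = coin.sample_and_log_prob(
    seed=jax.random.PRNGKey(0), sample_shape=(2, 3))
# The leading sample axis is reshaped to the requested sample_shape.
assert samples.shape == (2, 3) and log_probs.shape == (2, 3)
```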
55,639 |
distrax._src.distributions.distribution
|
__getitem__
|
Returns a matching distribution obtained by indexing the batch shape.
Args:
index: An object, typically int or slice (or a tuple thereof), used for
indexing the distribution.
|
def __getitem__(self, index) -> 'Distribution':
"""Returns a matching distribution obtained by indexing the batch shape.
Args:
index: An object, typically int or slice (or a tuple thereof), used for
indexing the distribution.
"""
raise NotImplementedError(f'Indexing not implemented for `{self.name}`.')
|
(self, index) -> distrax._src.distributions.distribution.Distribution
|
55,642 |
distrax._src.distributions.distribution
|
_sample_n
|
Returns `n` samples.
|
@abc.abstractmethod
def _sample_n(self, key: PRNGKey, n: int) -> EventT:
"""Returns `n` samples."""
|
(self, key: jax.Array, n: int) -> ~EventT
|
55,649 |
distrax._src.distributions.distribution
|
log_prob
|
Calculates the log probability of an event.
Args:
value: An event.
Returns:
The log probability log P(value).
|
@abc.abstractmethod
def log_prob(self, value: EventT) -> Array:
"""Calculates the log probability of an event.
Args:
value: An event.
Returns:
The log probability log P(value).
"""
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,661 |
distrax._src.distributions.epsilon_greedy
|
EpsilonGreedy
|
A Categorical that is ε-greedy with respect to some preferences.
Given a set of unnormalized preferences, the distribution is a mixture
of the Greedy and Uniform distribution; with weight (1-ε) and ε, respectively.
|
class EpsilonGreedy(categorical.Categorical):
"""A Categorical that is ε-greedy with respect to some preferences.
Given a set of unnormalized preferences, the distribution is a mixture
of the Greedy and Uniform distribution; with weight (1-ε) and ε, respectively.
"""
def __init__(self,
preferences: Array,
epsilon: float,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes an EpsilonGreedy distribution.
Args:
preferences: Unnormalized preferences.
epsilon: Mixing parameter ε.
dtype: The type of event samples.
"""
self._preferences = jnp.asarray(preferences)
self._epsilon = epsilon
greedy_probs = _argmax_with_random_tie_breaking(self._preferences)
probs = _mix_probs_with_uniform(greedy_probs, epsilon)
super().__init__(probs=probs, dtype=dtype)
@property
def epsilon(self) -> float:
"""Mixing parameters of the distribution."""
return self._epsilon
@property
def preferences(self) -> Array:
"""Unnormalized preferences."""
return self._preferences
def __getitem__(self, index) -> 'EpsilonGreedy':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return EpsilonGreedy(
preferences=self.preferences[index],
epsilon=self.epsilon,
dtype=self.dtype)
|
(preferences: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], epsilon: float, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
|
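A short sketch of the mixture described above, assuming the class is exported as `distrax.EpsilonGreedy`: with a unique argmax, the greedy arm receives weight `(1 - eps) + eps / K` and every other arm `eps / K`.
```
import jax.numpy as jnp
import distrax

prefs = jnp.array([1.0, 2.0, 0.5])
dist = distrax.EpsilonGreedy(preferences=prefs, epsilon=0.1)

# K = 3 arms; arm 1 is the greedy choice.
expected = jnp.array([0.1 / 3, 0.9 + 0.1 / 3, 0.1 / 3])
assert jnp.allclose(dist.probs, expected)
```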