Columns: index (int64, 0 to 731k), package (string, 2 to 98 chars), name (string, 1 to 76 chars), docstring (string, 0 to 281k chars), code (string, 4 to 1.07M chars), signature (string, 2 to 42.8k chars).
55,881
distrax._src.distributions.laplace
stddev
Calculates the standard deviation.
def stddev(self) -> Array: """Calculates the standard deviation.""" return math.sqrt(2.) * self.scale
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,884
distrax._src.distributions.laplace
variance
Calculates the variance.
def variance(self) -> Array: """Calculates the variance.""" return 2. * jnp.square(self.scale)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
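The two Laplace moments above are consistent with each other: the standard deviation sqrt(2) * scale squares to the variance 2 * scale**2. A minimal numeric check, assuming these methods are exposed on the public distrax.Laplace class:

import jax.numpy as jnp
import distrax

dist = distrax.Laplace(loc=0., scale=3.)
# stddev() returns sqrt(2) * scale and variance() returns 2 * scale**2,
# so the squared standard deviation must equal the variance (here, 18).
assert jnp.allclose(dist.stddev() ** 2, dist.variance())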
55,885
distrax._src.bijectors.linear
Linear
Base class for linear bijectors. This class provides a base class for bijectors defined as `f(x) = Ax`, where `A` is a `DxD` matrix and `x` is a `D`-dimensional vector.
class Linear(base.Bijector, metaclass=abc.ABCMeta): """Base class for linear bijectors. This class provides a base class for bijectors defined as `f(x) = Ax`, where `A` is a `DxD` matrix and `x` is a `D`-dimensional vector. """ def __init__(self, event_dims: int, batch_shape: Sequence[int], dtype: jnp.dtype): """Initializes a `Linear` bijector. Args: event_dims: the dimensionality `D` of the event `x`. It is assumed that `x` is a vector of length `event_dims`. batch_shape: the batch shape of the bijector. dtype: the data type of matrix `A`. """ super().__init__(event_ndims_in=1, is_constant_jacobian=True) self._event_dims = event_dims self._batch_shape = tuple(batch_shape) self._dtype = dtype @property def matrix(self) -> Array: """The matrix `A` of the transformation. To be optionally implemented in a subclass. Returns: An array of shape `batch_shape + (event_dims, event_dims)` and data type `dtype`. """ raise NotImplementedError( f"Linear bijector {self.name} does not implement `matrix`.") @property def event_dims(self) -> int: """The dimensionality `D` of the event `x`.""" return self._event_dims @property def batch_shape(self) -> Tuple[int, ...]: """The batch shape of the bijector.""" return self._batch_shape @property def dtype(self) -> jnp.dtype: """The data type of matrix `A`.""" return self._dtype
(event_dims: int, batch_shape: Sequence[int], dtype: numpy.dtype)
55,886
distrax._src.bijectors.linear
__init__
Initializes a `Linear` bijector. Args: event_dims: the dimensionality `D` of the event `x`. It is assumed that `x` is a vector of length `event_dims`. batch_shape: the batch shape of the bijector. dtype: the data type of matrix `A`.
def __init__(self, event_dims: int, batch_shape: Sequence[int], dtype: jnp.dtype): """Initializes a `Linear` bijector. Args: event_dims: the dimensionality `D` of the event `x`. It is assumed that `x` is a vector of length `event_dims`. batch_shape: the batch shape of the bijector. dtype: the data type of matrix `A`. """ super().__init__(event_ndims_in=1, is_constant_jacobian=True) self._event_dims = event_dims self._batch_shape = tuple(batch_shape) self._dtype = dtype
(self, event_dims: int, batch_shape: Sequence[int], dtype: numpy.dtype)
55,898
distrax._src.distributions.log_stddev_normal
LogStddevNormal
Normal distribution with `log_scale` parameter. The `LogStddevNormal` has three parameters: `loc`, `log_scale`, and (optionally) `max_scale`. The distribution is a univariate normal distribution with mean equal to `loc` and scale parameter (i.e., stddev) equal to `exp(log_scale)` if `max_scale` is None. If `max_scale` is not None, a soft thresholding is applied to obtain the scale parameter of the normal, so that its log is given by `log(max_scale) - softplus(log(max_scale) - log_scale)`.
class LogStddevNormal(normal.Normal): """Normal distribution with `log_scale` parameter. The `LogStddevNormal` has three parameters: `loc`, `log_scale`, and (optionally) `max_scale`. The distribution is a univariate normal distribution with mean equal to `loc` and scale parameter (i.e., stddev) equal to `exp(log_scale)` if `max_scale` is None. If `max_scale` is not None, a soft thresholding is applied to obtain the scale parameter of the normal, so that its log is given by `log(max_scale) - softplus(log(max_scale) - log_scale)`. """ def __init__(self, loc: Numeric, log_scale: Numeric, max_scale: Optional[float] = None): """Initializes a LogStddevNormal distribution. Args: loc: Mean of the distribution. log_scale: Log of the distribution's scale (before the soft thresholding applied when `max_scale` is not None). max_scale: Maximum value of the scale that this distribution will saturate at. This parameter can be useful for numerical stability. It is not a hard maximum; rather, we compute `log(scale)` as per the formula: `log(max_scale) - softplus(log(max_scale) - log_scale)`. """ self._max_scale = max_scale if max_scale is not None: max_log_scale = math.log(max_scale) self._log_scale = max_log_scale - jax.nn.softplus( max_log_scale - conversion.as_float_array(log_scale)) else: self._log_scale = conversion.as_float_array(log_scale) scale = jnp.exp(self._log_scale) super().__init__(loc, scale) @property def log_scale(self) -> Array: """The log standard deviation (after thresholding, if applicable).""" return jnp.broadcast_to(self._log_scale, self.batch_shape) def __getitem__(self, index) -> 'LogStddevNormal': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return LogStddevNormal( loc=self.loc[index], log_scale=self.log_scale[index], max_scale=self._max_scale)
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], log_scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], max_scale: Optional[float] = None)
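The soft thresholding described above acts as a smooth cap: when `log_scale` is well below `log(max_scale)` it passes through almost unchanged, and when it is well above, the result saturates at `log(max_scale)`. A standalone sketch of just that formula (not calling the class itself):

import math
import jax
import jax.numpy as jnp

def soft_capped_log_scale(log_scale, max_scale):
    # log(max_scale) - softplus(log(max_scale) - log_scale), as in the constructor above.
    max_log_scale = math.log(max_scale)
    return max_log_scale - jax.nn.softplus(max_log_scale - log_scale)

print(soft_capped_log_scale(jnp.array(-5.0), max_scale=2.0))  # ~ -5.0, far below the cap
print(soft_capped_log_scale(jnp.array(10.0), max_scale=2.0))  # ~ log(2) ~= 0.693, saturated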
55,899
distrax._src.distributions.log_stddev_normal
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'LogStddevNormal': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return LogStddevNormal( loc=self.loc[index], log_scale=self.log_scale[index], max_scale=self._max_scale)
(self, index) -> distrax._src.distributions.log_stddev_normal.LogStddevNormal
55,900
distrax._src.distributions.log_stddev_normal
__init__
Initializes a LogStddevNormal distribution. Args: loc: Mean of the distribution. log_scale: Log of the distribution's scale (before the soft thresholding applied when `max_scale` is not None). max_scale: Maximum value of the scale that this distribution will saturate at. This parameter can be useful for numerical stability. It is not a hard maximum; rather, we compute `log(scale)` as per the formula: `log(max_scale) - softplus(log(max_scale) - log_scale)`.
def __init__(self, loc: Numeric, log_scale: Numeric, max_scale: Optional[float] = None): """Initializes a LogStddevNormal distribution. Args: loc: Mean of the distribution. log_scale: Log of the distribution's scale (before the soft thresholding applied when `max_scale` is not None). max_scale: Maximum value of the scale that this distribution will saturate at. This parameter can be useful for numerical stability. It is not a hard maximum; rather, we compute `log(scale)` as per the formula: `log(max_scale) - softplus(log(max_scale) - log_scale)`. """ self._max_scale = max_scale if max_scale is not None: max_log_scale = math.log(max_scale) self._log_scale = max_log_scale - jax.nn.softplus( max_log_scale - conversion.as_float_array(log_scale)) else: self._log_scale = conversion.as_float_array(log_scale) scale = jnp.exp(self._log_scale) super().__init__(loc, scale)
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], log_scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], max_scale: Optional[float] = None)
55,903
distrax._src.distributions.normal
_sample_from_std_normal
null
def _sample_from_std_normal(self, key: PRNGKey, n: int) -> Array: out_shape = (n,) + self.batch_shape dtype = jnp.result_type(self._loc, self._scale) return jax.random.normal(key, shape=out_shape, dtype=dtype)
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,904
distrax._src.distributions.normal
_sample_n
See `Distribution._sample_n`.
def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" rnd = self._sample_from_std_normal(key, n) scale = jnp.expand_dims(self._scale, range(rnd.ndim - self._scale.ndim)) loc = jnp.expand_dims(self._loc, range(rnd.ndim - self._loc.ndim)) return scale * rnd + loc
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,905
distrax._src.distributions.normal
_sample_n_and_log_prob
See `Distribution._sample_n_and_log_prob`.
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]: """See `Distribution._sample_n_and_log_prob`.""" rnd = self._sample_from_std_normal(key, n) samples = self._scale * rnd + self._loc log_prob = -0.5 * jnp.square(rnd) - _half_log2pi - jnp.log(self._scale) return samples, log_prob
(self, key: jax.Array, n: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
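`_sample_n_and_log_prob` above reuses the standard-normal draw `rnd`: with `x = loc + scale * rnd`, the density is `log p(x) = -0.5 * rnd**2 - 0.5 * log(2 * pi) - log(scale)`, so no re-standardization is needed. A quick consistency sketch, assuming the public distrax.Normal API and its `sample_and_log_prob` method:

import jax
import jax.numpy as jnp
import distrax

dist = distrax.Normal(loc=1.5, scale=0.7)
samples, log_prob = dist.sample_and_log_prob(
    seed=jax.random.PRNGKey(0), sample_shape=3)
# The fused log-probability should agree with log_prob evaluated at the samples.
assert jnp.allclose(log_prob, dist.log_prob(samples), atol=1e-5)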
55,906
distrax._src.distributions.normal
_standardize
null
def _standardize(self, value: EventT) -> Array: return (value - self._loc) / self._scale
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,907
distrax._src.distributions.normal
cdf
See `Distribution.cdf`.
def cdf(self, value: EventT) -> Array: """See `Distribution.cdf`.""" return jax.scipy.special.ndtr(self._standardize(value))
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,909
distrax._src.distributions.normal
entropy
Calculates the Shannon entropy (in nats).
def entropy(self) -> Array: """Calculates the Shannon entropy (in nats).""" log_normalization = _half_log2pi + jnp.log(self.scale) entropy = 0.5 + log_normalization return entropy
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
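The entropy above, 0.5 plus the log normalizer, is the usual Gaussian closed form 0.5 * log(2 * pi * e * scale**2). A one-line check against that form, assuming the public distrax.Normal class:

import jax.numpy as jnp
import distrax

scale = jnp.array(0.7)
dist = distrax.Normal(loc=0., scale=scale)
# 0.5 + 0.5*log(2*pi) + log(scale) equals 0.5*log(2*pi*e*scale**2).
assert jnp.allclose(dist.entropy(), 0.5 * jnp.log(2 * jnp.pi * jnp.e * scale**2))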
55,911
distrax._src.distributions.normal
log_cdf
See `Distribution.log_cdf`.
def log_cdf(self, value: EventT) -> Array: """See `Distribution.log_cdf`.""" return jax.scipy.special.log_ndtr(self._standardize(value))
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,912
distrax._src.distributions.normal
log_prob
See `Distribution.log_prob`.
def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" log_unnormalized = -0.5 * jnp.square(self._standardize(value)) log_normalization = _half_log2pi + jnp.log(self._scale) return log_unnormalized - log_normalization
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,913
distrax._src.distributions.normal
log_survival_function
See `Distribution.log_survival_function`.
def log_survival_function(self, value: EventT) -> Array: """See `Distribution.log_survival_function`.""" return jax.scipy.special.log_ndtr(-self._standardize(value))
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,920
distrax._src.distributions.normal
stddev
Calculates the standard deviation.
def stddev(self) -> Array: """Calculates the standard deviation.""" return self.scale
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,921
distrax._src.distributions.normal
survival_function
See `Distribution.survival_function`.
def survival_function(self, value: EventT) -> Array: """See `Distribution.survival_function`.""" return jax.scipy.special.ndtr(-self._standardize(value))
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,923
distrax._src.distributions.normal
variance
Calculates the variance.
def variance(self) -> Array: """Calculates the variance.""" return jnp.square(self.scale)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,924
distrax._src.distributions.logistic
Logistic
The Logistic distribution with location `loc` and `scale` parameters.
class Logistic(distribution.Distribution): """The Logistic distribution with location `loc` and `scale` parameters.""" equiv_tfp_cls = tfd.Logistic def __init__(self, loc: Numeric, scale: Numeric) -> None: """Initializes a Logistic distribution. Args: loc: Mean of the distribution. scale: Spread of the distribution. """ super().__init__() self._loc = conversion.as_float_array(loc) self._scale = conversion.as_float_array(scale) self._batch_shape = jax.lax.broadcast_shapes( self._loc.shape, self._scale.shape) @property def event_shape(self) -> Tuple[int, ...]: """Shape of event of distribution samples.""" return () @property def batch_shape(self) -> Tuple[int, ...]: """Shape of batch of distribution samples.""" return self._batch_shape @property def loc(self) -> Array: """Mean of the distribution.""" return jnp.broadcast_to(self._loc, self.batch_shape) @property def scale(self) -> Array: """Spread of the distribution.""" return jnp.broadcast_to(self._scale, self.batch_shape) def _standardize(self, x: Array) -> Array: return (x - self.loc) / self.scale def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" out_shape = (n,) + self.batch_shape dtype = jnp.result_type(self._loc, self._scale) uniform = jax.random.uniform( key, shape=out_shape, dtype=dtype, minval=jnp.finfo(dtype).tiny, maxval=1.) rnd = jnp.log(uniform) - jnp.log1p(-uniform) return self._scale * rnd + self._loc def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" z = self._standardize(value) return -z - 2. * jax.nn.softplus(-z) - jnp.log(self._scale) def entropy(self) -> Array: """Calculates the Shannon entropy (in Nats).""" return 2. + jnp.broadcast_to(jnp.log(self._scale), self.batch_shape) def cdf(self, value: EventT) -> Array: """See `Distribution.cdf`.""" return jax.nn.sigmoid(self._standardize(value)) def log_cdf(self, value: EventT) -> Array: """See `Distribution.log_cdf`.""" return -jax.nn.softplus(-self._standardize(value)) def survival_function(self, value: EventT) -> Array: """See `Distribution.survival_function`.""" return jax.nn.sigmoid(-self._standardize(value)) def log_survival_function(self, value: EventT) -> Array: """See `Distribution.log_survival_function`.""" return -jax.nn.softplus(self._standardize(value)) def mean(self) -> Array: """Calculates the mean.""" return self.loc def variance(self) -> Array: """Calculates the variance.""" return jnp.square(self.scale * jnp.pi) / 3. def stddev(self) -> Array: """Calculates the standard deviation.""" return self.scale * jnp.pi / jnp.sqrt(3.) def mode(self) -> Array: """Calculates the mode.""" return self.mean() def median(self) -> Array: """Calculates the median.""" return self.mean() def __getitem__(self, index) -> 'Logistic': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return Logistic(loc=self.loc[index], scale=self.scale[index])
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int]) -> None
55,925
distrax._src.distributions.logistic
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'Logistic': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return Logistic(loc=self.loc[index], scale=self.scale[index])
(self, index) -> distrax._src.distributions.logistic.Logistic
55,926
distrax._src.distributions.logistic
__init__
Initializes a Logistic distribution. Args: loc: Mean of the distribution. scale: Spread of the distribution.
def __init__(self, loc: Numeric, scale: Numeric) -> None: """Initializes a Logistic distribution. Args: loc: Mean of the distribution. scale: Spread of the distribution. """ super().__init__() self._loc = conversion.as_float_array(loc) self._scale = conversion.as_float_array(scale) self._batch_shape = jax.lax.broadcast_shapes( self._loc.shape, self._scale.shape)
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int]) -> NoneType
55,929
distrax._src.distributions.logistic
_sample_n
See `Distribution._sample_n`.
def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" out_shape = (n,) + self.batch_shape dtype = jnp.result_type(self._loc, self._scale) uniform = jax.random.uniform( key, shape=out_shape, dtype=dtype, minval=jnp.finfo(dtype).tiny, maxval=1.) rnd = jnp.log(uniform) - jnp.log1p(-uniform) return self._scale * rnd + self._loc
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
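The sampler above is inverse-CDF sampling: the logistic CDF is `sigmoid((x - loc) / scale)`, so its inverse is `loc + scale * logit(u)`, and `log(u) - log1p(-u)` is a numerically careful logit of the uniform draw. A standalone sketch of that transform (the function name here is illustrative, not part of distrax):

import jax
import jax.numpy as jnp

def sample_logistic(key, loc, scale, n):
    # Draw u in (0, 1), excluding exactly 0, then apply the inverse CDF loc + scale * logit(u).
    dtype = jnp.result_type(loc, scale)
    u = jax.random.uniform(key, shape=(n,), dtype=dtype,
                           minval=jnp.finfo(dtype).tiny, maxval=1.)
    return loc + scale * (jnp.log(u) - jnp.log1p(-u))

samples = sample_logistic(jax.random.PRNGKey(0), loc=0.0, scale=2.0, n=5)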
55,931
distrax._src.distributions.logistic
_standardize
null
def _standardize(self, x: Array) -> Array: return (x - self.loc) / self.scale
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,932
distrax._src.distributions.logistic
cdf
See `Distribution.cdf`.
def cdf(self, value: EventT) -> Array: """See `Distribution.cdf`.""" return jax.nn.sigmoid(self._standardize(value))
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,934
distrax._src.distributions.logistic
entropy
Calculates the Shannon entropy (in Nats).
def entropy(self) -> Array: """Calculates the Shannon entropy (in Nats).""" return 2. + jnp.broadcast_to(jnp.log(self._scale), self.batch_shape)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,936
distrax._src.distributions.logistic
log_cdf
See `Distribution.log_cdf`.
def log_cdf(self, value: EventT) -> Array: """See `Distribution.log_cdf`.""" return -jax.nn.softplus(-self._standardize(value))
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,937
distrax._src.distributions.logistic
log_prob
See `Distribution.log_prob`.
def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" z = self._standardize(value) return -z - 2. * jax.nn.softplus(-z) - jnp.log(self._scale)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,938
distrax._src.distributions.logistic
log_survival_function
See `Distribution.log_survival_function`.
def log_survival_function(self, value: EventT) -> Array: """See `Distribution.log_survival_function`.""" return -jax.nn.softplus(self._standardize(value))
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,945
distrax._src.distributions.logistic
stddev
Calculates the standard deviation.
def stddev(self) -> Array: """Calculates the standard deviation.""" return self.scale * jnp.pi / jnp.sqrt(3.)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,946
distrax._src.distributions.logistic
survival_function
See `Distribution.survival_function`.
def survival_function(self, value: EventT) -> Array: """See `Distribution.survival_function`.""" return jax.nn.sigmoid(-self._standardize(value))
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,948
distrax._src.distributions.logistic
variance
Calculates the variance.
def variance(self) -> Array: """Calculates the variance.""" return jnp.square(self.scale * jnp.pi) / 3.
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
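As with the Laplace entries earlier, the two Logistic spread measures above agree: `(scale * pi / sqrt(3))**2` equals `(scale * pi)**2 / 3`. A one-line check, assuming the public distrax.Logistic class:

import jax.numpy as jnp
import distrax

dist = distrax.Logistic(loc=0., scale=2.)
assert jnp.allclose(dist.stddev() ** 2, dist.variance())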
55,949
distrax._src.bijectors.lower_upper_triangular_affine
LowerUpperTriangularAffine
An affine bijector whose weight matrix is parameterized as A = LU. This bijector is defined as `f(x) = Ax + b` where: * A = LU is a DxD matrix. * L is a lower-triangular matrix with ones on the diagonal. * U is an upper-triangular matrix. The Jacobian determinant can be computed in O(D) as follows: log|det J(x)| = log|det A| = sum(log|diag(U)|) The inverse can be computed in O(D^2) by solving two triangular systems: * Lz = y - b * Ux = z The bijector is invertible if and only if all diagonal elements of U are non-zero. It is the responsibility of the user to make sure that this is the case; the class will make no attempt to verify that the bijector is invertible. L and U are parameterized using a square matrix M as follows: * The lower-triangular part of M (excluding the diagonal) becomes L. * The upper-triangular part of M (including the diagonal) becomes U. The parameterization is such that if M is the identity, LU is also the identity. Note however that M is not generally equal to LU.
class LowerUpperTriangularAffine(chain.Chain): """An affine bijector whose weight matrix is parameterized as A = LU. This bijector is defined as `f(x) = Ax + b` where: * A = LU is a DxD matrix. * L is a lower-triangular matrix with ones on the diagonal. * U is an upper-triangular matrix. The Jacobian determinant can be computed in O(D) as follows: log|det J(x)| = log|det A| = sum(log|diag(U)|) The inverse can be computed in O(D^2) by solving two triangular systems: * Lz = y - b * Ux = z The bijector is invertible if and only if all diagonal elements of U are non-zero. It is the responsibility of the user to make sure that this is the case; the class will make no attempt to verify that the bijector is invertible. L and U are parameterized using a square matrix M as follows: * The lower-triangular part of M (excluding the diagonal) becomes L. * The upper-triangular part of M (including the diagonal) becomes U. The parameterization is such that if M is the identity, LU is also the identity. Note however that M is not generally equal to LU. """ def __init__(self, matrix: Array, bias: Array): """Initializes a `LowerUpperTriangularAffine` bijector. Args: matrix: a square matrix parameterizing `L` and `U` as described in the class docstring. Can also be a batch of matrices. If `matrix` is the identity, `LU` is also the identity. Note however that `matrix` is generally not equal to the product `LU`. bias: the vector `b` in `LUx + b`. Can also be a batch of vectors. """ unconstrained_affine.check_affine_parameters(matrix, bias) self._upper = triangular_linear.TriangularLinear(matrix, is_lower=False) dim = matrix.shape[-1] lower = jnp.eye(dim) + jnp.tril(matrix, -1) # Replace diagonal with ones. self._lower = triangular_linear.TriangularLinear(lower, is_lower=True) self._shift = block.Block(shift.Shift(bias), 1) self._bias = bias super().__init__([self._shift, self._lower, self._upper]) @property def lower(self) -> Array: """The lower triangular matrix `L` with ones in the diagonal.""" return self._lower.matrix @property def upper(self) -> Array: """The upper triangular matrix `U`.""" return self._upper.matrix @property def matrix(self) -> Array: """The matrix `A = LU` of the transformation.""" return self.lower @ self.upper @property def bias(self) -> Array: """The shift `b` of the transformation.""" return self._bias def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" if type(other) is LowerUpperTriangularAffine: # pylint: disable=unidiomatic-typecheck return all(( self.lower is other.lower, self.upper is other.upper, self.bias is other.bias, )) return False
(matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], bias: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
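The L/U parameterization described above can be written out directly: `L` is the strictly lower triangle of `M` plus an identity diagonal, and `U` is the upper triangle of `M` including the diagonal. A small standalone sketch of that split (not calling the bijector itself):

import jax.numpy as jnp

M = jnp.array([[2., 3.],
               [4., 5.]])
L = jnp.eye(M.shape[-1]) + jnp.tril(M, -1)  # [[1, 0], [4, 1]] -- unit diagonal
U = jnp.triu(M)                             # [[2, 3], [0, 5]]
A = L @ U                                   # [[2, 3], [8, 17]]
# log|det A| = sum(log|diag(U)|) = log(2) + log(5), computable in O(D).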
55,950
distrax._src.bijectors.lower_upper_triangular_affine
__init__
Initializes a `LowerUpperTriangularAffine` bijector. Args: matrix: a square matrix parameterizing `L` and `U` as described in the class docstring. Can also be a batch of matrices. If `matrix` is the identity, `LU` is also the identity. Note however that `matrix` is generally not equal to the product `LU`. bias: the vector `b` in `LUx + b`. Can also be a batch of vectors.
def __init__(self, matrix: Array, bias: Array): """Initializes a `LowerUpperTriangularAffine` bijector. Args: matrix: a square matrix parameterizing `L` and `U` as described in the class docstring. Can also be a batch of matrices. If `matrix` is the identity, `LU` is also the identity. Note however that `matrix` is generally not equal to the product `LU`. bias: the vector `b` in `LUx + b`. Can also be a batch of vectors. """ unconstrained_affine.check_affine_parameters(matrix, bias) self._upper = triangular_linear.TriangularLinear(matrix, is_lower=False) dim = matrix.shape[-1] lower = jnp.eye(dim) + jnp.tril(matrix, -1) # Replace diagonal with ones. self._lower = triangular_linear.TriangularLinear(lower, is_lower=True) self._shift = block.Block(shift.Shift(bias), 1) self._bias = bias super().__init__([self._shift, self._lower, self._upper])
(self, matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], bias: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
55,960
distrax._src.bijectors.lower_upper_triangular_affine
same_as
Returns True if this bijector is guaranteed to be the same as `other`.
def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" if type(other) is LowerUpperTriangularAffine: # pylint: disable=unidiomatic-typecheck return all(( self.lower is other.lower, self.upper is other.upper, self.bias is other.bias, )) return False
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
55,962
distrax._src.bijectors.masked_coupling
MaskedCoupling
Coupling bijector that uses a mask to specify which inputs are transformed. This coupling bijector takes in a boolean mask that indicates which inputs are transformed. Inputs where `mask==True` remain unchanged. Inputs where `mask==False` are transformed by an inner bijector, conditioned on the masked inputs. The number of event dimensions this bijector operates on is referred to as `event_ndims`, and is equal to both `event_ndims_in` and `event_ndims_out`. By default, `event_ndims` is equal to `mask.ndim + inner_event_ndims`. The user can override this by passing an explicit value for `event_ndims`. If `event_ndims > mask.ndim + inner_event_ndims`, the mask is broadcast to the extra dimensions. If `event_ndims < mask.ndims + inner_event_ndims`, the mask is assumed to be a batch of masks that will broadcast against the input. Let `f` be a conditional bijector (the inner bijector), `g` be a function (the conditioner), and `m` be a boolean mask interpreted numerically, such that True is 1 and False is 0. The masked coupling bijector is defined as follows: - Forward: `y = (1-m) * f(x; g(m*x)) + m*x` - Forward Jacobian log determinant: `log|det J(x)| = sum((1-m) * log|df/dx(x; g(m*x))|)` - Inverse: `x = (1-m) * f^{-1}(y; g(m*y)) + m*y` - Inverse Jacobian log determinant: `log|det J(y)| = sum((1-m) * log|df^{-1}/dy(y; g(m*y))|)`
class MaskedCoupling(base.Bijector): """Coupling bijector that uses a mask to specify which inputs are transformed. This coupling bijector takes in a boolean mask that indicates which inputs are transformed. Inputs where `mask==True` remain unchanged. Inputs where `mask==False` are transformed by an inner bijector, conditioned on the masked inputs. The number of event dimensions this bijector operates on is referred to as `event_ndims`, and is equal to both `event_ndims_in` and `event_ndims_out`. By default, `event_ndims` is equal to `mask.ndim + inner_event_ndims`. The user can override this by passing an explicit value for `event_ndims`. If `event_ndims > mask.ndim + inner_event_ndims`, the mask is broadcast to the extra dimensions. If `event_ndims < mask.ndims + inner_event_ndims`, the mask is assumed to be a batch of masks that will broadcast against the input. Let `f` be a conditional bijector (the inner bijector), `g` be a function (the conditioner), and `m` be a boolean mask interpreted numerically, such that True is 1 and False is 0. The masked coupling bijector is defined as follows: - Forward: `y = (1-m) * f(x; g(m*x)) + m*x` - Forward Jacobian log determinant: `log|det J(x)| = sum((1-m) * log|df/dx(x; g(m*x))|)` - Inverse: `x = (1-m) * f^{-1}(y; g(m*y)) + m*y` - Inverse Jacobian log determinant: `log|det J(y)| = sum((1-m) * log|df^{-1}/dy(y; g(m*y))|)` """ def __init__(self, mask: Array, conditioner: Callable[[Array], BijectorParams], bijector: Callable[[BijectorParams], base.BijectorLike], event_ndims: Optional[int] = None, inner_event_ndims: int = 0): """Initializes a MaskedCoupling bijector. Args: mask: the mask, or a batch of masks. Its elements must be boolean; a value of True indicates that the corresponding input remains unchanged, and a value of False indicates that the corresponding input is transformed. The mask should have `mask.ndim` equal to the number of batch dimensions plus `event_ndims - inner_event_ndims`. In particular, an inner event is either fully masked or fully un-masked: it is not possible to be partially masked. conditioner: a function that computes the parameters of the inner bijector as a function of the masked input. The output of the conditioner will be passed to `bijector` in order to obtain the inner bijector. bijector: a callable that returns the inner bijector that will be used to transform the input. The input to `bijector` is a set of parameters that can be used to configure the inner bijector. The `event_ndims_in` and `event_ndims_out` of this bijector must match the `inner_event_dims`. For example, if `inner_event_dims` is `0`, then the inner bijector must be a scalar bijector. event_ndims: the number of array dimensions the bijector operates on. If None, it defaults to `mask.ndim + inner_event_ndims`. Both `event_ndims_in` and `event_ndims_out` are equal to `event_ndims`. Note that `event_ndims` should be at least as large as `inner_event_ndims`. inner_event_ndims: the number of array dimensions the inner bijector operates on. This is `0` by default, meaning the inner bijector acts on scalars. """ if mask.dtype != bool: raise ValueError(f'`mask` must have values of type `bool`; got values of' f' type `{mask.dtype}`.') if event_ndims is not None and event_ndims < inner_event_ndims: raise ValueError(f'`event_ndims={event_ndims}` should be at least as' f' large as `inner_event_ndims={inner_event_ndims}`.') self._mask = mask self._event_mask = jnp.reshape(mask, mask.shape + (1,) * inner_event_ndims) self._conditioner = conditioner self._bijector = bijector self._inner_event_ndims = inner_event_ndims if event_ndims is None: self._event_ndims = mask.ndim + inner_event_ndims else: self._event_ndims = event_ndims super().__init__(event_ndims_in=self._event_ndims) @property def bijector(self) -> Callable[[BijectorParams], base.BijectorLike]: """The callable that returns the inner bijector of `MaskedCoupling`.""" return self._bijector @property def conditioner(self) -> Callable[[Array], BijectorParams]: """The conditioner function.""" return self._conditioner @property def mask(self) -> Array: """The mask characterizing the `MaskedCoupling`, with boolean `dtype`.""" return self._mask def _inner_bijector(self, params: BijectorParams) -> base.Bijector: bijector = conversion.as_bijector(self._bijector(params)) if (bijector.event_ndims_in != self._inner_event_ndims or bijector.event_ndims_out != self._inner_event_ndims): raise ValueError( 'The inner bijector event ndims in and out must match the' f' `inner_event_ndims={self._inner_event_ndims}`. Instead, got' f' `event_ndims_in={bijector.event_ndims_in}` and' f' `event_ndims_out={bijector.event_ndims_out}`.') return bijector def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" self._check_forward_input_shape(x) masked_x = jnp.where(self._event_mask, x, 0.) params = self._conditioner(masked_x) y0, log_d = self._inner_bijector(params).forward_and_log_det(x) y = jnp.where(self._event_mask, x, y0) logdet = math.sum_last( jnp.where(self._mask, 0., log_d), self._event_ndims - self._inner_event_ndims) return y, logdet def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" self._check_inverse_input_shape(y) masked_y = jnp.where(self._event_mask, y, 0.) params = self._conditioner(masked_y) x0, log_d = self._inner_bijector(params).inverse_and_log_det(y) x = jnp.where(self._event_mask, y, x0) logdet = math.sum_last(jnp.where(self._mask, 0., log_d), self._event_ndims - self._inner_event_ndims) return x, logdet
(mask: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], conditioner: Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Any], bijector: Callable[[Any], Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]]], event_ndims: Optional[int] = None, inner_event_ndims: int = 0)
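A typical way to use the coupling layer above is with a scalar affine inner bijector whose shift and log-scale come from the conditioner. The sketch below assumes the public distrax.MaskedCoupling and distrax.ScalarAffine classes; the toy conditioner is illustrative only (in practice it would be a neural network):

import jax
import jax.numpy as jnp
import distrax

def conditioner(masked_x):
    # Toy conditioner: returns per-element (shift, log_scale) parameters.
    return jnp.stack([masked_x, 0.1 * masked_x], axis=-1)

def make_inner_bijector(params):
    # Builds the scalar inner bijector from the conditioner output.
    return distrax.ScalarAffine(shift=params[..., 0], log_scale=params[..., 1])

mask = jnp.array([True, True, False, False])  # first half unchanged, second half transformed
coupling = distrax.MaskedCoupling(
    mask=mask, conditioner=conditioner, bijector=make_inner_bijector)

x = jnp.arange(4.)
y, logdet = coupling.forward_and_log_det(x)
x_back, _ = coupling.inverse_and_log_det(y)  # recovers x up to numerical error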
55,963
distrax._src.bijectors.masked_coupling
__init__
Initializes a MaskedCoupling bijector. Args: mask: the mask, or a batch of masks. Its elements must be boolean; a value of True indicates that the corresponding input remains unchanged, and a value of False indicates that the corresponding input is transformed. The mask should have `mask.ndim` equal to the number of batch dimensions plus `event_ndims - inner_event_ndims`. In particular, an inner event is either fully masked or fully un-masked: it is not possible to be partially masked. conditioner: a function that computes the parameters of the inner bijector as a function of the masked input. The output of the conditioner will be passed to `bijector` in order to obtain the inner bijector. bijector: a callable that returns the inner bijector that will be used to transform the input. The input to `bijector` is a set of parameters that can be used to configure the inner bijector. The `event_ndims_in` and `event_ndims_out` of this bijector must match the `inner_event_dims`. For example, if `inner_event_dims` is `0`, then the inner bijector must be a scalar bijector. event_ndims: the number of array dimensions the bijector operates on. If None, it defaults to `mask.ndim + inner_event_ndims`. Both `event_ndims_in` and `event_ndims_out` are equal to `event_ndims`. Note that `event_ndims` should be at least as large as `inner_event_ndims`. inner_event_ndims: the number of array dimensions the inner bijector operates on. This is `0` by default, meaning the inner bijector acts on scalars.
def __init__(self, mask: Array, conditioner: Callable[[Array], BijectorParams], bijector: Callable[[BijectorParams], base.BijectorLike], event_ndims: Optional[int] = None, inner_event_ndims: int = 0): """Initializes a MaskedCoupling bijector. Args: mask: the mask, or a batch of masks. Its elements must be boolean; a value of True indicates that the corresponding input remains unchanged, and a value of False indicates that the corresponding input is transformed. The mask should have `mask.ndim` equal to the number of batch dimensions plus `event_ndims - inner_event_ndims`. In particular, an inner event is either fully masked or fully un-masked: it is not possible to be partially masked. conditioner: a function that computes the parameters of the inner bijector as a function of the masked input. The output of the conditioner will be passed to `bijector` in order to obtain the inner bijector. bijector: a callable that returns the inner bijector that will be used to transform the input. The input to `bijector` is a set of parameters that can be used to configure the inner bijector. The `event_ndims_in` and `event_ndims_out` of this bijector must match the `inner_event_dims`. For example, if `inner_event_dims` is `0`, then the inner bijector must be a scalar bijector. event_ndims: the number of array dimensions the bijector operates on. If None, it defaults to `mask.ndim + inner_event_ndims`. Both `event_ndims_in` and `event_ndims_out` are equal to `event_ndims`. Note that `event_ndims` should be at least as large as `inner_event_ndims`. inner_event_ndims: the number of array dimensions the inner bijector operates on. This is `0` by default, meaning the inner bijector acts on scalars. """ if mask.dtype != bool: raise ValueError(f'`mask` must have values of type `bool`; got values of' f' type `{mask.dtype}`.') if event_ndims is not None and event_ndims < inner_event_ndims: raise ValueError(f'`event_ndims={event_ndims}` should be at least as' f' large as `inner_event_ndims={inner_event_ndims}`.') self._mask = mask self._event_mask = jnp.reshape(mask, mask.shape + (1,) * inner_event_ndims) self._conditioner = conditioner self._bijector = bijector self._inner_event_ndims = inner_event_ndims if event_ndims is None: self._event_ndims = mask.ndim + inner_event_ndims else: self._event_ndims = event_ndims super().__init__(event_ndims_in=self._event_ndims)
(self, mask: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], conditioner: Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Any], bijector: Callable[[Any], Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]]], event_ndims: Optional[int] = None, inner_event_ndims: int = 0)
55,967
distrax._src.bijectors.masked_coupling
_inner_bijector
null
def _inner_bijector(self, params: BijectorParams) -> base.Bijector: bijector = conversion.as_bijector(self._bijector(params)) if (bijector.event_ndims_in != self._inner_event_ndims or bijector.event_ndims_out != self._inner_event_ndims): raise ValueError( 'The inner bijector event ndims in and out must match the' f' `inner_event_ndims={self._inner_event_ndims}`. Instead, got' f' `event_ndims_in={bijector.event_ndims_in}` and' f' `event_ndims_out={bijector.event_ndims_out}`.') return bijector
(self, params: Any) -> distrax._src.bijectors.bijector.Bijector
55,969
distrax._src.bijectors.masked_coupling
forward_and_log_det
Computes y = f(x) and log|det J(f)(x)|.
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" self._check_forward_input_shape(x) masked_x = jnp.where(self._event_mask, x, 0.) params = self._conditioner(masked_x) y0, log_d = self._inner_bijector(params).forward_and_log_det(x) y = jnp.where(self._event_mask, x, y0) logdet = math.sum_last( jnp.where(self._mask, 0., log_d), self._event_ndims - self._inner_event_ndims) return y, logdet
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
55,972
distrax._src.bijectors.masked_coupling
inverse_and_log_det
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" self._check_inverse_input_shape(y) masked_y = jnp.where(self._event_mask, y, 0.) params = self._conditioner(masked_y) x0, log_d = self._inner_bijector(params).inverse_and_log_det(y) x = jnp.where(self._event_mask, y, x0) logdet = math.sum_last(jnp.where(self._mask, 0., log_d), self._event_ndims - self._inner_event_ndims) return x, logdet
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
55,976
distrax._src.distributions.mixture_of_two
MixtureOfTwo
A mixture of two distributions.
class MixtureOfTwo(base_distribution.Distribution): """A mixture of two distributions.""" def __init__( self, prob_a: Numeric, component_a: DistributionLike, component_b: DistributionLike): """Creates a mixture of two distributions. Differently from `MixtureSameFamily` the component distributions are allowed to belong to different families. Args: prob_a: a scalar weight for the `component_a`, is a float or a rank 0 vector. component_a: the first component distribution. component_b: the second component distribution. """ super().__init__() # Validate inputs. chex.assert_rank(prob_a, 0) if component_a.event_shape != component_b.event_shape: raise ValueError( f'The component distributions must have the same event shape, but ' f'{component_a.event_shape} != {component_b.event_shape}.') if component_a.batch_shape != component_b.batch_shape: raise ValueError( f'The component distributions must have the same batch shape, but ' f'{component_a.batch_shape} != {component_b.batch_shape}.') if component_a.dtype != component_b.dtype: raise ValueError( 'The component distributions must have the same dtype, but' f' {component_a.dtype} != {component_b.dtype}.') # Store args. self._prob_a = prob_a self._component_a = conversion.as_distribution(component_a) self._component_b = conversion.as_distribution(component_b) def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" key, key_a, key_b, mask_key = jax.random.split(key, num=4) mask_from_a = jax.random.bernoulli(mask_key, p=self._prob_a, shape=[n]) sample_a = self._component_a.sample(seed=key_a, sample_shape=n) sample_b = self._component_b.sample(seed=key_b, sample_shape=n) mask_from_a = jnp.expand_dims(mask_from_a, tuple(range(1, sample_a.ndim))) return jnp.where(mask_from_a, sample_a, sample_b) def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" logp1 = jnp.log(self._prob_a) + self._component_a.log_prob(value) logp2 = jnp.log(1 - self._prob_a) + self._component_b.log_prob(value) return jnp.logaddexp(logp1, logp2) @property def event_shape(self) -> Tuple[int, ...]: return self._component_a.event_shape @property def batch_shape(self) -> Tuple[int, ...]: return self._component_a.batch_shape @property def prob_a(self) -> Numeric: return self._prob_a @property def prob_b(self) -> Numeric: return 1. - self._prob_a def __getitem__(self, index) -> 'MixtureOfTwo': """See `Distribution.__getitem__`.""" index = base_distribution.to_batch_shape_index(self.batch_shape, index) return MixtureOfTwo( prob_a=self.prob_a, component_a=self._component_a[index], component_b=self._component_b[index])
(prob_a: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], component_a: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], component_b: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution])
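Because the class above only requires matching shapes and dtypes, the two components may come from different families. A brief usage sketch, assuming distrax exposes MixtureOfTwo, Normal, and Logistic publicly:

import jax
import jax.numpy as jnp
import distrax

mix = distrax.MixtureOfTwo(
    prob_a=0.3,
    component_a=distrax.Normal(loc=0., scale=1.),
    component_b=distrax.Logistic(loc=5., scale=2.))

# log p(x) = logaddexp(log(0.3) + log N(x), log(0.7) + log Logistic(x))
log_p = mix.log_prob(jnp.array(1.0))
samples = mix.sample(seed=jax.random.PRNGKey(0), sample_shape=4)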
55,977
distrax._src.distributions.mixture_of_two
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'MixtureOfTwo': """See `Distribution.__getitem__`.""" index = base_distribution.to_batch_shape_index(self.batch_shape, index) return MixtureOfTwo( prob_a=self.prob_a, component_a=self._component_a[index], component_b=self._component_b[index])
(self, index) -> distrax._src.distributions.mixture_of_two.MixtureOfTwo
55,978
distrax._src.distributions.mixture_of_two
__init__
Creates a mixture of two distributions. Differently from `MixtureSameFamily` the component distributions are allowed to belong to different families. Args: prob_a: a scalar weight for the `component_a`, is a float or a rank 0 vector. component_a: the first component distribution. component_b: the second component distribution.
def __init__( self, prob_a: Numeric, component_a: DistributionLike, component_b: DistributionLike): """Creates a mixture of two distributions. Differently from `MixtureSameFamily` the component distributions are allowed to belong to different families. Args: prob_a: a scalar weight for the `component_a`, is a float or a rank 0 vector. component_a: the first component distribution. component_b: the second component distribution. """ super().__init__() # Validate inputs. chex.assert_rank(prob_a, 0) if component_a.event_shape != component_b.event_shape: raise ValueError( f'The component distributions must have the same event shape, but ' f'{component_a.event_shape} != {component_b.event_shape}.') if component_a.batch_shape != component_b.batch_shape: raise ValueError( f'The component distributions must have the same batch shape, but ' f'{component_a.batch_shape} != {component_b.batch_shape}.') if component_a.dtype != component_b.dtype: raise ValueError( 'The component distributions must have the same dtype, but' f' {component_a.dtype} != {component_b.dtype}.') # Store args. self._prob_a = prob_a self._component_a = conversion.as_distribution(component_a) self._component_b = conversion.as_distribution(component_b)
(self, prob_a: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], component_a: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], component_b: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution])
55,981
distrax._src.distributions.mixture_of_two
_sample_n
See `Distribution._sample_n`.
def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" key, key_a, key_b, mask_key = jax.random.split(key, num=4) mask_from_a = jax.random.bernoulli(mask_key, p=self._prob_a, shape=[n]) sample_a = self._component_a.sample(seed=key_a, sample_shape=n) sample_b = self._component_b.sample(seed=key_b, sample_shape=n) mask_from_a = jnp.expand_dims(mask_from_a, tuple(range(1, sample_a.ndim))) return jnp.where(mask_from_a, sample_a, sample_b)
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
55,988
distrax._src.distributions.mixture_of_two
log_prob
See `Distribution.log_prob`.
def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" logp1 = jnp.log(self._prob_a) + self._component_a.log_prob(value) logp2 = jnp.log(1 - self._prob_a) + self._component_b.log_prob(value) return jnp.logaddexp(logp1, logp2)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,000
distrax._src.distributions.mixture_same_family
MixtureSameFamily
Mixture with components provided from a single batched distribution.
class MixtureSameFamily(distribution.Distribution): """Mixture with components provided from a single batched distribution.""" equiv_tfp_cls = tfd.MixtureSameFamily def __init__(self, mixture_distribution: CategoricalLike, components_distribution: DistributionLike): """Initializes a mixture distribution for components of a shared family. Args: mixture_distribution: Distribution over selecting components. components_distribution: Component distribution, with rightmost batch dimension indexing components. """ super().__init__() mixture_distribution = conversion.as_distribution(mixture_distribution) components_distribution = conversion.as_distribution( components_distribution) self._mixture_distribution = mixture_distribution self._components_distribution = components_distribution # Store normalized weights (last axis of logits is for components). # This uses the TFP API, which is replicated in Distrax. self._mixture_log_probs = jax.nn.log_softmax( mixture_distribution.logits_parameter(), axis=-1) batch_shape_mixture = mixture_distribution.batch_shape batch_shape_components = components_distribution.batch_shape if batch_shape_mixture != batch_shape_components[:-1]: msg = (f'`mixture_distribution.batch_shape` ' f'({mixture_distribution.batch_shape}) is not compatible with ' f'`components_distribution.batch_shape` ' f'({components_distribution.batch_shape}`)') raise ValueError(msg) @property def components_distribution(self): """The components distribution.""" return self._components_distribution @property def mixture_distribution(self): """The mixture distribution.""" return self._mixture_distribution @property def event_shape(self) -> Tuple[int, ...]: """Shape of event of distribution samples.""" return self._components_distribution.event_shape @property def batch_shape(self) -> Tuple[int, ...]: """Shape of batch of distribution samples.""" return self._components_distribution.batch_shape[:-1] def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" key_mix, key_components = jax.random.split(key) mix_sample = self.mixture_distribution.sample(sample_shape=n, seed=key_mix) num_components = self._components_distribution.batch_shape[-1] # Sample from all components, then multiply with a one-hot mask and sum. # While this does computation that is not used eventually, it is faster on # GPU/TPUs, which excel at batched operations (as opposed to indexing). It # is in particular more efficient than using `gather` or `where` operations. mask = jax.nn.one_hot(mix_sample, num_components) samples_all = self.components_distribution.sample(sample_shape=n, seed=key_components) # Make mask broadcast with (potentially multivariate) samples. mask = mask.reshape(mask.shape + (1,) * len(self.event_shape)) # Need to sum over the component axis, which is the last one for scalar # components, the second-last one for 1-dim events, etc. samples = jnp.sum(samples_all * mask, axis=-1 - len(self.event_shape)) return samples def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" # Add component axis to make input broadcast with components distribution. expanded = jnp.expand_dims(value, axis=-1 - len(self.event_shape)) # Compute `log_prob` in every component. lp = self.components_distribution.log_prob(expanded) # Last batch axis is number of components, i.e. last axis of `lp` below. # Last axis of mixture log probs are components, so reduce last axis. return jax.scipy.special.logsumexp(a=lp + self._mixture_log_probs, axis=-1) def mean(self) -> Array: """Calculates the mean.""" means = self.components_distribution.mean() weights = jnp.exp(self._mixture_log_probs) # Broadcast weights over event shape, and average over component axis. weights = weights.reshape(weights.shape + (1,) * len(self.event_shape)) return jnp.sum(means * weights, axis=-1 - len(self.event_shape)) def variance(self) -> Array: """Calculates the variance.""" means = self.components_distribution.mean() variances = self.components_distribution.variance() weights = jnp.exp(self._mixture_log_probs) # Make weights broadcast over event shape. weights = weights.reshape(weights.shape + (1,) * len(self.event_shape)) # Component axis to reduce over. component_axis = -1 - len(self.event_shape) # Using: Var(Y) = E[Var(Y|X)] + Var(E[Y|X]). mean = jnp.sum(means * weights, axis=component_axis) mean_cond_var = jnp.sum(weights * variances, axis=component_axis) # Need to add an axis to `mean` to make it broadcast over components. sq_diff = jnp.square(means - jnp.expand_dims(mean, axis=component_axis)) var_cond_mean = jnp.sum(weights * sq_diff, axis=component_axis) return mean_cond_var + var_cond_mean def __getitem__(self, index) -> 'MixtureSameFamily': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return MixtureSameFamily( mixture_distribution=self.mixture_distribution[index], components_distribution=self.components_distribution[index])
(mixture_distribution: Union[distrax._src.distributions.categorical.Categorical, tensorflow_probability.substrates.jax.distributions.categorical.Categorical], components_distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution])
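A minimal use of the class above: the rightmost batch axis of `components_distribution` indexes the components, and its size must match the number of categories of `mixture_distribution`. A hedged sketch using the public distrax API:

import jax
import jax.numpy as jnp
import distrax

# Three scalar Gaussian components with mixing weights [0.2, 0.5, 0.3].
mix = distrax.MixtureSameFamily(
    mixture_distribution=distrax.Categorical(probs=jnp.array([0.2, 0.5, 0.3])),
    components_distribution=distrax.Normal(
        loc=jnp.array([-2., 0., 3.]), scale=jnp.array([0.5, 1., 2.])))

print(mix.batch_shape)  # () -- the component axis is consumed by the mixture
print(mix.mean())       # 0.2*(-2) + 0.5*0 + 0.3*3 = 0.5
samples = mix.sample(seed=jax.random.PRNGKey(0), sample_shape=5)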
56,001
distrax._src.distributions.mixture_same_family
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'MixtureSameFamily': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return MixtureSameFamily( mixture_distribution=self.mixture_distribution[index], components_distribution=self.components_distribution[index])
(self, index) -> distrax._src.distributions.mixture_same_family.MixtureSameFamily
56,002
distrax._src.distributions.mixture_same_family
__init__
Initializes a mixture distribution for components of a shared family. Args: mixture_distribution: Distribution over selecting components. components_distribution: Component distribution, with rightmost batch dimension indexing components.
def __init__(self, mixture_distribution: CategoricalLike, components_distribution: DistributionLike): """Initializes a mixture distribution for components of a shared family. Args: mixture_distribution: Distribution over selecting components. components_distribution: Component distribution, with rightmost batch dimension indexing components. """ super().__init__() mixture_distribution = conversion.as_distribution(mixture_distribution) components_distribution = conversion.as_distribution( components_distribution) self._mixture_distribution = mixture_distribution self._components_distribution = components_distribution # Store normalized weights (last axis of logits is for components). # This uses the TFP API, which is replicated in Distrax. self._mixture_log_probs = jax.nn.log_softmax( mixture_distribution.logits_parameter(), axis=-1) batch_shape_mixture = mixture_distribution.batch_shape batch_shape_components = components_distribution.batch_shape if batch_shape_mixture != batch_shape_components[:-1]: msg = (f'`mixture_distribution.batch_shape` ' f'({mixture_distribution.batch_shape}) is not compatible with ' f'`components_distribution.batch_shape` ' f'({components_distribution.batch_shape}`)') raise ValueError(msg)
(self, mixture_distribution: Union[distrax._src.distributions.categorical.Categorical, tensorflow_probability.substrates.jax.distributions.categorical.Categorical], components_distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution])
56,005
distrax._src.distributions.mixture_same_family
_sample_n
See `Distribution._sample_n`.
def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" key_mix, key_components = jax.random.split(key) mix_sample = self.mixture_distribution.sample(sample_shape=n, seed=key_mix) num_components = self._components_distribution.batch_shape[-1] # Sample from all components, then multiply with a one-hot mask and sum. # While this does computation that is not used eventually, it is faster on # GPU/TPUs, which excel at batched operations (as opposed to indexing). It # is in particular more efficient than using `gather` or `where` operations. mask = jax.nn.one_hot(mix_sample, num_components) samples_all = self.components_distribution.sample(sample_shape=n, seed=key_components) # Make mask broadcast with (potentially multivariate) samples. mask = mask.reshape(mask.shape + (1,) * len(self.event_shape)) # Need to sum over the component axis, which is the last one for scalar # components, the second-last one for 1-dim events, etc. samples = jnp.sum(samples_all * mask, axis=-1 - len(self.event_shape)) return samples
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,012
distrax._src.distributions.mixture_same_family
log_prob
See `Distribution.log_prob`.
def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" # Add component axis to make input broadcast with components distribution. expanded = jnp.expand_dims(value, axis=-1 - len(self.event_shape)) # Compute `log_prob` in every component. lp = self.components_distribution.log_prob(expanded) # Last batch axis is number of components, i.e. last axis of `lp` below. # Last axis of mixture log probs are components, so reduce last axis. return jax.scipy.special.logsumexp(a=lp + self._mixture_log_probs, axis=-1)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,014
distrax._src.distributions.mixture_same_family
mean
Calculates the mean.
def mean(self) -> Array: """Calculates the mean.""" means = self.components_distribution.mean() weights = jnp.exp(self._mixture_log_probs) # Broadcast weights over event shape, and average over component axis. weights = weights.reshape(weights.shape + (1,) * len(self.event_shape)) return jnp.sum(means * weights, axis=-1 - len(self.event_shape))
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,023
distrax._src.distributions.mixture_same_family
variance
Calculates the variance.
def variance(self) -> Array: """Calculates the variance.""" means = self.components_distribution.mean() variances = self.components_distribution.variance() weights = jnp.exp(self._mixture_log_probs) # Make weights broadcast over event shape. weights = weights.reshape(weights.shape + (1,) * len(self.event_shape)) # Component axis to reduce over. component_axis = -1 - len(self.event_shape) # Using: Var(Y) = E[Var(Y|X)] + Var(E[Y|X]). mean = jnp.sum(means * weights, axis=component_axis) mean_cond_var = jnp.sum(weights * variances, axis=component_axis) # Need to add an axis to `mean` to make it broadcast over components. sq_diff = jnp.square(means - jnp.expand_dims(mean, axis=component_axis)) var_cond_mean = jnp.sum(weights * sq_diff, axis=component_axis) return mean_cond_var + var_cond_mean
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
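The variance above relies on the law of total variance, Var(Y) = E[Var(Y|X)] + Var(E[Y|X]), with X the component index. A small numeric sketch (hypothetical values, same assumed `distrax` exports as above) checks it against a Monte Carlo estimate.

import jax
import jax.numpy as jnp
import distrax

mixture = distrax.MixtureSameFamily(
    mixture_distribution=distrax.Categorical(probs=jnp.array([0.4, 0.6])),
    components_distribution=distrax.Normal(
        loc=jnp.array([0.0, 3.0]), scale=jnp.array([1.0, 0.5])))
analytic = mixture.variance()
samples = mixture.sample(seed=jax.random.PRNGKey(1), sample_shape=100_000)
empirical = jnp.var(samples)
# `analytic` and `empirical` should agree to within Monte Carlo error.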
56,024
distrax._src.distributions.multinomial
Multinomial
Multinomial distribution with parameter `probs`.
class Multinomial(distribution.Distribution): """Multinomial distribution with parameter `probs`.""" equiv_tfp_cls = tfd.Multinomial def __init__(self, total_count: Numeric, logits: Optional[Array] = None, probs: Optional[Array] = None, dtype: Union[jnp.dtype, type[Any]] = int): """Initializes a Multinomial distribution. Args: total_count: The number of trials per sample. logits: Logit transform of the probability of each category. Only one of `logits` or `probs` can be specified. probs: Probability of each category. Only one of `logits` or `probs` can be specified. dtype: The type of event samples. """ super().__init__() logits = None if logits is None else conversion.as_float_array(logits) probs = None if probs is None else conversion.as_float_array(probs) if (logits is None) == (probs is None): raise ValueError( f'One and exactly one of `logits` and `probs` should be `None`, ' f'but `logits` is {logits} and `probs` is {probs}.') if logits is not None and (not logits.shape or logits.shape[-1] < 2): raise ValueError( f'The last dimension of `logits` must be greater than 1, but ' f'`logits.shape = {logits.shape}`.') if probs is not None and (not probs.shape or probs.shape[-1] < 2): raise ValueError( f'The last dimension of `probs` must be greater than 1, but ' f'`probs.shape = {probs.shape}`.') if not (jnp.issubdtype(dtype, jnp.integer) or jnp.issubdtype(dtype, jnp.floating)): raise ValueError( f'The dtype of `{self.name}` must be integer or floating-point, ' f'instead got `{dtype}`.') self._total_count = jnp.asarray(total_count, dtype=dtype) self._probs = None if probs is None else math.normalize(probs=probs) self._logits = None if logits is None else math.normalize(logits=logits) self._dtype = dtype if self._probs is not None: probs_batch_shape = self._probs.shape[:-1] else: assert self._logits is not None probs_batch_shape = self._logits.shape[:-1] self._batch_shape = lax.broadcast_shapes( probs_batch_shape, self._total_count.shape) @property def event_shape(self) -> Tuple[int, ...]: """Shape of event of distribution samples.""" if self._logits is not None: return self._logits.shape[-1:] else: return self._probs.shape[-1:] @property def batch_shape(self) -> Tuple[int, ...]: """Shape of batch of distribution samples.""" return self._batch_shape @property def total_count(self) -> Array: """The number of trials per sample.""" return jnp.broadcast_to(self._total_count, self.batch_shape) @property def num_trials(self) -> Array: """The number of trials for each event.""" return self.total_count @property def logits(self) -> Array: """The logits for each event.""" if self._logits is not None: return jnp.broadcast_to(self._logits, self.batch_shape + self.event_shape) return jnp.broadcast_to(jnp.log(self._probs), self.batch_shape + self.event_shape) @property def probs(self) -> Array: """The probabilities for each event.""" if self._probs is not None: return jnp.broadcast_to(self._probs, self.batch_shape + self.event_shape) return jnp.broadcast_to(jax.nn.softmax(self._logits, axis=-1), self.batch_shape + self.event_shape) @property def log_of_probs(self) -> Array: """The log probabilities for each event.""" if self._logits is not None: # jax.nn.log_softmax was already applied in init to logits. return jnp.broadcast_to(self._logits, self.batch_shape + self.event_shape) return jnp.broadcast_to(jnp.log(self._probs), self.batch_shape + self.event_shape) def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" total_permutations = lax.lgamma(self._total_count + 1.) 
counts_factorial = lax.lgamma(value + 1.) redundant_permutations = jnp.sum(counts_factorial, axis=-1) log_combinations = total_permutations - redundant_permutations return log_combinations + jnp.sum( math.multiply_no_nan(self.log_of_probs, value), axis=-1) def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" num_keys = functools.reduce(operator.mul, self.batch_shape, 1) keys = jax.random.split(key, num=num_keys) total_count = jnp.reshape(self.total_count, (-1,)) logits = jnp.reshape(self.logits, (-1,) + self.event_shape) sample_fn = jax.vmap( self._sample_n_scalar, in_axes=(0, 0, None, 0, None), out_axes=1) samples = sample_fn(keys, total_count, n, logits, self._dtype) # [n, B, K] return samples.reshape((n,) + self.batch_shape + self.event_shape) @staticmethod def _sample_n_scalar( key: PRNGKey, total_count: Union[int, Array], n: int, logits: Array, dtype: jnp.dtype) -> Array: """Sample method for a Multinomial with integer `total_count`.""" def cond_func(args): i, _, _ = args return jnp.less(i, total_count) def body_func(args): i, key_i, sample_aggregator = args key_i, current_key = jax.random.split(key_i) sample_i = jax.random.categorical(current_key, logits=logits, shape=(n,)) one_hot_i = jax.nn.one_hot(sample_i, logits.shape[0]).astype(dtype) return i + 1, key_i, sample_aggregator + one_hot_i init_aggregator = jnp.zeros((n, logits.shape[0]), dtype=dtype) return lax.while_loop(cond_func, body_func, (0, key, init_aggregator))[2] def entropy(self) -> Array: """Calculates the Shannon entropy (in nats).""" # The method `_entropy_scalar` does not work when `self.total_count` is an # array (instead of a scalar) or when we jit the function, so we default to # computing the entropy using an alternative method that uses a lax while # loop and does not create intermediate arrays whose shape depends on # `self.total_count`. entropy_fn = jnp.vectorize( self._entropy_scalar_with_lax, signature='(),(k),(k)->()') return entropy_fn(self.total_count, self.probs, self.log_of_probs) @staticmethod def _entropy_scalar( total_count: int, probs: Array, log_of_probs: Array ) -> Union[jnp.float32, jnp.float64]: """Calculates the entropy for a Multinomial with integer `total_count`.""" # Constant factors in the entropy. xi = jnp.arange(total_count + 1, dtype=probs.dtype) log_xi_factorial = lax.lgamma(xi + 1) log_n_minus_xi_factorial = jnp.flip(log_xi_factorial, axis=-1) log_n_factorial = log_xi_factorial[..., -1] log_comb_n_xi = ( log_n_factorial[..., None] - log_xi_factorial - log_n_minus_xi_factorial) comb_n_xi = jnp.round(jnp.exp(log_comb_n_xi)) chex.assert_shape(comb_n_xi, (total_count + 1,)) likelihood1 = math.power_no_nan(probs[..., None], xi) likelihood2 = math.power_no_nan(1. - probs[..., None], total_count - xi) chex.assert_shape(likelihood1, (probs.shape[-1], total_count + 1,)) chex.assert_shape(likelihood2, (probs.shape[-1], total_count + 1,)) likelihood = jnp.sum(likelihood1 * likelihood2, axis=-2) chex.assert_shape(likelihood, (total_count + 1,)) comb_term = jnp.sum(comb_n_xi * log_xi_factorial * likelihood, axis=-1) chex.assert_shape(comb_term, ()) # Probs factors in the entropy. 
n_probs_factor = jnp.sum( total_count * math.multiply_no_nan(log_of_probs, probs), axis=-1) return - log_n_factorial - n_probs_factor + comb_term @staticmethod def _entropy_scalar_with_lax( total_count: int, probs: Array, log_of_probs: Array ) -> Union[jnp.float32, jnp.float64]: """Like `_entropy_scalar`, but uses a lax while loop.""" dtype = probs.dtype log_n_factorial = lax.lgamma(jnp.asarray(total_count + 1, dtype=dtype)) def cond_func(args): xi, _ = args return jnp.less_equal(xi, total_count) def body_func(args): xi, accumulated_sum = args xi_float = jnp.asarray(xi, dtype=dtype) log_xi_factorial = lax.lgamma(xi_float + 1.) log_comb_n_xi = (log_n_factorial - log_xi_factorial - lax.lgamma(total_count - xi_float + 1.)) comb_n_xi = jnp.round(jnp.exp(log_comb_n_xi)) likelihood1 = math.power_no_nan(probs, xi) likelihood2 = math.power_no_nan(1. - probs, total_count - xi) likelihood = likelihood1 * likelihood2 comb_term = comb_n_xi * log_xi_factorial * likelihood # [K] chex.assert_shape(comb_term, (probs.shape[-1],)) return xi + 1, accumulated_sum + comb_term comb_term = jnp.sum( lax.while_loop(cond_func, body_func, (0, jnp.zeros_like(probs)))[1], axis=-1) n_probs_factor = jnp.sum( total_count * math.multiply_no_nan(log_of_probs, probs), axis=-1) return - log_n_factorial - n_probs_factor + comb_term def mean(self) -> Array: """Calculates the mean.""" return self._total_count[..., None] * self.probs def variance(self) -> Array: """Calculates the variance.""" probs = self.probs return self._total_count[..., None] * probs * (1. - probs) def covariance(self) -> Array: """Calculates the covariance.""" probs = self.probs cov_matrix = -self._total_count[..., None, None] * ( probs[..., None, :] * probs[..., :, None]) chex.assert_shape(cov_matrix, probs.shape + self.event_shape) # Missing diagonal term in the covariance matrix. cov_matrix += jnp.vectorize( jnp.diag, signature='(k)->(k,k)')( self._total_count[..., None] * probs) return cov_matrix def __getitem__(self, index) -> 'Multinomial': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) total_count = self.total_count[index] if self._logits is not None: return Multinomial( total_count=total_count, logits=self.logits[index], dtype=self._dtype) return Multinomial( total_count=total_count, probs=self.probs[index], dtype=self._dtype)
(total_count: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
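A minimal usage sketch for the class above, assuming it is exported as `distrax.Multinomial` (values are hypothetical):

import jax
import jax.numpy as jnp
import distrax

dist = distrax.Multinomial(total_count=10, probs=jnp.array([0.2, 0.3, 0.5]))
counts = dist.sample(seed=jax.random.PRNGKey(0))   # integer counts summing to 10, shape (3,)
log_p = dist.log_prob(jnp.array([2.0, 3.0, 5.0]))  # log-probability of one count vector
mean, var = dist.mean(), dist.variance()           # n * p and n * p * (1 - p)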
56,025
distrax._src.distributions.multinomial
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'Multinomial': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) total_count = self.total_count[index] if self._logits is not None: return Multinomial( total_count=total_count, logits=self.logits[index], dtype=self._dtype) return Multinomial( total_count=total_count, probs=self.probs[index], dtype=self._dtype)
(self, index) -> distrax._src.distributions.multinomial.Multinomial
56,026
distrax._src.distributions.multinomial
__init__
Initializes a Multinomial distribution. Args: total_count: The number of trials per sample. logits: Logit transform of the probability of each category. Only one of `logits` or `probs` can be specified. probs: Probability of each category. Only one of `logits` or `probs` can be specified. dtype: The type of event samples.
def __init__(self, total_count: Numeric, logits: Optional[Array] = None, probs: Optional[Array] = None, dtype: Union[jnp.dtype, type[Any]] = int): """Initializes a Multinomial distribution. Args: total_count: The number of trials per sample. logits: Logit transform of the probability of each category. Only one of `logits` or `probs` can be specified. probs: Probability of each category. Only one of `logits` or `probs` can be specified. dtype: The type of event samples. """ super().__init__() logits = None if logits is None else conversion.as_float_array(logits) probs = None if probs is None else conversion.as_float_array(probs) if (logits is None) == (probs is None): raise ValueError( f'One and exactly one of `logits` and `probs` should be `None`, ' f'but `logits` is {logits} and `probs` is {probs}.') if logits is not None and (not logits.shape or logits.shape[-1] < 2): raise ValueError( f'The last dimension of `logits` must be greater than 1, but ' f'`logits.shape = {logits.shape}`.') if probs is not None and (not probs.shape or probs.shape[-1] < 2): raise ValueError( f'The last dimension of `probs` must be greater than 1, but ' f'`probs.shape = {probs.shape}`.') if not (jnp.issubdtype(dtype, jnp.integer) or jnp.issubdtype(dtype, jnp.floating)): raise ValueError( f'The dtype of `{self.name}` must be integer or floating-point, ' f'instead got `{dtype}`.') self._total_count = jnp.asarray(total_count, dtype=dtype) self._probs = None if probs is None else math.normalize(probs=probs) self._logits = None if logits is None else math.normalize(logits=logits) self._dtype = dtype if self._probs is not None: probs_batch_shape = self._probs.shape[:-1] else: assert self._logits is not None probs_batch_shape = self._logits.shape[:-1] self._batch_shape = lax.broadcast_shapes( probs_batch_shape, self._total_count.shape)
(self, total_count: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
56,028
distrax._src.distributions.multinomial
_entropy_scalar
Calculates the entropy for a Multinomial with integer `total_count`.
@staticmethod def _entropy_scalar( total_count: int, probs: Array, log_of_probs: Array ) -> Union[jnp.float32, jnp.float64]: """Calculates the entropy for a Multinomial with integer `total_count`.""" # Constant factors in the entropy. xi = jnp.arange(total_count + 1, dtype=probs.dtype) log_xi_factorial = lax.lgamma(xi + 1) log_n_minus_xi_factorial = jnp.flip(log_xi_factorial, axis=-1) log_n_factorial = log_xi_factorial[..., -1] log_comb_n_xi = ( log_n_factorial[..., None] - log_xi_factorial - log_n_minus_xi_factorial) comb_n_xi = jnp.round(jnp.exp(log_comb_n_xi)) chex.assert_shape(comb_n_xi, (total_count + 1,)) likelihood1 = math.power_no_nan(probs[..., None], xi) likelihood2 = math.power_no_nan(1. - probs[..., None], total_count - xi) chex.assert_shape(likelihood1, (probs.shape[-1], total_count + 1,)) chex.assert_shape(likelihood2, (probs.shape[-1], total_count + 1,)) likelihood = jnp.sum(likelihood1 * likelihood2, axis=-2) chex.assert_shape(likelihood, (total_count + 1,)) comb_term = jnp.sum(comb_n_xi * log_xi_factorial * likelihood, axis=-1) chex.assert_shape(comb_term, ()) # Probs factors in the entropy. n_probs_factor = jnp.sum( total_count * math.multiply_no_nan(log_of_probs, probs), axis=-1) return - log_n_factorial - n_probs_factor + comb_term
(total_count: int, probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], log_of_probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.numpy.float32, jax.numpy.float64]
56,029
distrax._src.distributions.multinomial
_entropy_scalar_with_lax
Like `_entropy_scalar`, but uses a lax while loop.
@staticmethod def _entropy_scalar_with_lax( total_count: int, probs: Array, log_of_probs: Array ) -> Union[jnp.float32, jnp.float64]: """Like `_entropy_scalar`, but uses a lax while loop.""" dtype = probs.dtype log_n_factorial = lax.lgamma(jnp.asarray(total_count + 1, dtype=dtype)) def cond_func(args): xi, _ = args return jnp.less_equal(xi, total_count) def body_func(args): xi, accumulated_sum = args xi_float = jnp.asarray(xi, dtype=dtype) log_xi_factorial = lax.lgamma(xi_float + 1.) log_comb_n_xi = (log_n_factorial - log_xi_factorial - lax.lgamma(total_count - xi_float + 1.)) comb_n_xi = jnp.round(jnp.exp(log_comb_n_xi)) likelihood1 = math.power_no_nan(probs, xi) likelihood2 = math.power_no_nan(1. - probs, total_count - xi) likelihood = likelihood1 * likelihood2 comb_term = comb_n_xi * log_xi_factorial * likelihood # [K] chex.assert_shape(comb_term, (probs.shape[-1],)) return xi + 1, accumulated_sum + comb_term comb_term = jnp.sum( lax.while_loop(cond_func, body_func, (0, jnp.zeros_like(probs)))[1], axis=-1) n_probs_factor = jnp.sum( total_count * math.multiply_no_nan(log_of_probs, probs), axis=-1) return - log_n_factorial - n_probs_factor + comb_term
(total_count: int, probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], log_of_probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.numpy.float32, jax.numpy.float64]
56,031
distrax._src.distributions.multinomial
_sample_n
See `Distribution._sample_n`.
def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" num_keys = functools.reduce(operator.mul, self.batch_shape, 1) keys = jax.random.split(key, num=num_keys) total_count = jnp.reshape(self.total_count, (-1,)) logits = jnp.reshape(self.logits, (-1,) + self.event_shape) sample_fn = jax.vmap( self._sample_n_scalar, in_axes=(0, 0, None, 0, None), out_axes=1) samples = sample_fn(keys, total_count, n, logits, self._dtype) # [n, B, K] return samples.reshape((n,) + self.batch_shape + self.event_shape)
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,033
distrax._src.distributions.multinomial
_sample_n_scalar
Sample method for a Multinomial with integer `total_count`.
@staticmethod def _sample_n_scalar( key: PRNGKey, total_count: Union[int, Array], n: int, logits: Array, dtype: jnp.dtype) -> Array: """Sample method for a Multinomial with integer `total_count`.""" def cond_func(args): i, _, _ = args return jnp.less(i, total_count) def body_func(args): i, key_i, sample_aggregator = args key_i, current_key = jax.random.split(key_i) sample_i = jax.random.categorical(current_key, logits=logits, shape=(n,)) one_hot_i = jax.nn.one_hot(sample_i, logits.shape[0]).astype(dtype) return i + 1, key_i, sample_aggregator + one_hot_i init_aggregator = jnp.zeros((n, logits.shape[0]), dtype=dtype) return lax.while_loop(cond_func, body_func, (0, key, init_aggregator))[2]
(key: jax.Array, total_count: Union[int, jax.Array, numpy.ndarray, numpy.bool_, numpy.number], n: int, logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], dtype: numpy.dtype) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
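The sampler above accumulates `total_count` one-hot categorical draws inside a `lax.while_loop`, so no intermediate array shape depends on `total_count`. The sketch below shows the same idea as a standalone, hypothetical helper; it uses `lax.scan` over a fixed number of keys instead of the while loop, which requires a static Python-int `total_count`.

import jax
import jax.numpy as jnp

def multinomial_sample(key, total_count, logits, n):
  # Sum of `total_count` one-hot categorical draws has the same distribution
  # as a single Multinomial(total_count, softmax(logits)) draw.
  def body(acc, key_i):
    idx = jax.random.categorical(key_i, logits=logits, shape=(n,))
    return acc + jax.nn.one_hot(idx, logits.shape[0]), None
  keys = jax.random.split(key, total_count)
  counts, _ = jax.lax.scan(body, jnp.zeros((n, logits.shape[0])), keys)
  return counts

counts = multinomial_sample(
    jax.random.PRNGKey(0), 10, jnp.log(jnp.array([0.2, 0.3, 0.5])), n=4)  # shape (4, 3)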
56,035
distrax._src.distributions.multinomial
covariance
Calculates the covariance.
def covariance(self) -> Array: """Calculates the covariance.""" probs = self.probs cov_matrix = -self._total_count[..., None, None] * ( probs[..., None, :] * probs[..., :, None]) chex.assert_shape(cov_matrix, probs.shape + self.event_shape) # Missing diagonal term in the covariance matrix. cov_matrix += jnp.vectorize( jnp.diag, signature='(k)->(k,k)')( self._total_count[..., None] * probs) return cov_matrix
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
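The covariance above implements Cov = n (diag(p) - p p^T). A quick elementwise check (hypothetical numbers, same assumed `distrax.Multinomial` export):

import jax.numpy as jnp
import distrax

p = jnp.array([0.2, 0.3, 0.5])
dist = distrax.Multinomial(total_count=10, probs=p)
cov = dist.covariance()                            # shape (3, 3)
expected = 10.0 * (jnp.diag(p) - jnp.outer(p, p))
# `cov` and `expected` should match elementwise.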
56,037
distrax._src.distributions.multinomial
entropy
Calculates the Shannon entropy (in nats).
def entropy(self) -> Array: """Calculates the Shannon entropy (in nats).""" # The method `_entropy_scalar` does not work when `self.total_count` is an # array (instead of a scalar) or when we jit the function, so we default to # computing the entropy using an alternative method that uses a lax while # loop and does not create intermediate arrays whose shape depends on # `self.total_count`. entropy_fn = jnp.vectorize( self._entropy_scalar_with_lax, signature='(),(k),(k)->()') return entropy_fn(self.total_count, self.probs, self.log_of_probs)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
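Because the while-loop variant keeps all intermediate shapes independent of `total_count`, the entropy can be evaluated under `jax.jit` and with a batched `total_count`, as the comment above notes. A small sketch (same assumed export; values hypothetical):

import jax
import jax.numpy as jnp
import distrax

@jax.jit
def multinomial_entropy(total_count, probs):
  return distrax.Multinomial(total_count=total_count, probs=probs).entropy()

h = multinomial_entropy(jnp.array([5, 10]),
                        jnp.array([[0.2, 0.8], [0.5, 0.5]]))  # shape (2,)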
56,040
distrax._src.distributions.multinomial
log_prob
See `Distribution.log_prob`.
def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" total_permutations = lax.lgamma(self._total_count + 1.) counts_factorial = lax.lgamma(value + 1.) redundant_permutations = jnp.sum(counts_factorial, axis=-1) log_combinations = total_permutations - redundant_permutations return log_combinations + jnp.sum( math.multiply_no_nan(self.log_of_probs, value), axis=-1)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,042
distrax._src.distributions.multinomial
mean
Calculates the mean.
def mean(self) -> Array: """Calculates the mean.""" return self._total_count[..., None] * self.probs
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,051
distrax._src.distributions.multinomial
variance
Calculates the variance.
def variance(self) -> Array: """Calculates the variance.""" probs = self.probs return self._total_count[..., None] * probs * (1. - probs)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,052
distrax._src.distributions.mvn_diag
MultivariateNormalDiag
Multivariate normal distribution on `R^k` with diagonal covariance.
class MultivariateNormalDiag(MultivariateNormalFromBijector): """Multivariate normal distribution on `R^k` with diagonal covariance.""" equiv_tfp_cls = tfd.MultivariateNormalDiag def __init__(self, loc: Optional[Array] = None, scale_diag: Optional[Array] = None): """Initializes a MultivariateNormalDiag distribution. Args: loc: Mean vector of the distribution. Can also be a batch of vectors. If not specified, it defaults to zeros. At least one of `loc` and `scale_diag` must be specified. scale_diag: Vector of standard deviations. Can also be a batch of vectors. If not specified, it defaults to ones. At least one of `loc` and `scale_diag` must be specified. """ _check_parameters(loc, scale_diag) if scale_diag is None: loc = conversion.as_float_array(loc) scale_diag = jnp.ones(loc.shape[-1], loc.dtype) elif loc is None: scale_diag = conversion.as_float_array(scale_diag) loc = jnp.zeros(scale_diag.shape[-1], scale_diag.dtype) else: loc = conversion.as_float_array(loc) scale_diag = conversion.as_float_array(scale_diag) # Add leading dimensions to the paramteters to match the batch shape. This # prevents automatic rank promotion. broadcasted_shapes = jnp.broadcast_shapes(loc.shape, scale_diag.shape) loc = jnp.expand_dims( loc, axis=list(range(len(broadcasted_shapes) - loc.ndim))) scale_diag = jnp.expand_dims( scale_diag, axis=list(range(len(broadcasted_shapes) - scale_diag.ndim))) bias = jnp.zeros_like(loc, shape=loc.shape[-1:]) bias = jnp.expand_dims( bias, axis=list(range(len(broadcasted_shapes) - bias.ndim))) scale = DiagLinear(scale_diag) super().__init__(loc=loc, scale=scale) self._scale_diag = scale_diag @property def scale_diag(self) -> Array: """Scale of the distribution.""" return jnp.broadcast_to( self._scale_diag, self.batch_shape + self.event_shape) def _standardize(self, value: Array) -> Array: return (value - self._loc) / self._scale_diag def cdf(self, value: EventT) -> Array: """See `Distribution.cdf`.""" return jnp.prod(jax.scipy.special.ndtr(self._standardize(value)), axis=-1) def log_cdf(self, value: EventT) -> Array: """See `Distribution.log_cdf`.""" return jnp.sum( jax.scipy.special.log_ndtr(self._standardize(value)), axis=-1) def __getitem__(self, index) -> 'MultivariateNormalDiag': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return MultivariateNormalDiag( loc=self.loc[index], scale_diag=self.scale_diag[index])
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_diag: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None)
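A minimal usage sketch for the diagonal-covariance class above, assuming the `distrax.MultivariateNormalDiag` export (values hypothetical):

import jax
import jax.numpy as jnp
import distrax

dist = distrax.MultivariateNormalDiag(
    loc=jnp.array([0.0, 1.0]), scale_diag=jnp.array([1.0, 0.5]))
x = dist.sample(seed=jax.random.PRNGKey(0))  # shape (2,)
lp = dist.log_prob(x)                        # scalar
c = dist.cdf(jnp.array([0.0, 1.0]))          # product of per-dimension normal CDFs, here 0.25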
56,053
distrax._src.distributions.mvn_diag
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'MultivariateNormalDiag': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return MultivariateNormalDiag( loc=self.loc[index], scale_diag=self.scale_diag[index])
(self, index) -> distrax._src.distributions.mvn_diag.MultivariateNormalDiag
56,054
distrax._src.distributions.mvn_diag
__init__
Initializes a MultivariateNormalDiag distribution. Args: loc: Mean vector of the distribution. Can also be a batch of vectors. If not specified, it defaults to zeros. At least one of `loc` and `scale_diag` must be specified. scale_diag: Vector of standard deviations. Can also be a batch of vectors. If not specified, it defaults to ones. At least one of `loc` and `scale_diag` must be specified.
def __init__(self, loc: Optional[Array] = None, scale_diag: Optional[Array] = None): """Initializes a MultivariateNormalDiag distribution. Args: loc: Mean vector of the distribution. Can also be a batch of vectors. If not specified, it defaults to zeros. At least one of `loc` and `scale_diag` must be specified. scale_diag: Vector of standard deviations. Can also be a batch of vectors. If not specified, it defaults to ones. At least one of `loc` and `scale_diag` must be specified. """ _check_parameters(loc, scale_diag) if scale_diag is None: loc = conversion.as_float_array(loc) scale_diag = jnp.ones(loc.shape[-1], loc.dtype) elif loc is None: scale_diag = conversion.as_float_array(scale_diag) loc = jnp.zeros(scale_diag.shape[-1], scale_diag.dtype) else: loc = conversion.as_float_array(loc) scale_diag = conversion.as_float_array(scale_diag) # Add leading dimensions to the paramteters to match the batch shape. This # prevents automatic rank promotion. broadcasted_shapes = jnp.broadcast_shapes(loc.shape, scale_diag.shape) loc = jnp.expand_dims( loc, axis=list(range(len(broadcasted_shapes) - loc.ndim))) scale_diag = jnp.expand_dims( scale_diag, axis=list(range(len(broadcasted_shapes) - scale_diag.ndim))) bias = jnp.zeros_like(loc, shape=loc.shape[-1:]) bias = jnp.expand_dims( bias, axis=list(range(len(broadcasted_shapes) - bias.ndim))) scale = DiagLinear(scale_diag) super().__init__(loc=loc, scale=scale) self._scale_diag = scale_diag
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_diag: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None)
56,056
distrax._src.distributions.transformed
_infer_shapes_and_dtype
Infer the batch shape, event shape, and dtype by tracing `forward`.
def _infer_shapes_and_dtype(self): """Infer the batch shape, event shape, and dtype by tracing `forward`.""" dummy_shape = self.distribution.batch_shape + self.distribution.event_shape dummy = jnp.zeros(dummy_shape, dtype=self.distribution.dtype) shape_dtype = jax.eval_shape(self.bijector.forward, dummy) self._dtype = shape_dtype.dtype if self.bijector.event_ndims_out == 0: self._event_shape = () self._batch_shape = shape_dtype.shape else: # pylint: disable-next=invalid-unary-operand-type self._event_shape = shape_dtype.shape[-self.bijector.event_ndims_out:] # pylint: disable-next=invalid-unary-operand-type self._batch_shape = shape_dtype.shape[:-self.bijector.event_ndims_out]
(self)
56,058
distrax._src.distributions.transformed
_sample_n
Returns `n` samples.
def _sample_n(self, key: PRNGKey, n: int) -> Array: """Returns `n` samples.""" x = self.distribution.sample(seed=key, sample_shape=n) y = jax.vmap(self.bijector.forward)(x) return y
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,059
distrax._src.distributions.transformed
_sample_n_and_log_prob
Returns `n` samples and their log probs. This function is more efficient than calling `sample` and `log_prob` separately, because it uses only the forward methods of the bijector. It also works for bijectors that don't implement inverse methods. Args: key: PRNG key. n: Number of samples to generate. Returns: A tuple of `n` samples and their log probs.
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]: """Returns `n` samples and their log probs. This function is more efficient than calling `sample` and `log_prob` separately, because it uses only the forward methods of the bijector. It also works for bijectors that don't implement inverse methods. Args: key: PRNG key. n: Number of samples to generate. Returns: A tuple of `n` samples and their log probs. """ x, lp_x = self.distribution.sample_and_log_prob(seed=key, sample_shape=n) y, fldj = jax.vmap(self.bijector.forward_and_log_det)(x) lp_y = jax.vmap(jnp.subtract)(lp_x, fldj) return y, lp_y
(self, key: jax.Array, n: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
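A short sketch of the forward-only path described above, assuming the `distrax.Transformed` and `distrax.ScalarAffine` exports:

import jax
import distrax

base = distrax.Normal(loc=0.0, scale=1.0)
dist = distrax.Transformed(base, distrax.ScalarAffine(shift=1.0, scale=2.0))
# Only `forward_and_log_det` is used, so the bijector's inverse is never required.
y, log_p = dist.sample_and_log_prob(seed=jax.random.PRNGKey(0), sample_shape=3)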
56,060
distrax._src.distributions.mvn_diag
_standardize
null
def _standardize(self, value: Array) -> Array: return (value - self._loc) / self._scale_diag
(self, value: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,061
distrax._src.distributions.mvn_diag
cdf
See `Distribution.cdf`.
def cdf(self, value: EventT) -> Array: """See `Distribution.cdf`.""" return jnp.prod(jax.scipy.special.ndtr(self._standardize(value)), axis=-1)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,062
distrax._src.distributions.mvn_from_bijector
covariance
Calculates the covariance matrix. Unlike TFP, which would drop leading dimensions, in Distrax the covariance matrix always has shape `batch_shape + (num_dims, num_dims)`. This helps to keep things simple and predictable. Returns: The covariance matrix, of shape `k x k` (broadcasted to match the batch shape of the distribution).
def covariance(self) -> Array: """Calculates the covariance matrix. Unlike TFP, which would drop leading dimensions, in Distrax the covariance matrix always has shape `batch_shape + (num_dims, num_dims)`. This helps to keep things simple and predictable. Returns: The covariance matrix, of shape `k x k` (broadcasted to match the batch shape of the distribution). """ if isinstance(self.scale, diag_linear.DiagLinear): result = jnp.vectorize(jnp.diag, signature='(k)->(k,k)')(self.variance()) else: result = jax.vmap(self.scale.forward, in_axes=-2, out_axes=-2)( self._scale.matrix) return jnp.broadcast_to( result, self.batch_shape + self.event_shape + self.event_shape)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,064
distrax._src.distributions.transformed
entropy
Calculates the Shannon entropy (in Nats). Only works for bijectors with constant Jacobian determinant. Args: input_hint: an example sample from the base distribution, used to compute the constant forward log-determinant. If not specified, it is computed using a zero array of the shape and dtype of a sample from the base distribution. Returns: the entropy of the distribution. Raises: NotImplementedError: if bijector's Jacobian determinant is not known to be constant.
def entropy( # pylint: disable=arguments-differ self, input_hint: Optional[Array] = None) -> Array: """Calculates the Shannon entropy (in Nats). Only works for bijectors with constant Jacobian determinant. Args: input_hint: an example sample from the base distribution, used to compute the constant forward log-determinant. If not specified, it is computed using a zero array of the shape and dtype of a sample from the base distribution. Returns: the entropy of the distribution. Raises: NotImplementedError: if bijector's Jacobian determinant is not known to be constant. """ if self.bijector.is_constant_log_det: if input_hint is None: shape = self.distribution.batch_shape + self.distribution.event_shape input_hint = jnp.zeros(shape, dtype=self.distribution.dtype) entropy = self.distribution.entropy() fldj = self.bijector.forward_log_det_jacobian(input_hint) return entropy + fldj else: raise NotImplementedError( "`entropy` is not implemented for this transformed distribution, " "because its bijector's Jacobian determinant is not known to be " "constant.")
(self, input_hint: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
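For a bijector with constant Jacobian determinant, the entropy above is the base entropy plus the constant forward log-determinant. A quick check with a scalar affine transform (same assumed exports as above):

import jax.numpy as jnp
import distrax

base = distrax.Normal(loc=0.0, scale=1.0)
dist = distrax.Transformed(base, distrax.ScalarAffine(shift=0.0, scale=3.0))
h = dist.entropy()                       # no `input_hint` needed: zeros are a valid input here
expected = base.entropy() + jnp.log(3.0)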
56,066
distrax._src.distributions.mvn_diag
log_cdf
See `Distribution.log_cdf`.
def log_cdf(self, value: EventT) -> Array: """See `Distribution.log_cdf`.""" return jnp.sum( jax.scipy.special.log_ndtr(self._standardize(value)), axis=-1)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,067
distrax._src.distributions.transformed
log_prob
See `Distribution.log_prob`.
def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" x, ildj_y = self.bijector.inverse_and_log_det(value) lp_x = self.distribution.log_prob(x) lp_y = lp_x + ildj_y return lp_y
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,070
distrax._src.distributions.mvn_from_bijector
median
Calculates the median.
def median(self) -> Array: """Calculates the median.""" return self.loc
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,075
distrax._src.distributions.mvn_from_bijector
stddev
Calculates the standard deviation (the square root of the variance).
def stddev(self) -> Array: """Calculates the standard deviation (the square root of the variance).""" if isinstance(self.scale, diag_linear.DiagLinear): result = jnp.abs(self.scale.diag) else: result = jnp.sqrt(self.variance()) return jnp.broadcast_to(result, self.batch_shape + self.event_shape)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,078
distrax._src.distributions.mvn_from_bijector
variance
Calculates the variance of all one-dimensional marginals.
def variance(self) -> Array: """Calculates the variance of all one-dimensional marginals.""" if isinstance(self.scale, diag_linear.DiagLinear): result = jnp.square(self.scale.diag) else: scale_matrix = self._scale.matrix result = jnp.sum(scale_matrix * scale_matrix, axis=-1) return jnp.broadcast_to(result, self.batch_shape + self.event_shape)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,079
distrax._src.distributions.mvn_diag_plus_low_rank
MultivariateNormalDiagPlusLowRank
Multivariate normal distribution on `R^k`. The `MultivariateNormalDiagPlusLowRank` distribution is parameterized by a location (mean) vector `b` and a scale matrix `S` that has the following structure: `S = diag(D) + U @ V.T`, where `D` is a `k`-length vector, and both `U` and `V` are `k x r` matrices (with `r < k` typically). The covariance matrix of the multivariate normal distribution is `C = S @ S.T`. This class makes no attempt to verify that the scale matrix `S` is invertible, which happens if and only if both `diag(D)` and `I + V^T diag(D)^{-1} U` are invertible. It is the responsibility of the user to make sure that this is the case.
class MultivariateNormalDiagPlusLowRank(MultivariateNormalFromBijector): """Multivariate normal distribution on `R^k`. The `MultivariateNormalDiagPlusLowRank` distribution is parameterized by a location (mean) vector `b` and a scale matrix `S` that has the following structure: `S = diag(D) + U @ V.T`, where `D` is a `k`-length vector, and both `U` and `V` are `k x r` matrices (with `r < k` typically). The covariance matrix of the multivariate normal distribution is `C = S @ S.T`. This class makes no attempt to verify that the scale matrix `S` is invertible, which happens if and only if both `diag(D)` and `I + V^T diag(D)^{-1} U` are invertible. It is the responsibility of the user to make sure that this is the case. """ equiv_tfp_cls = tfd.MultivariateNormalDiagPlusLowRank def __init__(self, loc: Optional[Array] = None, scale_diag: Optional[Array] = None, scale_u_matrix: Optional[Array] = None, scale_v_matrix: Optional[Array] = None): """Initializes a MultivariateNormalDiagPlusLowRank distribution. Args: loc: Mean vector of the distribution of shape `k` (can also be a batch of such vectors). If not specified, it defaults to zeros. scale_diag: The diagonal matrix added to the scale `S`, specified by a `k`-length vector containing its diagonal entries (or a batch of vectors). If not specified, the diagonal matrix defaults to the identity. scale_u_matrix: The low-rank matrix `U` that specifies the scale, as described in the class docstring. It is a `k x r` matrix (or a batch of such matrices). If not specified, it defaults to zeros. At least one of `loc`, `scale_diag`, and `scale_u_matrix` must be specified. scale_v_matrix: The low-rank matrix `V` that specifies the scale, as described in the class docstring. It is a `k x r` matrix (or a batch of such matrices). If not specified, it defaults to `scale_u_matrix`. It can only be specified if `scale_u_matrix` is also specified. """ loc = None if loc is None else conversion.as_float_array(loc) scale_diag = None if scale_diag is None else conversion.as_float_array( scale_diag) scale_u_matrix = ( None if scale_u_matrix is None else conversion.as_float_array( scale_u_matrix)) scale_v_matrix = ( None if scale_v_matrix is None else conversion.as_float_array( scale_v_matrix)) _check_parameters(loc, scale_diag, scale_u_matrix, scale_v_matrix) if loc is not None: num_dims = loc.shape[-1] elif scale_diag is not None: num_dims = scale_diag.shape[-1] elif scale_u_matrix is not None: num_dims = scale_u_matrix.shape[-2] dtype = jnp.result_type( *[x for x in [loc, scale_diag, scale_u_matrix, scale_v_matrix] if x is not None]) if loc is None: loc = jnp.zeros((num_dims,), dtype=dtype) self._scale_diag = scale_diag if scale_diag is None: self._scale_diag = jnp.ones((num_dims,), dtype=dtype) self._scale_u_matrix = scale_u_matrix if scale_u_matrix is None: self._scale_u_matrix = jnp.zeros((num_dims, 1), dtype=dtype) self._scale_v_matrix = scale_v_matrix if scale_v_matrix is None: self._scale_v_matrix = self._scale_u_matrix if scale_u_matrix is None: # The scale matrix is diagonal. 
scale = DiagLinear(self._scale_diag) else: scale = DiagPlusLowRankLinear( u_matrix=self._scale_u_matrix, v_matrix=self._scale_v_matrix, diag=self._scale_diag) super().__init__(loc=loc, scale=scale) @property def scale_diag(self) -> Array: """Diagonal matrix that is added to the scale.""" return jnp.broadcast_to( self._scale_diag, self.batch_shape + self.event_shape) @property def scale_u_matrix(self) -> Array: """Matrix `U` that defines the low-rank part of the scale matrix.""" return jnp.broadcast_to( self._scale_u_matrix, self.batch_shape + self._scale_u_matrix.shape[-2:]) @property def scale_v_matrix(self) -> Array: """Matrix `V` that defines the low-rank part of the scale matrix.""" return jnp.broadcast_to( self._scale_v_matrix, self.batch_shape + self._scale_v_matrix.shape[-2:]) def __getitem__(self, index) -> 'MultivariateNormalDiagPlusLowRank': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return MultivariateNormalDiagPlusLowRank( loc=self.loc[index], scale_diag=self.scale_diag[index], scale_u_matrix=self.scale_u_matrix[index], scale_v_matrix=self.scale_v_matrix[index])
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_diag: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_u_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_v_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None)
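A minimal sketch of the diag-plus-low-rank parameterization above, assuming the `distrax.MultivariateNormalDiagPlusLowRank` export (values hypothetical):

import jax
import jax.numpy as jnp
import distrax

k, r = 4, 2
dist = distrax.MultivariateNormalDiagPlusLowRank(
    loc=jnp.zeros(k),
    scale_diag=jnp.ones(k),
    scale_u_matrix=0.1 * jnp.ones((k, r)))   # `scale_v_matrix` defaults to `scale_u_matrix`
x = dist.sample(seed=jax.random.PRNGKey(0))  # shape (4,)
cov = dist.covariance()                      # S @ S.T with S = diag(D) + U @ V.T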
56,080
distrax._src.distributions.mvn_diag_plus_low_rank
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'MultivariateNormalDiagPlusLowRank': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return MultivariateNormalDiagPlusLowRank( loc=self.loc[index], scale_diag=self.scale_diag[index], scale_u_matrix=self.scale_u_matrix[index], scale_v_matrix=self.scale_v_matrix[index])
(self, index) -> distrax._src.distributions.mvn_diag_plus_low_rank.MultivariateNormalDiagPlusLowRank
56,081
distrax._src.distributions.mvn_diag_plus_low_rank
__init__
Initializes a MultivariateNormalDiagPlusLowRank distribution. Args: loc: Mean vector of the distribution of shape `k` (can also be a batch of such vectors). If not specified, it defaults to zeros. scale_diag: The diagonal matrix added to the scale `S`, specified by a `k`-length vector containing its diagonal entries (or a batch of vectors). If not specified, the diagonal matrix defaults to the identity. scale_u_matrix: The low-rank matrix `U` that specifies the scale, as described in the class docstring. It is a `k x r` matrix (or a batch of such matrices). If not specified, it defaults to zeros. At least one of `loc`, `scale_diag`, and `scale_u_matrix` must be specified. scale_v_matrix: The low-rank matrix `V` that specifies the scale, as described in the class docstring. It is a `k x r` matrix (or a batch of such matrices). If not specified, it defaults to `scale_u_matrix`. It can only be specified if `scale_u_matrix` is also specified.
def __init__(self, loc: Optional[Array] = None, scale_diag: Optional[Array] = None, scale_u_matrix: Optional[Array] = None, scale_v_matrix: Optional[Array] = None): """Initializes a MultivariateNormalDiagPlusLowRank distribution. Args: loc: Mean vector of the distribution of shape `k` (can also be a batch of such vectors). If not specified, it defaults to zeros. scale_diag: The diagonal matrix added to the scale `S`, specified by a `k`-length vector containing its diagonal entries (or a batch of vectors). If not specified, the diagonal matrix defaults to the identity. scale_u_matrix: The low-rank matrix `U` that specifies the scale, as described in the class docstring. It is a `k x r` matrix (or a batch of such matrices). If not specified, it defaults to zeros. At least one of `loc`, `scale_diag`, and `scale_u_matrix` must be specified. scale_v_matrix: The low-rank matrix `V` that specifies the scale, as described in the class docstring. It is a `k x r` matrix (or a batch of such matrices). If not specified, it defaults to `scale_u_matrix`. It can only be specified if `scale_u_matrix` is also specified. """ loc = None if loc is None else conversion.as_float_array(loc) scale_diag = None if scale_diag is None else conversion.as_float_array( scale_diag) scale_u_matrix = ( None if scale_u_matrix is None else conversion.as_float_array( scale_u_matrix)) scale_v_matrix = ( None if scale_v_matrix is None else conversion.as_float_array( scale_v_matrix)) _check_parameters(loc, scale_diag, scale_u_matrix, scale_v_matrix) if loc is not None: num_dims = loc.shape[-1] elif scale_diag is not None: num_dims = scale_diag.shape[-1] elif scale_u_matrix is not None: num_dims = scale_u_matrix.shape[-2] dtype = jnp.result_type( *[x for x in [loc, scale_diag, scale_u_matrix, scale_v_matrix] if x is not None]) if loc is None: loc = jnp.zeros((num_dims,), dtype=dtype) self._scale_diag = scale_diag if scale_diag is None: self._scale_diag = jnp.ones((num_dims,), dtype=dtype) self._scale_u_matrix = scale_u_matrix if scale_u_matrix is None: self._scale_u_matrix = jnp.zeros((num_dims, 1), dtype=dtype) self._scale_v_matrix = scale_v_matrix if scale_v_matrix is None: self._scale_v_matrix = self._scale_u_matrix if scale_u_matrix is None: # The scale matrix is diagonal. scale = DiagLinear(self._scale_diag) else: scale = DiagPlusLowRankLinear( u_matrix=self._scale_u_matrix, v_matrix=self._scale_v_matrix, diag=self._scale_diag) super().__init__(loc=loc, scale=scale)
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_diag: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_u_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_v_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None)
56,105
distrax._src.distributions.mvn_from_bijector
MultivariateNormalFromBijector
Multivariate normal distribution on `R^k`. The multivariate normal over `x` is characterized by an invertible affine transformation `x = f(z) = A @ z + b`, where `z` is a random variable that follows a standard multivariate normal on `R^k`, i.e., `p(z) = N(0, I_k)`, `A` is a `k x k` transformation matrix, and `b` is a `k`-dimensional vector. The resulting PDF on `x` is a multivariate normal, `p(x) = N(b, C)`, where `C = A @ A.T` is the covariance matrix. Additional leading dimensions (if any) index batches. The transformation `x = f(z)` must be specified by a linear scale bijector implementing the operation `A @ z` and a shift (or location) term `b`.
class MultivariateNormalFromBijector(transformed.Transformed): """Multivariate normal distribution on `R^k`. The multivariate normal over `x` is characterized by an invertible affine transformation `x = f(z) = A @ z + b`, where `z` is a random variable that follows a standard multivariate normal on `R^k`, i.e., `p(z) = N(0, I_k)`, `A` is a `k x k` transformation matrix, and `b` is a `k`-dimensional vector. The resulting PDF on `x` is a multivariate normal, `p(x) = N(b, C)`, where `C = A @ A.T` is the covariance matrix. Additional leading dimensions (if any) index batches. The transformation `x = f(z)` must be specified by a linear scale bijector implementing the operation `A @ z` and a shift (or location) term `b`. """ def __init__(self, loc: Array, scale: linear.Linear): """Initializes the distribution. Args: loc: The term `b`, i.e., the mean of the multivariate normal distribution. scale: The bijector specifying the linear transformation `A @ z`, as described in the class docstring. """ _check_input_parameters_are_valid(scale, loc) batch_shape = jnp.broadcast_shapes(scale.batch_shape, loc.shape[:-1]) dtype = jnp.result_type(scale.dtype, loc.dtype) # Build a standard multivariate Gaussian with the right `batch_shape`. std_mvn_dist = independent.Independent( distribution=normal.Normal( loc=jnp.zeros(batch_shape + loc.shape[-1:], dtype=dtype), scale=1.), reinterpreted_batch_ndims=1) # Form the bijector `f(x) = Ax + b`. bijector = chain.Chain([block.Block(shift.Shift(loc), ndims=1), scale]) super().__init__(distribution=std_mvn_dist, bijector=bijector) self._scale = scale self._loc = loc self._event_shape = loc.shape[-1:] self._batch_shape = batch_shape self._dtype = dtype @property def scale(self) -> linear.Linear: """The scale bijector.""" return self._scale @property def loc(self) -> Array: """The `loc` parameter of the distribution.""" shape = self.batch_shape + self.event_shape return jnp.broadcast_to(self._loc, shape=shape) def mean(self) -> Array: """Calculates the mean.""" return self.loc def median(self) -> Array: """Calculates the median.""" return self.loc def mode(self) -> Array: """Calculates the mode.""" return self.loc def covariance(self) -> Array: """Calculates the covariance matrix. Unlike TFP, which would drop leading dimensions, in Distrax the covariance matrix always has shape `batch_shape + (num_dims, num_dims)`. This helps to keep things simple and predictable. Returns: The covariance matrix, of shape `k x k` (broadcasted to match the batch shape of the distribution). """ if isinstance(self.scale, diag_linear.DiagLinear): result = jnp.vectorize(jnp.diag, signature='(k)->(k,k)')(self.variance()) else: result = jax.vmap(self.scale.forward, in_axes=-2, out_axes=-2)( self._scale.matrix) return jnp.broadcast_to( result, self.batch_shape + self.event_shape + self.event_shape) def variance(self) -> Array: """Calculates the variance of all one-dimensional marginals.""" if isinstance(self.scale, diag_linear.DiagLinear): result = jnp.square(self.scale.diag) else: scale_matrix = self._scale.matrix result = jnp.sum(scale_matrix * scale_matrix, axis=-1) return jnp.broadcast_to(result, self.batch_shape + self.event_shape) def stddev(self) -> Array: """Calculates the standard deviation (the square root of the variance).""" if isinstance(self.scale, diag_linear.DiagLinear): result = jnp.abs(self.scale.diag) else: result = jnp.sqrt(self.variance()) return jnp.broadcast_to(result, self.batch_shape + self.event_shape)
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], scale: distrax._src.bijectors.linear.Linear)
56,107
distrax._src.distributions.mvn_from_bijector
__init__
Initializes the distribution. Args: loc: The term `b`, i.e., the mean of the multivariate normal distribution. scale: The bijector specifying the linear transformation `A @ z`, as described in the class docstring.
def __init__(self, loc: Array, scale: linear.Linear): """Initializes the distribution. Args: loc: The term `b`, i.e., the mean of the multivariate normal distribution. scale: The bijector specifying the linear transformation `A @ z`, as described in the class docstring. """ _check_input_parameters_are_valid(scale, loc) batch_shape = jnp.broadcast_shapes(scale.batch_shape, loc.shape[:-1]) dtype = jnp.result_type(scale.dtype, loc.dtype) # Build a standard multivariate Gaussian with the right `batch_shape`. std_mvn_dist = independent.Independent( distribution=normal.Normal( loc=jnp.zeros(batch_shape + loc.shape[-1:], dtype=dtype), scale=1.), reinterpreted_batch_ndims=1) # Form the bijector `f(x) = Ax + b`. bijector = chain.Chain([block.Block(shift.Shift(loc), ndims=1), scale]) super().__init__(distribution=std_mvn_dist, bijector=bijector) self._scale = scale self._loc = loc self._event_shape = loc.shape[-1:] self._batch_shape = batch_shape self._dtype = dtype
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], scale: distrax._src.bijectors.linear.Linear)
56,131
distrax._src.distributions.mvn_full_covariance
MultivariateNormalFullCovariance
Multivariate normal distribution on `R^k`. The `MultivariateNormalFullCovariance` distribution is parameterized by a `k`-length location (mean) vector `b` and a covariance matrix `C` of size `k x k` that must be positive definite and symmetric. This class makes no attempt to verify that `C` is positive definite or symmetric. It is the responsibility of the user to make sure that it is the case.
class MultivariateNormalFullCovariance(MultivariateNormalTri): """Multivariate normal distribution on `R^k`. The `MultivariateNormalFullCovariance` distribution is parameterized by a `k`-length location (mean) vector `b` and a covariance matrix `C` of size `k x k` that must be positive definite and symmetric. This class makes no attempt to verify that `C` is positive definite or symmetric. It is the responsibility of the user to make sure that it is the case. """ equiv_tfp_cls = tfd.MultivariateNormalFullCovariance def __init__(self, loc: Optional[Array] = None, covariance_matrix: Optional[Array] = None): """Initializes a MultivariateNormalFullCovariance distribution. Args: loc: Mean vector of the distribution of shape `k` (can also be a batch of such vectors). If not specified, it defaults to zeros. covariance_matrix: The covariance matrix `C`. It must be a `k x k` matrix (additional dimensions index batches). If not specified, it defaults to the identity. """ loc = None if loc is None else conversion.as_float_array(loc) covariance_matrix = None if covariance_matrix is None else ( conversion.as_float_array(covariance_matrix)) _check_parameters(loc, covariance_matrix) if loc is not None: num_dims = loc.shape[-1] elif covariance_matrix is not None: num_dims = covariance_matrix.shape[-1] dtype = jnp.result_type( *[x for x in [loc, covariance_matrix] if x is not None]) if loc is None: loc = jnp.zeros((num_dims,), dtype=dtype) if covariance_matrix is None: self._covariance_matrix = jnp.eye(num_dims, dtype=dtype) scale_tril = None else: self._covariance_matrix = covariance_matrix scale_tril = jnp.linalg.cholesky(covariance_matrix) super().__init__(loc=loc, scale_tri=scale_tril) @property def covariance_matrix(self) -> Array: """Covariance matrix `C`.""" return jnp.broadcast_to( self._covariance_matrix, self.batch_shape + self.event_shape + self.event_shape) def covariance(self) -> Array: """Covariance matrix `C`.""" return self.covariance_matrix def variance(self) -> Array: """Calculates the variance of all one-dimensional marginals.""" return jnp.vectorize(jnp.diag, signature='(k,k)->(k)')( self.covariance_matrix) def __getitem__(self, index) -> 'MultivariateNormalFullCovariance': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return MultivariateNormalFullCovariance( loc=self.loc[index], covariance_matrix=self.covariance_matrix[index])
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, covariance_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None)
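A minimal sketch of the full-covariance class above, assuming the `distrax.MultivariateNormalFullCovariance` export; internally the Cholesky factor of `C` is used as the triangular scale.

import jax
import jax.numpy as jnp
import distrax

cov = jnp.array([[2.0, 0.5],
                 [0.5, 1.0]])  # symmetric positive definite (user's responsibility)
dist = distrax.MultivariateNormalFullCovariance(
    loc=jnp.zeros(2), covariance_matrix=cov)
x = dist.sample(seed=jax.random.PRNGKey(0))  # shape (2,)
var = dist.variance()                        # diagonal of `cov`: [2.0, 1.0]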
56,132
distrax._src.distributions.mvn_full_covariance
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'MultivariateNormalFullCovariance': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return MultivariateNormalFullCovariance( loc=self.loc[index], covariance_matrix=self.covariance_matrix[index])
(self, index) -> distrax._src.distributions.mvn_full_covariance.MultivariateNormalFullCovariance
56,133
distrax._src.distributions.mvn_full_covariance
__init__
Initializes a MultivariateNormalFullCovariance distribution. Args: loc: Mean vector of the distribution of shape `k` (can also be a batch of such vectors). If not specified, it defaults to zeros. covariance_matrix: The covariance matrix `C`. It must be a `k x k` matrix (additional dimensions index batches). If not specified, it defaults to the identity.
def __init__(self, loc: Optional[Array] = None, covariance_matrix: Optional[Array] = None): """Initializes a MultivariateNormalFullCovariance distribution. Args: loc: Mean vector of the distribution of shape `k` (can also be a batch of such vectors). If not specified, it defaults to zeros. covariance_matrix: The covariance matrix `C`. It must be a `k x k` matrix (additional dimensions index batches). If not specified, it defaults to the identity. """ loc = None if loc is None else conversion.as_float_array(loc) covariance_matrix = None if covariance_matrix is None else ( conversion.as_float_array(covariance_matrix)) _check_parameters(loc, covariance_matrix) if loc is not None: num_dims = loc.shape[-1] elif covariance_matrix is not None: num_dims = covariance_matrix.shape[-1] dtype = jnp.result_type( *[x for x in [loc, covariance_matrix] if x is not None]) if loc is None: loc = jnp.zeros((num_dims,), dtype=dtype) if covariance_matrix is None: self._covariance_matrix = jnp.eye(num_dims, dtype=dtype) scale_tril = None else: self._covariance_matrix = covariance_matrix scale_tril = jnp.linalg.cholesky(covariance_matrix) super().__init__(loc=loc, scale_tri=scale_tril)
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, covariance_matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None)
56,140
distrax._src.distributions.mvn_full_covariance
covariance
Covariance matrix `C`.
def covariance(self) -> Array: """Covariance matrix `C`.""" return self.covariance_matrix
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,156
distrax._src.distributions.mvn_full_covariance
variance
Calculates the variance of all one-dimensional marginals.
def variance(self) -> Array: """Calculates the variance of all one-dimensional marginals.""" return jnp.vectorize(jnp.diag, signature='(k,k)->(k)')( self.covariance_matrix)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,157
distrax._src.distributions.mvn_tri
MultivariateNormalTri
Multivariate normal distribution on `R^k`. The `MultivariateNormalTri` distribution is parameterized by a `k`-length location (mean) vector `b` and a (lower or upper) triangular scale matrix `S` of size `k x k`. The covariance matrix is `C = S @ S.T`.
class MultivariateNormalTri(MultivariateNormalFromBijector): """Multivariate normal distribution on `R^k`. The `MultivariateNormalTri` distribution is parameterized by a `k`-length location (mean) vector `b` and a (lower or upper) triangular scale matrix `S` of size `k x k`. The covariance matrix is `C = S @ S.T`. """ equiv_tfp_cls = tfd.MultivariateNormalTriL def __init__(self, loc: Optional[Array] = None, scale_tri: Optional[Array] = None, is_lower: bool = True): """Initializes a MultivariateNormalTri distribution. Args: loc: Mean vector of the distribution of shape `k` (can also be a batch of such vectors). If not specified, it defaults to zeros. scale_tri: The scale matrix `S`. It must be a `k x k` triangular matrix (additional dimensions index batches). If `scale_tri` is not triangular, the entries above or below the main diagonal will be ignored. The parameter `is_lower` specifies if `scale_tri` is lower or upper triangular. It is the responsibility of the user to make sure that `scale_tri` only contains non-zero elements in its diagonal; this class makes no attempt to verify that. If `scale_tri` is not specified, it defaults to the identity. is_lower: Indicates if `scale_tri` is lower (if True) or upper (if False) triangular. """ loc = None if loc is None else conversion.as_float_array(loc) scale_tri = None if scale_tri is None else conversion.as_float_array( scale_tri) _check_parameters(loc, scale_tri) if loc is not None: num_dims = loc.shape[-1] elif scale_tri is not None: num_dims = scale_tri.shape[-1] dtype = jnp.result_type(*[x for x in [loc, scale_tri] if x is not None]) if loc is None: loc = jnp.zeros((num_dims,), dtype=dtype) if scale_tri is None: self._scale_tri = jnp.eye(num_dims, dtype=dtype) scale = DiagLinear(diag=jnp.ones(loc.shape[-1:], dtype=dtype)) else: tri_fn = jnp.tril if is_lower else jnp.triu self._scale_tri = tri_fn(scale_tri) scale = TriangularLinear(matrix=self._scale_tri, is_lower=is_lower) self._is_lower = is_lower super().__init__(loc=loc, scale=scale) @property def scale_tri(self) -> Array: """Triangular scale matrix `S`.""" return jnp.broadcast_to( self._scale_tri, self.batch_shape + self.event_shape + self.event_shape) @property def is_lower(self) -> bool: """Whether the `scale_tri` matrix is lower triangular.""" return self._is_lower def __getitem__(self, index) -> 'MultivariateNormalTri': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return MultivariateNormalTri( loc=self.loc[index], scale_tri=self.scale_tri[index], is_lower=self.is_lower)
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_tri: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, is_lower: bool = True)
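A hedged usage sketch for the class above, assuming it is re-exported as `distrax.MultivariateNormalTri`; per the docstring, the covariance of the resulting distribution is `C = S @ S.T`.

import jax
import jax.numpy as jnp
import distrax

scale_tri = jnp.array([[1.0, 0.0],
                       [0.5, 2.0]])                    # lower-triangular S
dist = distrax.MultivariateNormalTri(
    loc=jnp.array([0.0, 1.0]), scale_tri=scale_tri, is_lower=True)
x = dist.sample(seed=jax.random.PRNGKey(0))            # shape (2,)
lp = dist.log_prob(x)                                  # scalar log-density
# Covariance follows the S @ S.T parameterization from the class docstring.
assert jnp.allclose(dist.covariance(), scale_tri @ scale_tri.T)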
56,158
distrax._src.distributions.mvn_tri
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'MultivariateNormalTri':
  """See `Distribution.__getitem__`."""
  index = distribution.to_batch_shape_index(self.batch_shape, index)
  return MultivariateNormalTri(
      loc=self.loc[index],
      scale_tri=self.scale_tri[index],
      is_lower=self.is_lower)
(self, index) -> distrax._src.distributions.mvn_tri.MultivariateNormalTri
56,159
distrax._src.distributions.mvn_tri
__init__
Initializes a MultivariateNormalTri distribution.

Args:
  loc: Mean vector of the distribution of shape `k` (can also be a batch of
    such vectors). If not specified, it defaults to zeros.
  scale_tri: The scale matrix `S`. It must be a `k x k` triangular matrix
    (additional dimensions index batches). If `scale_tri` is not triangular,
    the entries above or below the main diagonal will be ignored. The parameter
    `is_lower` specifies if `scale_tri` is lower or upper triangular. It is the
    responsibility of the user to make sure that `scale_tri` only contains
    non-zero elements in its diagonal; this class makes no attempt to verify
    that. If `scale_tri` is not specified, it defaults to the identity.
  is_lower: Indicates if `scale_tri` is lower (if True) or upper (if False)
    triangular.
def __init__(self,
             loc: Optional[Array] = None,
             scale_tri: Optional[Array] = None,
             is_lower: bool = True):
  """Initializes a MultivariateNormalTri distribution.

  Args:
    loc: Mean vector of the distribution of shape `k` (can also be a batch of
      such vectors). If not specified, it defaults to zeros.
    scale_tri: The scale matrix `S`. It must be a `k x k` triangular matrix
      (additional dimensions index batches). If `scale_tri` is not triangular,
      the entries above or below the main diagonal will be ignored. The
      parameter `is_lower` specifies if `scale_tri` is lower or upper
      triangular. It is the responsibility of the user to make sure that
      `scale_tri` only contains non-zero elements in its diagonal; this class
      makes no attempt to verify that. If `scale_tri` is not specified, it
      defaults to the identity.
    is_lower: Indicates if `scale_tri` is lower (if True) or upper (if False)
      triangular.
  """
  loc = None if loc is None else conversion.as_float_array(loc)
  scale_tri = None if scale_tri is None else conversion.as_float_array(
      scale_tri)
  _check_parameters(loc, scale_tri)

  if loc is not None:
    num_dims = loc.shape[-1]
  elif scale_tri is not None:
    num_dims = scale_tri.shape[-1]

  dtype = jnp.result_type(*[x for x in [loc, scale_tri] if x is not None])

  if loc is None:
    loc = jnp.zeros((num_dims,), dtype=dtype)

  if scale_tri is None:
    self._scale_tri = jnp.eye(num_dims, dtype=dtype)
    scale = DiagLinear(diag=jnp.ones(loc.shape[-1:], dtype=dtype))
  else:
    tri_fn = jnp.tril if is_lower else jnp.triu
    self._scale_tri = tri_fn(scale_tri)
    scale = TriangularLinear(matrix=self._scale_tri, is_lower=is_lower)
  self._is_lower = is_lower
  super().__init__(loc=loc, scale=scale)
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, scale_tri: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, is_lower: bool = True)
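A short sketch of the defaulting behaviour described in the `__init__` docstring (assumed public `distrax` API): entries outside the selected triangle of `scale_tri` are dropped by `jnp.tril`/`jnp.triu`, and an omitted `loc` becomes a zero vector.

import jax.numpy as jnp
import distrax

full = jnp.array([[1.0, 9.0],    # the 9.0 lies above the diagonal ...
                  [0.5, 2.0]])
dist = distrax.MultivariateNormalTri(scale_tri=full, is_lower=True)
assert jnp.allclose(dist.scale_tri, jnp.tril(full))    # ... so it is ignored
assert jnp.allclose(dist.loc, jnp.zeros(2))            # default zero mean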
56,183
distrax._src.distributions.normal
Normal
Normal distribution with location `loc` and `scale` parameters.
class Normal(distribution.Distribution):
  """Normal distribution with location `loc` and `scale` parameters."""

  equiv_tfp_cls = tfd.Normal

  def __init__(self, loc: Numeric, scale: Numeric):
    """Initializes a Normal distribution.

    Args:
      loc: Mean of the distribution.
      scale: Standard deviation of the distribution.
    """
    super().__init__()
    self._loc = conversion.as_float_array(loc)
    self._scale = conversion.as_float_array(scale)

  @property
  def event_shape(self) -> Tuple[int, ...]:
    """Shape of event of distribution samples."""
    return ()

  @property
  def batch_shape(self) -> Tuple[int, ...]:
    """Shape of batch of distribution samples."""
    return jax.lax.broadcast_shapes(self._loc.shape, self._scale.shape)

  @property
  def loc(self) -> Array:
    """Mean of the distribution."""
    return jnp.broadcast_to(self._loc, self.batch_shape)

  @property
  def scale(self) -> Array:
    """Scale of the distribution."""
    return jnp.broadcast_to(self._scale, self.batch_shape)

  def _sample_from_std_normal(self, key: PRNGKey, n: int) -> Array:
    out_shape = (n,) + self.batch_shape
    dtype = jnp.result_type(self._loc, self._scale)
    return jax.random.normal(key, shape=out_shape, dtype=dtype)

  def _sample_n(self, key: PRNGKey, n: int) -> Array:
    """See `Distribution._sample_n`."""
    rnd = self._sample_from_std_normal(key, n)
    scale = jnp.expand_dims(self._scale, range(rnd.ndim - self._scale.ndim))
    loc = jnp.expand_dims(self._loc, range(rnd.ndim - self._loc.ndim))
    return scale * rnd + loc

  def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
    """See `Distribution._sample_n_and_log_prob`."""
    rnd = self._sample_from_std_normal(key, n)
    samples = self._scale * rnd + self._loc
    log_prob = -0.5 * jnp.square(rnd) - _half_log2pi - jnp.log(self._scale)
    return samples, log_prob

  def log_prob(self, value: EventT) -> Array:
    """See `Distribution.log_prob`."""
    log_unnormalized = -0.5 * jnp.square(self._standardize(value))
    log_normalization = _half_log2pi + jnp.log(self._scale)
    return log_unnormalized - log_normalization

  def cdf(self, value: EventT) -> Array:
    """See `Distribution.cdf`."""
    return jax.scipy.special.ndtr(self._standardize(value))

  def log_cdf(self, value: EventT) -> Array:
    """See `Distribution.log_cdf`."""
    return jax.scipy.special.log_ndtr(self._standardize(value))

  def survival_function(self, value: EventT) -> Array:
    """See `Distribution.survival_function`."""
    return jax.scipy.special.ndtr(-self._standardize(value))

  def log_survival_function(self, value: EventT) -> Array:
    """See `Distribution.log_survival_function`."""
    return jax.scipy.special.log_ndtr(-self._standardize(value))

  def _standardize(self, value: EventT) -> Array:
    return (value - self._loc) / self._scale

  def entropy(self) -> Array:
    """Calculates the Shannon entropy (in nats)."""
    log_normalization = _half_log2pi + jnp.log(self.scale)
    entropy = 0.5 + log_normalization
    return entropy

  def mean(self) -> Array:
    """Calculates the mean."""
    return self.loc

  def variance(self) -> Array:
    """Calculates the variance."""
    return jnp.square(self.scale)

  def stddev(self) -> Array:
    """Calculates the standard deviation."""
    return self.scale

  def mode(self) -> Array:
    """Calculates the mode."""
    return self.mean()

  def median(self) -> Array:
    """Calculates the median."""
    return self.mean()

  def __getitem__(self, index) -> 'Normal':
    """See `Distribution.__getitem__`."""
    index = distribution.to_batch_shape_index(self.batch_shape, index)
    return Normal(loc=self.loc[index], scale=self.scale[index])
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
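A minimal usage sketch for `Normal`, assuming the usual `distrax.Normal` public alias; `sample_and_log_prob` reuses the standardized draw to return the sample and its log-density in one pass, as in `_sample_n_and_log_prob` above.

import jax
import distrax

dist = distrax.Normal(loc=0., scale=1.)
key = jax.random.PRNGKey(42)
samples = dist.sample(seed=key, sample_shape=(3,))       # three draws
log_probs = dist.log_prob(samples)                       # their log-densities
samples2, log_probs2 = dist.sample_and_log_prob(seed=key, sample_shape=(3,))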
56,184
distrax._src.distributions.normal
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'Normal':
  """See `Distribution.__getitem__`."""
  index = distribution.to_batch_shape_index(self.batch_shape, index)
  return Normal(loc=self.loc[index], scale=self.scale[index])
(self, index) -> distrax._src.distributions.normal.Normal
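An indexing sketch for `__getitem__` (assumed `distrax.Normal` alias): slicing a batched `Normal` slices its batch dimensions and leaves the event untouched.

import jax.numpy as jnp
import distrax

batched = distrax.Normal(loc=jnp.array([0., 1., 2.]), scale=jnp.ones(3))
first = batched[0]      # scalar Normal with loc=0., scale=1.
tail = batched[1:]      # Normal with batch_shape (2,)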
56,185
distrax._src.distributions.normal
__init__
Initializes a Normal distribution.

Args:
  loc: Mean of the distribution.
  scale: Standard deviation of the distribution.
def __init__(self, loc: Numeric, scale: Numeric):
  """Initializes a Normal distribution.

  Args:
    loc: Mean of the distribution.
    scale: Standard deviation of the distribution.
  """
  super().__init__()
  self._loc = conversion.as_float_array(loc)
  self._scale = conversion.as_float_array(scale)
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
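A sketch of the broadcasting implied by the `batch_shape` property above: `loc` and `scale` may have different shapes, and the batch shape is their broadcast.

import jax.numpy as jnp
import distrax

dist = distrax.Normal(loc=jnp.zeros((2, 1)), scale=jnp.ones(3))
assert dist.batch_shape == (2, 3)    # (2, 1) broadcast against (3,)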
56,209
distrax._src.distributions.one_hot_categorical
OneHotCategorical
OneHotCategorical distribution.
class OneHotCategorical(categorical.Categorical):
  """OneHotCategorical distribution."""

  equiv_tfp_cls = tfd.OneHotCategorical

  def __init__(self,
               logits: Optional[Array] = None,
               probs: Optional[Array] = None,
               dtype: Union[jnp.dtype, type[Any]] = int):
    """Initializes a OneHotCategorical distribution.

    Args:
      logits: Logit transform of the probability of each category. Only one
        of `logits` or `probs` can be specified.
      probs: Probability of each category. Only one of `logits` or `probs` can
        be specified.
      dtype: The type of event samples.
    """
    super().__init__(logits=logits, probs=probs, dtype=dtype)

  @property
  def event_shape(self) -> Tuple[int, ...]:
    """Shape of event of distribution samples."""
    return (self.num_categories,)

  def _sample_n(self, key: PRNGKey, n: int) -> Array:
    """See `Distribution._sample_n`."""
    new_shape = (n,) + self.logits.shape[:-1]
    is_valid = jnp.logical_and(
        jnp.all(jnp.isfinite(self.probs), axis=-1, keepdims=True),
        jnp.all(self.probs >= 0, axis=-1, keepdims=True))
    draws = jax.random.categorical(
        key=key, logits=self.logits, axis=-1, shape=new_shape)
    draws_one_hot = jax.nn.one_hot(
        draws, num_classes=self.num_categories).astype(self._dtype)
    return jnp.where(is_valid, draws_one_hot, jnp.ones_like(draws_one_hot) * -1)

  def log_prob(self, value: EventT) -> Array:
    """See `Distribution.log_prob`."""
    return jnp.sum(math.multiply_no_nan(self.logits, value), axis=-1)

  def prob(self, value: EventT) -> Array:
    """See `Distribution.prob`."""
    return jnp.sum(math.multiply_no_nan(self.probs, value), axis=-1)

  def mode(self) -> Array:
    """Calculates the mode."""
    preferences = self._probs if self._logits is None else self._logits
    assert preferences is not None
    greedy_index = jnp.argmax(preferences, axis=-1)
    return jax.nn.one_hot(greedy_index, self.num_categories).astype(self._dtype)

  def cdf(self, value: EventT) -> Array:
    """See `Distribution.cdf`."""
    return jnp.sum(math.multiply_no_nan(
        jnp.cumsum(self.probs, axis=-1), value), axis=-1)

  def __getitem__(self, index) -> 'OneHotCategorical':
    """See `Distribution.__getitem__`."""
    index = distribution.to_batch_shape_index(self.batch_shape, index)
    if self._logits is not None:
      return OneHotCategorical(logits=self.logits[index], dtype=self._dtype)
    return OneHotCategorical(probs=self.probs[index], dtype=self._dtype)
(logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
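A hedged usage sketch for `OneHotCategorical` (assuming the `distrax.OneHotCategorical` alias): samples are one-hot vectors over `num_categories` classes.

import jax
import jax.numpy as jnp
import distrax

dist = distrax.OneHotCategorical(
    logits=jnp.array([0.2, 0.5, -1.0]), dtype=jnp.float32)
sample = dist.sample(seed=jax.random.PRNGKey(0))   # one-hot vector, length 3
lp = dist.log_prob(sample)                         # log-probability of that class
mode = dist.mode()                                 # one-hot at the largest logit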
56,210
distrax._src.distributions.one_hot_categorical
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'OneHotCategorical':
  """See `Distribution.__getitem__`."""
  index = distribution.to_batch_shape_index(self.batch_shape, index)
  if self._logits is not None:
    return OneHotCategorical(logits=self.logits[index], dtype=self._dtype)
  return OneHotCategorical(probs=self.probs[index], dtype=self._dtype)
(self, index) -> distrax._src.distributions.one_hot_categorical.OneHotCategorical
56,211
distrax._src.distributions.one_hot_categorical
__init__
Initializes a OneHotCategorical distribution.

Args:
  logits: Logit transform of the probability of each category. Only one of
    `logits` or `probs` can be specified.
  probs: Probability of each category. Only one of `logits` or `probs` can
    be specified.
  dtype: The type of event samples.
def __init__(self,
             logits: Optional[Array] = None,
             probs: Optional[Array] = None,
             dtype: Union[jnp.dtype, type[Any]] = int):
  """Initializes a OneHotCategorical distribution.

  Args:
    logits: Logit transform of the probability of each category. Only one of
      `logits` or `probs` can be specified.
    probs: Probability of each category. Only one of `logits` or `probs` can
      be specified.
    dtype: The type of event samples.
  """
  super().__init__(logits=logits, probs=probs, dtype=dtype)
(self, logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, probs: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
56,214
distrax._src.distributions.one_hot_categorical
_sample_n
See `Distribution._sample_n`.
def _sample_n(self, key: PRNGKey, n: int) -> Array:
  """See `Distribution._sample_n`."""
  new_shape = (n,) + self.logits.shape[:-1]
  is_valid = jnp.logical_and(
      jnp.all(jnp.isfinite(self.probs), axis=-1, keepdims=True),
      jnp.all(self.probs >= 0, axis=-1, keepdims=True))
  draws = jax.random.categorical(
      key=key, logits=self.logits, axis=-1, shape=new_shape)
  draws_one_hot = jax.nn.one_hot(
      draws, num_classes=self.num_categories).astype(self._dtype)
  return jnp.where(is_valid, draws_one_hot, jnp.ones_like(draws_one_hot) * -1)
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
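A sketch of the validity guard in `_sample_n` above (assumed `distrax.OneHotCategorical` alias): batch rows whose probabilities are not finite and non-negative yield samples filled with -1 instead of a one-hot vector.

import jax
import jax.numpy as jnp
import distrax

dist = distrax.OneHotCategorical(probs=jnp.array([[0.3, 0.7],
                                                  [jnp.nan, 0.5]]))
draws = dist.sample(seed=jax.random.PRNGKey(0))    # shape (2, 2)
# draws[0] is a valid one-hot row; draws[1] is [-1, -1] because its
# probabilities fail the finiteness check.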
56,216
distrax._src.distributions.one_hot_categorical
cdf
See `Distribution.cdf`.
def cdf(self, value: EventT) -> Array:
  """See `Distribution.cdf`."""
  return jnp.sum(math.multiply_no_nan(
      jnp.cumsum(self.probs, axis=-1), value), axis=-1)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
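A worked example of the `cdf` above: it is the cumulative sum of the class probabilities selected by the one-hot `value` (assumed `distrax.OneHotCategorical` alias).

import jax.numpy as jnp
import distrax

dist = distrax.OneHotCategorical(probs=jnp.array([0.1, 0.3, 0.6]))
value = jnp.array([0., 1., 0.])      # one-hot encoding of class 1
cdf_val = dist.cdf(value)            # cumsum is [0.1, 0.4, 1.0] -> 0.4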