index | package | name | docstring | code | signature |
---|---|---|---|---|---|
55,662 |
distrax._src.distributions.epsilon_greedy
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'EpsilonGreedy':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return EpsilonGreedy(
preferences=self.preferences[index],
epsilon=self.epsilon,
dtype=self.dtype)
|
(self, index) -> distrax._src.distributions.epsilon_greedy.EpsilonGreedy
|
55,663 |
distrax._src.distributions.epsilon_greedy
|
__init__
|
Initializes an EpsilonGreedy distribution.
Args:
preferences: Unnormalized preferences.
epsilon: Mixing parameter ε.
dtype: The type of event samples.
|
def __init__(self,
preferences: Array,
epsilon: float,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes an EpsilonGreedy distribution.
Args:
preferences: Unnormalized preferences.
epsilon: Mixing parameter ε.
dtype: The type of event samples.
"""
self._preferences = jnp.asarray(preferences)
self._epsilon = epsilon
greedy_probs = _argmax_with_random_tie_breaking(self._preferences)
probs = _mix_probs_with_uniform(greedy_probs, epsilon)
super().__init__(probs=probs, dtype=dtype)
|
(self, preferences: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], epsilon: float, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
|
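The `EpsilonGreedy` constructor above mixes an argmax ("greedy") categorical with a uniform distribution. A minimal usage sketch, assuming the class is exposed at the top-level `distrax` namespace; the preference values are illustrative:

```python
import jax
import jax.numpy as jnp
import distrax

# Four actions; epsilon = 0.1 keeps 90% of the probability mass on the
# greedy action (index 1) and spreads the remaining 10% uniformly.
dist = distrax.EpsilonGreedy(
    preferences=jnp.array([1.0, 3.0, 2.0, 0.5]), epsilon=0.1)
print(dist.probs)  # [0.025, 0.925, 0.025, 0.025]
samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(5,))
print(samples)     # integer samples in {0, 1, 2, 3}
```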
55,686 |
distrax._src.distributions.gamma
|
Gamma
|
Gamma distribution with parameters `concentration` and `rate`.
|
class Gamma(distribution.Distribution):
"""Gamma distribution with parameters `concentration` and `rate`."""
equiv_tfp_cls = tfd.Gamma
def __init__(self, concentration: Numeric, rate: Numeric):
"""Initializes a Gamma distribution.
Args:
concentration: Concentration parameter of the distribution.
rate: Inverse scale parameter of the distribution.
"""
super().__init__()
self._concentration = conversion.as_float_array(concentration)
self._rate = conversion.as_float_array(rate)
self._batch_shape = jax.lax.broadcast_shapes(
self._concentration.shape, self._rate.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
@property
def concentration(self) -> Array:
"""Concentration of the distribution."""
return jnp.broadcast_to(self._concentration, self.batch_shape)
@property
def rate(self) -> Array:
"""Inverse scale of the distribution."""
return jnp.broadcast_to(self._rate, self.batch_shape)
def _sample_from_std_gamma(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._concentration, self._rate)
return jax.random.gamma(
key, a=self._concentration, shape=out_shape, dtype=dtype
)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_gamma(key, n)
return rnd / self._rate
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return (
self._concentration * jnp.log(self._rate)
+ (self._concentration - 1) * jnp.log(value)
- self._rate * value
- jax.lax.lgamma(self._concentration)
)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
log_rate = jnp.log(self._rate)
return (
self._concentration
- log_rate
+ jax.lax.lgamma(self._concentration)
+ (1.0 - self._concentration) * jax.lax.digamma(self._concentration)
)
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jax.lax.igamma(self._concentration, self._rate * value)
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def mean(self) -> Array:
"""Calculates the mean."""
return self._concentration / self._rate
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return jnp.sqrt(self._concentration) / self._rate
def variance(self) -> Array:
"""Calculates the variance."""
return self._concentration / jnp.square(self._rate)
def mode(self) -> Array:
"""Calculates the mode."""
mode = (self._concentration - 1.0) / self._rate
return jnp.where(self._concentration >= 1.0, mode, jnp.nan)
def __getitem__(self, index) -> 'Gamma':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Gamma(
concentration=self.concentration[index], rate=self.rate[index])
|
(concentration: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], rate: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
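A short sketch of the `Gamma` API above, assuming the public `distrax.Gamma` export; the parameter values are illustrative:

```python
import jax
import jax.numpy as jnp
import distrax

# Gamma with concentration (shape) 3 and rate (inverse scale) 2.
dist = distrax.Gamma(concentration=3.0, rate=2.0)
print(dist.mean(), dist.variance())   # 1.5 and 0.75
x = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(10_000,))
print(x.mean(), x.var())              # close to 1.5 and 0.75
print(dist.log_prob(1.0))             # log density at x = 1
```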
55,687 |
distrax._src.distributions.gamma
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Gamma':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Gamma(
concentration=self.concentration[index], rate=self.rate[index])
|
(self, index) -> distrax._src.distributions.gamma.Gamma
|
55,688 |
distrax._src.distributions.gamma
|
__init__
|
Initializes a Gamma distribution.
Args:
concentration: Concentration parameter of the distribution.
rate: Inverse scale parameter of the distribution.
|
def __init__(self, concentration: Numeric, rate: Numeric):
"""Initializes a Gamma distribution.
Args:
concentration: Concentration parameter of the distribution.
rate: Inverse scale parameter of the distribution.
"""
super().__init__()
self._concentration = conversion.as_float_array(concentration)
self._rate = conversion.as_float_array(rate)
self._batch_shape = jax.lax.broadcast_shapes(
self._concentration.shape, self._rate.shape)
|
(self, concentration: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], rate: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
55,691 |
distrax._src.distributions.gamma
|
_sample_from_std_gamma
| null |
def _sample_from_std_gamma(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._concentration, self._rate)
return jax.random.gamma(
key, a=self._concentration, shape=out_shape, dtype=dtype
)
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,692 |
distrax._src.distributions.gamma
|
_sample_n
|
See `Distribution._sample_n`.
|
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_gamma(key, n)
return rnd / self._rate
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,694 |
distrax._src.distributions.gamma
|
cdf
|
See `Distribution.cdf`.
|
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jax.lax.igamma(self._concentration, self._rate * value)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,696 |
distrax._src.distributions.gamma
|
entropy
|
Calculates the Shannon entropy (in nats).
|
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
log_rate = jnp.log(self._rate)
return (
self._concentration
- log_rate
+ jax.lax.lgamma(self._concentration)
+ (1.0 - self._concentration) * jax.lax.digamma(self._concentration)
)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,699 |
distrax._src.distributions.gamma
|
log_prob
|
See `Distribution.log_prob`.
|
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return (
self._concentration * jnp.log(self._rate)
+ (self._concentration - 1) * jnp.log(value)
- self._rate * value
- jax.lax.lgamma(self._concentration)
)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
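The closed form above is `concentration * log(rate) + (concentration - 1) * log(x) - rate * x - lgamma(concentration)`. A quick numerical cross-check against `jax.scipy.stats.gamma.logpdf`, which parameterizes by shape `a` and `scale = 1 / rate`; the values are illustrative:

```python
import distrax
from jax.scipy.stats import gamma as jsp_gamma

alpha, beta, x = 3.0, 2.0, 1.25
dist = distrax.Gamma(concentration=alpha, rate=beta)
print(dist.log_prob(x))                                # closed form above
print(jsp_gamma.logpdf(x, a=alpha, scale=1.0 / beta))  # reference value
```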
55,701 |
distrax._src.distributions.gamma
|
mean
|
Calculates the mean.
|
def mean(self) -> Array:
"""Calculates the mean."""
return self._concentration / self._rate
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,703 |
distrax._src.distributions.gamma
|
mode
|
Calculates the mode.
|
def mode(self) -> Array:
"""Calculates the mode."""
mode = (self._concentration - 1.0) / self._rate
return jnp.where(self._concentration >= 1.0, mode, jnp.nan)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,707 |
distrax._src.distributions.gamma
|
stddev
|
Calculates the standard deviation.
|
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return jnp.sqrt(self._concentration) / self._rate
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,710 |
distrax._src.distributions.gamma
|
variance
|
Calculates the variance.
|
def variance(self) -> Array:
"""Calculates the variance."""
return self._concentration / jnp.square(self._rate)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,711 |
distrax._src.distributions.greedy
|
Greedy
|
A Categorical distribution that is greedy with respect to some preferences.
Given a set of unnormalized preferences, the probability mass is distributed
equally among all indices `i` such that `preferences[i] = max(preferences)`;
all other indices are assigned a probability of zero.
|
class Greedy(categorical.Categorical):
"""A Categorical distribution that is greedy with respect to some preferences.
Given a set of unnormalized preferences, the probability mass is distributed
equally among all indices `i` such that `preferences[i] = max(preferences)`;
all other indices are assigned a probability of zero.
"""
def __init__(
self, preferences: Array, dtype: Union[jnp.dtype, type[Any]] = int
):
"""Initializes a Greedy distribution.
Args:
preferences: Unnormalized preferences.
dtype: The type of event samples.
"""
self._preferences = jnp.asarray(preferences)
probs = _argmax_with_random_tie_breaking(self._preferences)
super().__init__(probs=probs, dtype=dtype)
@property
def preferences(self) -> Array:
"""Unnormalized preferences."""
return self._preferences
def __getitem__(self, index) -> 'Greedy':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Greedy(
preferences=self.preferences[index], dtype=self.dtype)
|
(preferences: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
|
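A small sketch of the tie-breaking behaviour described in the `Greedy` docstring, assuming the top-level `distrax.Greedy` export:

```python
import jax.numpy as jnp
import distrax

# Indices 1 and 3 tie for the maximum preference, so the probability mass
# is split equally between them; all other indices get zero.
dist = distrax.Greedy(preferences=jnp.array([0.0, 2.0, 1.0, 2.0]))
print(dist.probs)  # [0.0, 0.5, 0.0, 0.5]
```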
55,712 |
distrax._src.distributions.greedy
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Greedy':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Greedy(
preferences=self.preferences[index], dtype=self.dtype)
|
(self, index) -> distrax._src.distributions.greedy.Greedy
|
55,713 |
distrax._src.distributions.greedy
|
__init__
|
Initializes a Greedy distribution.
Args:
preferences: Unnormalized preferences.
dtype: The type of event samples.
|
def __init__(
self, preferences: Array, dtype: Union[jnp.dtype, type[Any]] = int
):
"""Initializes a Greedy distribution.
Args:
preferences: Unnormalized preferences.
dtype: The type of event samples.
"""
self._preferences = jnp.asarray(preferences)
probs = _argmax_with_random_tie_breaking(self._preferences)
super().__init__(probs=probs, dtype=dtype)
|
(self, preferences: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
|
55,736 |
distrax._src.distributions.gumbel
|
Gumbel
|
Gumbel distribution with location `loc` and `scale` parameters.
|
class Gumbel(distribution.Distribution):
"""Gumbel distribution with location `loc` and `scale` parameters."""
equiv_tfp_cls = tfd.Gumbel
def __init__(self, loc: Numeric, scale: Numeric):
"""Initializes a Gumbel distribution.
Args:
loc: Location parameter of the distribution.
scale: Spread of the distribution.
"""
super().__init__()
self._loc = conversion.as_float_array(loc)
self._scale = conversion.as_float_array(scale)
self._batch_shape = jax.lax.broadcast_shapes(
self._loc.shape, self._scale.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
@property
def loc(self) -> Array:
"""Mean of the distribution."""
return jnp.broadcast_to(self._loc, self.batch_shape)
@property
def scale(self) -> Array:
"""Scale of the distribution."""
return jnp.broadcast_to(self._scale, self.batch_shape)
def _standardize(self, value: Array) -> Array:
"""Standardizes the input `value` in location and scale."""
return (value - self._loc) / self._scale
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
z = self._standardize(value)
return -(z + jnp.exp(-z)) - jnp.log(self._scale)
def _sample_from_std_gumbel(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._loc, self._scale)
return jax.random.gumbel(key, shape=out_shape, dtype=dtype)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_gumbel(key, n)
return self._scale * rnd + self._loc
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
rnd = self._sample_from_std_gumbel(key, n)
samples = self._scale * rnd + self._loc
log_prob = -(rnd + jnp.exp(-rnd)) - jnp.log(self._scale)
return samples, log_prob
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return jnp.log(self._scale) + 1. + jnp.euler_gamma
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
z = self._standardize(value)
return -jnp.exp(-z)
def mean(self) -> Array:
"""Calculates the mean."""
return self._loc + self._scale * jnp.euler_gamma
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self._scale * jnp.ones_like(self._loc) * jnp.pi / math.sqrt(6.)
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.square(self._scale * jnp.ones_like(self._loc) * jnp.pi) / 6.
def mode(self) -> Array:
"""Calculates the mode."""
return self.loc
def median(self) -> Array:
"""Calculates the median."""
return self._loc - self._scale * math.log(math.log(2.))
def __getitem__(self, index) -> 'Gumbel':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Gumbel(loc=self.loc[index], scale=self.scale[index])
|
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
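A minimal sketch of the `Gumbel` API above (top-level `distrax.Gumbel` assumed, parameter values illustrative). Note that the mean is `loc + scale * euler_gamma`, not `loc` itself:

```python
import jax
import distrax

dist = distrax.Gumbel(loc=0.0, scale=1.0)
print(dist.mean())     # Euler-Mascheroni constant, ~0.5772
print(dist.entropy())  # log(scale) + 1 + euler_gamma, ~1.5772
x, log_p = dist.sample_and_log_prob(seed=jax.random.PRNGKey(0),
                                    sample_shape=(3,))
print(x, log_p)
```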
55,737 |
distrax._src.distributions.gumbel
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Gumbel':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Gumbel(loc=self.loc[index], scale=self.scale[index])
|
(self, index) -> distrax._src.distributions.gumbel.Gumbel
|
55,738 |
distrax._src.distributions.gumbel
|
__init__
|
Initializes a Gumbel distribution.
Args:
loc: Location parameter of the distribution.
scale: Spread of the distribution.
|
def __init__(self, loc: Numeric, scale: Numeric):
"""Initializes a Gumbel distribution.
Args:
loc: Location parameter of the distribution.
scale: Spread of the distribution.
"""
super().__init__()
self._loc = conversion.as_float_array(loc)
self._scale = conversion.as_float_array(scale)
self._batch_shape = jax.lax.broadcast_shapes(
self._loc.shape, self._scale.shape)
|
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
55,741 |
distrax._src.distributions.gumbel
|
_sample_from_std_gumbel
| null |
def _sample_from_std_gumbel(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._loc, self._scale)
return jax.random.gumbel(key, shape=out_shape, dtype=dtype)
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,742 |
distrax._src.distributions.gumbel
|
_sample_n
|
See `Distribution._sample_n`.
|
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_gumbel(key, n)
return self._scale * rnd + self._loc
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,743 |
distrax._src.distributions.gumbel
|
_sample_n_and_log_prob
|
See `Distribution._sample_n_and_log_prob`.
|
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
rnd = self._sample_from_std_gumbel(key, n)
samples = self._scale * rnd + self._loc
log_prob = -(rnd + jnp.exp(-rnd)) - jnp.log(self._scale)
return samples, log_prob
|
(self, key: jax.Array, n: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,744 |
distrax._src.distributions.gumbel
|
_standardize
|
Standardizes the input `value` in location and scale.
|
def _standardize(self, value: Array) -> Array:
"""Standardizes the input `value` in location and scale."""
return (value - self._loc) / self._scale
|
(self, value: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,747 |
distrax._src.distributions.gumbel
|
entropy
|
Calculates the Shannon entropy (in nats).
|
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return jnp.log(self._scale) + 1. + jnp.euler_gamma
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,749 |
distrax._src.distributions.gumbel
|
log_cdf
|
See `Distribution.log_cdf`.
|
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
z = self._standardize(value)
return -jnp.exp(-z)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,750 |
distrax._src.distributions.gumbel
|
log_prob
|
See `Distribution.log_prob`.
|
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
z = self._standardize(value)
return -(z + jnp.exp(-z)) - jnp.log(self._scale)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,752 |
distrax._src.distributions.gumbel
|
mean
|
Calculates the mean.
|
def mean(self) -> Array:
"""Calculates the mean."""
return self._loc + self._scale * jnp.euler_gamma
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,753 |
distrax._src.distributions.gumbel
|
median
|
Calculates the median.
|
def median(self) -> Array:
"""Calculates the median."""
return self._loc - self._scale * math.log(math.log(2.))
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,754 |
distrax._src.distributions.gumbel
|
mode
|
Calculates the mode.
|
def mode(self) -> Array:
"""Calculates the mode."""
return self.loc
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,758 |
distrax._src.distributions.gumbel
|
stddev
|
Calculates the standard deviation.
|
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self._scale * jnp.ones_like(self._loc) * jnp.pi / math.sqrt(6.)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,761 |
distrax._src.distributions.gumbel
|
variance
|
Calculates the variance.
|
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.square(self._scale * jnp.ones_like(self._loc) * jnp.pi) / 6.
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,762 |
distrax._src.bijectors.gumbel_cdf
|
GumbelCDF
|
A bijector that computes the Gumbel cumulative distribution function (CDF).
The Gumbel CDF is given by `y = f(x) = exp(-exp(-x))` for a scalar input `x`.
Its inverse is `x = -log(-log(y))`. The log-det Jacobian of the transformation
is `log df/dx = -exp(-x) - x`.
|
class GumbelCDF(base.Bijector):
"""A bijector that computes the Gumbel cumulative density function (CDF).
The Gumbel CDF is given by `y = f(x) = exp(-exp(-x))` for a scalar input `x`.
Its inverse is `x = -log(-log(y))`. The log-det Jacobian of the transformation
is `log df/dx = -exp(-x) - x`.
"""
def __init__(self):
"""Initializes a GumbelCDF bijector."""
super().__init__(event_ndims_in=0)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
exp_neg_x = jnp.exp(-x)
y = jnp.exp(-exp_neg_x)
log_det = - x - exp_neg_x
return y, log_det
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
log_y = jnp.log(y)
x = -jnp.log(-log_y)
return x, x - log_y
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
return type(other) is GumbelCDF # pylint: disable=unidiomatic-typecheck
|
()
|
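A sketch that cross-checks the analytic log-determinant of `GumbelCDF` against autodiff; it assumes the top-level `distrax.GumbelCDF` export and that `forward` is derived from `forward_and_log_det` by the base bijector class:

```python
import jax
import jax.numpy as jnp
import distrax

bij = distrax.GumbelCDF()
x = jnp.array(0.3)
y, log_det = bij.forward_and_log_det(x)
# The analytic log-det is -x - exp(-x); compare with the log of the
# autodiff derivative of the forward map y = exp(-exp(-x)).
print(log_det)
print(jnp.log(jax.grad(lambda t: bij.forward(t))(x)))
print(bij.inverse(y))  # recovers x up to numerical error
```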
55,763 |
distrax._src.bijectors.gumbel_cdf
|
__init__
|
Initializes a GumbelCDF bijector.
|
def __init__(self):
"""Initializes a GumbelCDF bijector."""
super().__init__(event_ndims_in=0)
|
(self)
|
55,768 |
distrax._src.bijectors.gumbel_cdf
|
forward_and_log_det
|
Computes y = f(x) and log|det J(f)(x)|.
|
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
exp_neg_x = jnp.exp(-x)
y = jnp.exp(-exp_neg_x)
log_det = - x - exp_neg_x
return y, log_det
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,771 |
distrax._src.bijectors.gumbel_cdf
|
inverse_and_log_det
|
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
|
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
log_y = jnp.log(y)
x = -jnp.log(-log_y)
return x, x - log_y
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,773 |
distrax._src.bijectors.gumbel_cdf
|
same_as
|
Returns True if this bijector is guaranteed to be the same as `other`.
|
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
return type(other) is GumbelCDF # pylint: disable=unidiomatic-typecheck
|
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
|
55,775 |
distrax._src.utils.hmm
|
HMM
|
Hidden Markov Model class.
|
class HMM(jittable.Jittable):
"""Hidden Markov Model class."""
def __init__(self,
init_dist: categorical.CategoricalLike,
trans_dist: categorical.CategoricalLike,
obs_dist: distribution.DistributionLike):
"""Constructs an N-state Hidden Markov Model from component distributions.
Args:
init_dist: Integer-valued categorical distribution with parameters of
shape (N,), representing the distribution over initial latent states.
trans_dist: Integer-valued categorical distribution with parameters of
shape (N, N), representing the transition probability matrix between
latent states.
obs_dist: Any observation distribution with batch shape (N,), representing
`p(observation|latent state)`.
"""
self._init_dist = conversion.as_distribution(init_dist)
self._trans_dist = conversion.as_distribution(trans_dist)
self._obs_dist = conversion.as_distribution(obs_dist)
self._n_states = self._init_dist.num_categories
if not jnp.issubdtype(self._init_dist.dtype, jnp.integer):
raise TypeError(
f'init_dist must be categorical-like with integer dtype, but its '
f'dtype is {self._init_dist.dtype}.')
if not jnp.issubdtype(self._trans_dist.dtype, jnp.integer):
raise TypeError(
f'trans_dist must be categorical-like with integer dtype, but its '
f'dtype is {self._trans_dist.dtype}.')
if self._init_dist.batch_shape:
raise ValueError(
f'init_dist must be unbatched, but it has a batch shape of '
f'{self._init_dist.batch_shape}.')
if self._obs_dist.batch_shape != (self._n_states,):
raise ValueError(
f'obs_dist should have batch shape of ({self._n_states},) equal to '
f'the number of latent states in the model, but its batch shape is '
f'{self._obs_dist.batch_shape}.')
if self._trans_dist.batch_shape != (self._n_states,):
raise ValueError(
f'trans_dist should have batch shape of ({self._n_states},) equal to '
f'the number of latent states in the model, but its batch shape is '
f'{self._trans_dist.batch_shape}.')
if self._trans_dist.num_categories != self._n_states:
raise ValueError(
f'trans_dist should have `num_categories` of {self._n_states} equal '
f'to the number of latent states in the model, but it has '
f'`num_categories` of {self._trans_dist.num_categories}.')
@property
def init_dist(self) -> categorical.CategoricalLike:
return self._init_dist
@property
def trans_dist(self) -> categorical.CategoricalLike:
return self._trans_dist
@property
def obs_dist(self) -> distribution.DistributionLike:
return self._obs_dist
def sample(self,
*,
seed: chex.PRNGKey,
seq_len: int) -> Tuple[chex.Array, chex.Array]:
"""Sample from this HMM.
Samples an observation sequence of the given length from this
Hidden Markov Model and returns the sequence of hidden states
as well as the observation sequence.
Args:
seed: Random key of shape (2,) and dtype uint32.
seq_len: The length of the observation sequence.
Returns:
Tuple of hidden state sequence, and observation sequence.
"""
rng_key, rng_init = jax.random.split(seed)
initial_state = self._init_dist.sample(seed=rng_init)
def draw_state(prev_state, key):
state = self._trans_dist.sample(seed=key)[prev_state]
return state, state
rng_state, rng_obs = jax.random.split(rng_key)
keys = jax.random.split(rng_state, seq_len - 1)
_, states = jax.lax.scan(draw_state, initial_state, keys)
states = jnp.append(initial_state, states)
def draw_obs(state, key):
return self._obs_dist.sample(seed=key)[state]
keys = jax.random.split(rng_obs, seq_len)
obs_seq = jax.vmap(draw_obs, in_axes=(0, 0))(states, keys)
return states, obs_seq
def forward(self,
obs_seq: chex.Array,
length: Optional[chex.Array] = None) -> Tuple[float, chex.Array]:
"""Calculates a belief state.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Tuple of `log(p(x_{1:T}|model))` and the array of forward joint
probabilities `p(z_t,x_{1:t})` for each sample `x_t`.
"""
seq_len = len(obs_seq)
if length is None:
length = seq_len
def scan_fn(carry, t):
(alpha_prev, log_ll_prev) = carry
alpha_n = jnp.where(
t < length,
(self._obs_dist.prob(obs_seq[t])
* (alpha_prev[:, None] * self._trans_dist.probs).sum(axis=0)),
jnp.zeros_like(alpha_prev))
alpha_n, cn = _normalize(alpha_n)
carry = (alpha_n, jnp.log(cn) + log_ll_prev)
return carry, alpha_n
# initial belief state
alpha_0, c0 = _normalize(
self._init_dist.probs * self._obs_dist.prob(obs_seq[0]))
# setup scan loop
init_state = (alpha_0, jnp.log(c0))
ts = jnp.arange(1, seq_len)
carry, alpha_hist = jax.lax.scan(scan_fn, init_state, ts)
# post-process
alpha_hist = jnp.vstack([alpha_0.reshape(1, self._n_states), alpha_hist])
(_, log_ll) = carry
return log_ll, alpha_hist
def backward(self,
obs_seq: chex.Array,
length: Optional[chex.Array] = None) -> chex.Array:
"""Computes the backward probabilities.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Array of backward joint probabilities `p(x_{t+1:T}|z_t)`.
"""
seq_len = len(obs_seq)
if length is None:
length = seq_len
beta_t = jnp.ones((self._n_states,))
def scan_fn(beta_prev, t):
beta_t = jnp.where(
t > length,
jnp.zeros_like(beta_prev),
_normalize((beta_prev * self._obs_dist.prob(obs_seq[t-1])
* self._trans_dist.probs).sum(axis=1))[0])
return beta_t, beta_t
ts = jnp.arange(seq_len, 1, -1)
_, beta_hist = jax.lax.scan(scan_fn, beta_t, ts)
beta_hist = jnp.flip(
jnp.vstack([beta_t.reshape(1, self._n_states), beta_hist]), axis=0)
return beta_hist
def forward_backward(
self,
obs_seq: chex.Array,
length: Optional[chex.Array] = None,
) -> Tuple[chex.Array, chex.Array, chex.Array, float]:
"""HMM forward-backward algorithm.
Computes, for each time step, the marginal conditional probability that the
Hidden Markov Model was in each possible state given the observations that
were made at each time step, i.e. P(z[i] | x[0], ..., x[num_steps - 1])
for all i from 0 to num_steps - 1.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Tuple of:
* Forward joint probabilities `p(z_t,x_{1:t})`.
* Backward joint probabilities `p(x_{t+1:T}|z_t)`.
* Marginal conditional probability of the observations.
* The log-likelihood log(p(x_{1:T}|model)).
"""
seq_len = len(obs_seq)
if length is None:
length = seq_len
def gamma_t(t):
return alpha[t] * beta[t]
ll, alpha = self.forward(obs_seq, length)
beta = self.backward(obs_seq, length)
ts = jnp.arange(seq_len)
gamma = jax.vmap(gamma_t)(ts)
gamma = jax.vmap(lambda x: _normalize(x)[0])(gamma)
return alpha, beta, gamma, ll
def viterbi(self, obs_seq: chex.Array) -> chex.Array:
"""Viterbi algorithm.
Computes the most probable sequence of hidden states given the observations.
Args:
obs_seq: Observation sequence.
Returns:
The most probable sequence of hidden states.
"""
trans_log_probs = jax.nn.log_softmax(self._trans_dist.logits)
init_log_probs = jax.nn.log_softmax(self._init_dist.logits)
first_log_prob = init_log_probs + self._obs_dist.log_prob(obs_seq[0])
if len(obs_seq) == 1:
return jnp.expand_dims(jnp.argmax(first_log_prob), axis=0)
def viterbi_forward(prev_logp, obs):
obs_logp = self._obs_dist.log_prob(obs)
logp = prev_logp[..., None] + trans_log_probs + obs_logp[..., None, :]
max_logp_given_successor = jnp.max(logp, axis=-2)
most_likely_given_successor = jnp.argmax(logp, axis=-2)
return max_logp_given_successor, most_likely_given_successor
final_log_prob, most_likely_sources = jax.lax.scan(
viterbi_forward, first_log_prob, obs_seq[1:])
most_likely_initial_given_successor = jnp.argmax(
trans_log_probs + first_log_prob, axis=-2)
most_likely_sources = jnp.concatenate([
jnp.expand_dims(most_likely_initial_given_successor, axis=0),
most_likely_sources], axis=0)
def viterbi_backward(state, most_likely_sources):
state = jax.nn.one_hot(state, self._n_states)
most_likely = jnp.sum(most_likely_sources * state).astype(jnp.int64)
return most_likely, most_likely
final_state = jnp.argmax(final_log_prob)
_, most_likely_path = jax.lax.scan(
viterbi_backward, final_state, most_likely_sources[1:], reverse=True)
return jnp.append(most_likely_path, final_state)
|
(init_dist: Union[distrax._src.distributions.categorical.Categorical, tensorflow_probability.substrates.jax.distributions.categorical.Categorical], trans_dist: Union[distrax._src.distributions.categorical.Categorical, tensorflow_probability.substrates.jax.distributions.categorical.Categorical], obs_dist: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution])
|
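A small, self-contained sketch of the `HMM` API above, assuming the top-level `distrax.HMM`, `distrax.Categorical` and `distrax.Normal` exports; the two-state parameters are illustrative:

```python
import jax
import jax.numpy as jnp
import distrax

# Two latent states with Gaussian observations centred at 0 and 5.
hmm = distrax.HMM(
    init_dist=distrax.Categorical(probs=jnp.array([0.8, 0.2])),
    trans_dist=distrax.Categorical(probs=jnp.array([[0.9, 0.1],
                                                    [0.2, 0.8]])),
    obs_dist=distrax.Normal(loc=jnp.array([0.0, 5.0]),
                            scale=jnp.array([1.0, 1.0])))
states, obs = hmm.sample(seed=jax.random.PRNGKey(0), seq_len=20)
log_ll, alphas = hmm.forward(obs)
print(states.shape, obs.shape, log_ll)  # (20,), (20,), scalar log-likelihood
```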
55,776 |
distrax._src.utils.hmm
|
__init__
|
Constructs an N-state Hidden Markov Model from component distributions.
Args:
init_dist: Integer-valued categorical distribution with parameters of
shape (N,), representing the distribution over initial latent states.
trans_dist: Integer-valued categorical distribution with parameters of
shape (N, N), representing the transition probability matrix between
latent states.
obs_dist: Any observation distribution with batch shape (N,), representing
`p(observation|latent state)`.
|
def __init__(self,
init_dist: categorical.CategoricalLike,
trans_dist: categorical.CategoricalLike,
obs_dist: distribution.DistributionLike):
"""Constructs an N-state Hidden Markov Model from component distributions.
Args:
init_dist: Integer-valued categorical distribution with parameters of
shape (N,), representing the distribution over initial latent states.
trans_dist: Integer-valued categorical distribution with parameters of
shape (N, N), representing the transition probability matrix between
latent states.
obs_dist: Any observation distribution with batch shape (N,), representing
`p(observation|latent state)`.
"""
self._init_dist = conversion.as_distribution(init_dist)
self._trans_dist = conversion.as_distribution(trans_dist)
self._obs_dist = conversion.as_distribution(obs_dist)
self._n_states = self._init_dist.num_categories
if not jnp.issubdtype(self._init_dist.dtype, jnp.integer):
raise TypeError(
f'init_dist must be categorical-like with integer dtype, but its '
f'dtype is {self._init_dist.dtype}.')
if not jnp.issubdtype(self._trans_dist.dtype, jnp.integer):
raise TypeError(
f'trans_dist must be categorical-like with integer dtype, but its '
f'dtype is {self._trans_dist.dtype}.')
if self._init_dist.batch_shape:
raise ValueError(
f'init_dist must be unbatched, but it has a batch shape of '
f'{self._init_dist.batch_shape}.')
if self._obs_dist.batch_shape != (self._n_states,):
raise ValueError(
f'obs_dist should have batch shape of ({self._n_states},) equal to '
f'the number of latent states in the model, but its batch shape is '
f'{self._obs_dist.batch_shape}.')
if self._trans_dist.batch_shape != (self._n_states,):
raise ValueError(
f'trans_dist should have batch shape of ({self._n_states},) equal to '
f'the number of latent states in the model, but its batch shape is '
f'{self._trans_dist.batch_shape}.')
if self._trans_dist.num_categories != self._n_states:
raise ValueError(
f'trans_dist should have `num_categories` of {self._n_states} equal '
f'to the number of latent states in the model, but it has '
f'`num_categories` of {self._trans_dist.num_categories}.')
|
(self, init_dist: Union[distrax._src.distributions.categorical.Categorical, tensorflow_probability.substrates.jax.distributions.categorical.Categorical], trans_dist: Union[distrax._src.distributions.categorical.Categorical, tensorflow_probability.substrates.jax.distributions.categorical.Categorical], obs_dist: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution])
|
55,778 |
distrax._src.utils.hmm
|
backward
|
Computes the backward probabilities.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Array of backward joint probabilities `p(x_{t+1:T}|z_t)`.
|
def backward(self,
obs_seq: chex.Array,
length: Optional[chex.Array] = None) -> chex.Array:
"""Computes the backward probabilities.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Array of backward joint probabilities `p(x_{t+1:T}|z_t)`.
"""
seq_len = len(obs_seq)
if length is None:
length = seq_len
beta_t = jnp.ones((self._n_states,))
def scan_fn(beta_prev, t):
beta_t = jnp.where(
t > length,
jnp.zeros_like(beta_prev),
_normalize((beta_prev * self._obs_dist.prob(obs_seq[t-1])
* self._trans_dist.probs).sum(axis=1))[0])
return beta_t, beta_t
ts = jnp.arange(seq_len, 1, -1)
_, beta_hist = jax.lax.scan(scan_fn, beta_t, ts)
beta_hist = jnp.flip(
jnp.vstack([beta_t.reshape(1, self._n_states), beta_hist]), axis=0)
return beta_hist
|
(self, obs_seq: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], length: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,779 |
distrax._src.utils.hmm
|
forward
|
Calculates a belief state.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Tuple of `log(p(x_{1:T}|model))` and the array of forward joint
probabilities `p(z_t,x_{1:t})` for each sample `x_t`.
|
def forward(self,
obs_seq: chex.Array,
length: Optional[chex.Array] = None) -> Tuple[float, chex.Array]:
"""Calculates a belief state.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Tuple of `log(p(x_{1:T}|model))` and the array of forward joint
probabilities `p(z_t,x_{1:t})` for each sample `x_t`.
"""
seq_len = len(obs_seq)
if length is None:
length = seq_len
def scan_fn(carry, t):
(alpha_prev, log_ll_prev) = carry
alpha_n = jnp.where(
t < length,
(self._obs_dist.prob(obs_seq[t])
* (alpha_prev[:, None] * self._trans_dist.probs).sum(axis=0)),
jnp.zeros_like(alpha_prev))
alpha_n, cn = _normalize(alpha_n)
carry = (alpha_n, jnp.log(cn) + log_ll_prev)
return carry, alpha_n
# initial belief state
alpha_0, c0 = _normalize(
self._init_dist.probs * self._obs_dist.prob(obs_seq[0]))
# setup scan loop
init_state = (alpha_0, jnp.log(c0))
ts = jnp.arange(1, seq_len)
carry, alpha_hist = jax.lax.scan(scan_fn, init_state, ts)
# post-process
alpha_hist = jnp.vstack([alpha_0.reshape(1, self._n_states), alpha_hist])
(_, log_ll) = carry
return log_ll, alpha_hist
|
(self, obs_seq: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], length: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None) -> Tuple[float, Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,780 |
distrax._src.utils.hmm
|
forward_backward
|
HMM forward-backward algorithm.
Computes, for each time step, the marginal conditional probability that the
Hidden Markov Model was in each possible state given the observations that
were made at each time step, i.e. P(z[i] | x[0], ..., x[num_steps - 1])
for all i from 0 to num_steps - 1.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Tuple of:
* Forward joint probabilities `p(z_t,x_{1:t})`.
* Backward joint probabilities `p(x_{t+1:T}|z_t)`.
* Marginal conditional probability of the observations.
* The log-likelihood log(p(x_{1:T}|model)).
|
def forward_backward(
self,
obs_seq: chex.Array,
length: Optional[chex.Array] = None,
) -> Tuple[chex.Array, chex.Array, chex.Array, float]:
"""HMM forward-backward algorithm.
Computes, for each time step, the marginal conditional probability that the
Hidden Markov Model was in each possible state given the observations that
were made at each time step, i.e. P(z[i] | x[0], ..., x[num_steps - 1])
for all i from 0 to num_steps - 1.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Tuple of:
* Forward joint probabilities `p(z_t,x_{1:t})`.
* Backward joint probabilities `p(x_{t+1:T}|z_t)`.
* Marginal conditional probability of the observations.
* The log-likelihood log(p(x_{1:T}|model)).
"""
seq_len = len(obs_seq)
if length is None:
length = seq_len
def gamma_t(t):
return alpha[t] * beta[t]
ll, alpha = self.forward(obs_seq, length)
beta = self.backward(obs_seq, length)
ts = jnp.arange(seq_len)
gamma = jax.vmap(gamma_t)(ts)
gamma = jax.vmap(lambda x: _normalize(x)[0])(gamma)
return alpha, beta, gamma, ll
|
(self, obs_seq: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], length: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, NoneType] = None) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], float]
|
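Continuing the hypothetical two-state example above, the smoothed posterior marginals `gamma` returned by `forward_backward` are normalized per time step:

```python
alpha, beta, gamma, log_ll = hmm.forward_backward(obs)
print(gamma.shape)         # (seq_len, n_states)
print(gamma.sum(axis=-1))  # each row sums to 1
```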
55,781 |
distrax._src.utils.hmm
|
sample
|
Sample from this HMM.
Samples an observation sequence of the given length from this
Hidden Markov Model and returns the sequence of hidden states
as well as the observation sequence.
Args:
seed: Random key of shape (2,) and dtype uint32.
seq_len: The length of the observation sequence.
Returns:
Tuple of hidden state sequence, and observation sequence.
|
def sample(self,
*,
seed: chex.PRNGKey,
seq_len: int) -> Tuple[chex.Array, chex.Array]:
"""Sample from this HMM.
Samples an observation sequence of the given length from this
Hidden Markov Model and returns the sequence of hidden states
as well as the observation sequence.
Args:
seed: Random key of shape (2,) and dtype uint32.
seq_len: The length of the observation sequence.
Returns:
Tuple of hidden state sequence, and observation sequence.
"""
rng_key, rng_init = jax.random.split(seed)
initial_state = self._init_dist.sample(seed=rng_init)
def draw_state(prev_state, key):
state = self._trans_dist.sample(seed=key)[prev_state]
return state, state
rng_state, rng_obs = jax.random.split(rng_key)
keys = jax.random.split(rng_state, seq_len - 1)
_, states = jax.lax.scan(draw_state, initial_state, keys)
states = jnp.append(initial_state, states)
def draw_obs(state, key):
return self._obs_dist.sample(seed=key)[state]
keys = jax.random.split(rng_obs, seq_len)
obs_seq = jax.vmap(draw_obs, in_axes=(0, 0))(states, keys)
return states, obs_seq
|
(self, *, seed: jax.Array, seq_len: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,783 |
distrax._src.utils.hmm
|
viterbi
|
Viterbi algorithm.
Computes the most probable sequence of hidden states given the observations.
Args:
obs_seq: Observation sequence.
Returns:
The most probable sequence of hidden states.
|
def viterbi(self, obs_seq: chex.Array) -> chex.Array:
"""Viterbi algorithm.
Computes the most probable sequence of hidden states given the observations.
Args:
obs_seq: Observation sequence.
Returns:
The most probable sequence of hidden states.
"""
trans_log_probs = jax.nn.log_softmax(self._trans_dist.logits)
init_log_probs = jax.nn.log_softmax(self._init_dist.logits)
first_log_prob = init_log_probs + self._obs_dist.log_prob(obs_seq[0])
if len(obs_seq) == 1:
return jnp.expand_dims(jnp.argmax(first_log_prob), axis=0)
def viterbi_forward(prev_logp, obs):
obs_logp = self._obs_dist.log_prob(obs)
logp = prev_logp[..., None] + trans_log_probs + obs_logp[..., None, :]
max_logp_given_successor = jnp.max(logp, axis=-2)
most_likely_given_successor = jnp.argmax(logp, axis=-2)
return max_logp_given_successor, most_likely_given_successor
final_log_prob, most_likely_sources = jax.lax.scan(
viterbi_forward, first_log_prob, obs_seq[1:])
most_likely_initial_given_successor = jnp.argmax(
trans_log_probs + first_log_prob, axis=-2)
most_likely_sources = jnp.concatenate([
jnp.expand_dims(most_likely_initial_given_successor, axis=0),
most_likely_sources], axis=0)
def viterbi_backward(state, most_likely_sources):
state = jax.nn.one_hot(state, self._n_states)
most_likely = jnp.sum(most_likely_sources * state).astype(jnp.int64)
return most_likely, most_likely
final_state = jnp.argmax(final_log_prob)
_, most_likely_path = jax.lax.scan(
viterbi_backward, final_state, most_likely_sources[1:], reverse=True)
return jnp.append(most_likely_path, final_state)
|
(self, obs_seq: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
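Again continuing the hypothetical example above, Viterbi decoding returns one state index per observation:

```python
map_states = hmm.viterbi(obs)
print(map_states.shape)  # (seq_len,)
print(map_states)        # most probable hidden-state sequence
```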
55,784 |
distrax._src.distributions.independent
|
Independent
|
Independent distribution obtained from child distributions.
|
class Independent(distrax_distribution.Distribution):
"""Independent distribution obtained from child distributions."""
equiv_tfp_cls = tfd.Independent
def __init__(self,
distribution: DistributionLike,
reinterpreted_batch_ndims: Optional[int] = None):
"""Initializes an Independent distribution.
Args:
distribution: Base distribution instance.
reinterpreted_batch_ndims: Number of event dimensions.
"""
super().__init__()
distribution = conversion.as_distribution(distribution)
self._distribution = distribution
# Check if event shape is a tuple of integers (i.e. not nested).
event_shape = distribution.event_shape
if not (isinstance(event_shape, tuple) and
all(isinstance(i, int) for i in event_shape)):
raise ValueError(
f"'Independent' currently only supports distributions with Array "
f"events (i.e. not nested). Received '{distribution.name}' with "
f"event shape '{distribution.event_shape}'.")
dist_batch_shape = distribution.batch_shape
if reinterpreted_batch_ndims is not None:
dist_batch_ndims = len(dist_batch_shape)
if reinterpreted_batch_ndims > dist_batch_ndims:
raise ValueError(
f'`reinterpreted_batch_ndims` is {reinterpreted_batch_ndims}, but'
f' distribution `{distribution.name}` has only {dist_batch_ndims}'
f' batch dimensions.')
elif reinterpreted_batch_ndims < 0:
raise ValueError(f'`reinterpreted_batch_ndims` can\'t be negative; got'
f' {reinterpreted_batch_ndims}.')
self._reinterpreted_batch_ndims = reinterpreted_batch_ndims
else:
self._reinterpreted_batch_ndims = max(len(dist_batch_shape) - 1, 0)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
dist_batch_shape = self._distribution.batch_shape
event_ndims = len(dist_batch_shape) - self._reinterpreted_batch_ndims
return dist_batch_shape[event_ndims:] + self._distribution.event_shape
@property
def distribution(self):
return self._distribution
@property
def reinterpreted_batch_ndims(self) -> int:
return self._reinterpreted_batch_ndims
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
dist_batch_shape = self._distribution.batch_shape
d = len(dist_batch_shape) - self.reinterpreted_batch_ndims
return dist_batch_shape[:d]
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
return self._distribution.sample(seed=key, sample_shape=n)
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples, log_prob = self._distribution.sample_and_log_prob(
seed=key, sample_shape=n)
log_prob = self._reduce(jnp.sum, log_prob)
return samples, log_prob
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return self._reduce(jnp.sum, self._distribution.log_prob(value))
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
return self._reduce(jnp.sum, self._distribution.entropy())
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return self._reduce(jnp.sum, self._distribution.log_cdf(value))
def mean(self) -> Array:
"""Calculates the mean."""
return self._distribution.mean()
def median(self) -> Array:
"""Calculates the median."""
return self._distribution.median()
def variance(self) -> Array:
"""Calculates the variance."""
return self._distribution.variance()
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self._distribution.stddev()
def mode(self) -> Array:
"""Calculates the mode."""
return self._distribution.mode()
def _reduce(self, fn: Callable[..., Array], value: Array) -> Array:
return fn(value,
axis=[-i - 1 for i in range(0, self.reinterpreted_batch_ndims)])
def __getitem__(self, index) -> 'Independent':
"""See `Distribution.__getitem__`."""
index = distrax_distribution.to_batch_shape_index(self.batch_shape, index)
return Independent(
distribution=self.distribution[index],
reinterpreted_batch_ndims=self.reinterpreted_batch_ndims)
|
(distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], reinterpreted_batch_ndims: Optional[int] = None)
|
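A common use of `Independent` is turning a batch of scalar distributions into a single multivariate event. A minimal sketch, assuming the top-level `distrax.Independent` and `distrax.Normal` exports:

```python
import jax.numpy as jnp
import distrax

# Reinterpret a batch of 3 Normals as one 3-dimensional event.
base = distrax.Normal(loc=jnp.zeros(3), scale=jnp.ones(3))
dist = distrax.Independent(base, reinterpreted_batch_ndims=1)
print(dist.event_shape, dist.batch_shape)  # (3,) and ()
print(dist.log_prob(jnp.zeros(3)))         # sum of the three Normal log-probs
```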
55,785 |
distrax._src.distributions.independent
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Independent':
"""See `Distribution.__getitem__`."""
index = distrax_distribution.to_batch_shape_index(self.batch_shape, index)
return Independent(
distribution=self.distribution[index],
reinterpreted_batch_ndims=self.reinterpreted_batch_ndims)
|
(self, index) -> distrax._src.distributions.independent.Independent
|
55,786 |
distrax._src.distributions.independent
|
__init__
|
Initializes an Independent distribution.
Args:
distribution: Base distribution instance.
reinterpreted_batch_ndims: Number of event dimensions.
|
def __init__(self,
distribution: DistributionLike,
reinterpreted_batch_ndims: Optional[int] = None):
"""Initializes an Independent distribution.
Args:
distribution: Base distribution instance.
reinterpreted_batch_ndims: Number of event dimensions.
"""
super().__init__()
distribution = conversion.as_distribution(distribution)
self._distribution = distribution
# Check if event shape is a tuple of integers (i.e. not nested).
event_shape = distribution.event_shape
if not (isinstance(event_shape, tuple) and
all(isinstance(i, int) for i in event_shape)):
raise ValueError(
f"'Independent' currently only supports distributions with Array "
f"events (i.e. not nested). Received '{distribution.name}' with "
f"event shape '{distribution.event_shape}'.")
dist_batch_shape = distribution.batch_shape
if reinterpreted_batch_ndims is not None:
dist_batch_ndims = len(dist_batch_shape)
if reinterpreted_batch_ndims > dist_batch_ndims:
raise ValueError(
f'`reinterpreted_batch_ndims` is {reinterpreted_batch_ndims}, but'
f' distribution `{distribution.name}` has only {dist_batch_ndims}'
f' batch dimensions.')
elif reinterpreted_batch_ndims < 0:
raise ValueError(f'`reinterpreted_batch_ndims` can\'t be negative; got'
f' {reinterpreted_batch_ndims}.')
self._reinterpreted_batch_ndims = reinterpreted_batch_ndims
else:
self._reinterpreted_batch_ndims = max(len(dist_batch_shape) - 1, 0)
|
(self, distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], reinterpreted_batch_ndims: Optional[int] = None)
|
55,789 |
distrax._src.distributions.independent
|
_reduce
| null |
def _reduce(self, fn: Callable[..., Array], value: Array) -> Array:
return fn(value,
axis=[-i - 1 for i in range(0, self.reinterpreted_batch_ndims)])
|
(self, fn: Callable[..., Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], value: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,790 |
distrax._src.distributions.independent
|
_sample_n
|
See `Distribution._sample_n`.
|
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
return self._distribution.sample(seed=key, sample_shape=n)
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,791 |
distrax._src.distributions.independent
|
_sample_n_and_log_prob
|
See `Distribution._sample_n_and_log_prob`.
|
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples, log_prob = self._distribution.sample_and_log_prob(
seed=key, sample_shape=n)
log_prob = self._reduce(jnp.sum, log_prob)
return samples, log_prob
|
(self, key: jax.Array, n: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,794 |
distrax._src.distributions.independent
|
entropy
|
See `Distribution.entropy`.
|
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
return self._reduce(jnp.sum, self._distribution.entropy())
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,796 |
distrax._src.distributions.independent
|
log_cdf
|
See `Distribution.log_cdf`.
|
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return self._reduce(jnp.sum, self._distribution.log_cdf(value))
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,797 |
distrax._src.distributions.independent
|
log_prob
|
See `Distribution.log_prob`.
|
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return self._reduce(jnp.sum, self._distribution.log_prob(value))
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,799 |
distrax._src.distributions.independent
|
mean
|
Calculates the mean.
|
def mean(self) -> Array:
"""Calculates the mean."""
return self._distribution.mean()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,800 |
distrax._src.distributions.independent
|
median
|
Calculates the median.
|
def median(self) -> Array:
"""Calculates the median."""
return self._distribution.median()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,801 |
distrax._src.distributions.independent
|
mode
|
Calculates the mode.
|
def mode(self) -> Array:
"""Calculates the mode."""
return self._distribution.mode()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,805 |
distrax._src.distributions.independent
|
stddev
|
Calculates the standard deviation.
|
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self._distribution.stddev()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,808 |
distrax._src.distributions.independent
|
variance
|
Calculates the variance.
|
def variance(self) -> Array:
"""Calculates the variance."""
return self._distribution.variance()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,809 |
distrax._src.bijectors.inverse
|
Inverse
|
A bijector that inverts a given bijector.
That is, if `bijector` implements the transformation `f`, `Inverse(bijector)`
implements the inverse transformation `f^{-1}`.
The inversion is performed by swapping the forward with the corresponding
inverse methods of the given bijector.
|
class Inverse(base.Bijector):
"""A bijector that inverts a given bijector.
That is, if `bijector` implements the transformation `f`, `Inverse(bijector)`
implements the inverse transformation `f^{-1}`.
The inversion is performed by swapping the forward with the corresponding
inverse methods of the given bijector.
"""
def __init__(self, bijector: BijectorLike):
"""Initializes an Inverse bijector.
Args:
bijector: the bijector to be inverted. It can be a distrax bijector, a TFP
bijector, or a callable to be wrapped by `Lambda`.
"""
self._bijector = conversion.as_bijector(bijector)
super().__init__(
event_ndims_in=self._bijector.event_ndims_out,
event_ndims_out=self._bijector.event_ndims_in,
is_constant_jacobian=self._bijector.is_constant_jacobian,
is_constant_log_det=self._bijector.is_constant_log_det)
@property
def bijector(self) -> BijectorT:
"""The base bijector that was the input to `Inverse`."""
return self._bijector
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
return self._bijector.inverse(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
return self._bijector.forward(y)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
return self._bijector.inverse_log_det_jacobian(x)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
return self._bijector.forward_log_det_jacobian(y)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self._bijector.inverse_and_log_det(x)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self._bijector.forward_and_log_det(y)
@property
def name(self) -> str:
"""Name of the bijector."""
return self.__class__.__name__ + self._bijector.name
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Inverse: # pylint: disable=unidiomatic-typecheck
return self.bijector.same_as(other.bijector)
return False
|
(bijector: Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]])
|
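A short sketch of `Inverse` wrapping another bijector; `distrax.Tanh` is used here purely as an illustrative base bijector:

```python
import jax.numpy as jnp
import distrax

# Inverse(Tanh) implements atanh with the matching log-det Jacobian.
bij = distrax.Inverse(distrax.Tanh())
y = jnp.array(0.5)
x, log_det = bij.forward_and_log_det(y)  # x = atanh(0.5), log_det = -log(1 - y**2)
print(x, log_det)
print(bij.inverse(x))                    # recovers 0.5
```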
55,810 |
distrax._src.bijectors.inverse
|
__init__
|
Initializes an Inverse bijector.
Args:
bijector: the bijector to be inverted. It can be a distrax bijector, a TFP
bijector, or a callable to be wrapped by `Lambda`.
|
def __init__(self, bijector: BijectorLike):
"""Initializes an Inverse bijector.
Args:
bijector: the bijector to be inverted. It can be a distrax bijector, a TFP
bijector, or a callable to be wrapped by `Lambda`.
"""
self._bijector = conversion.as_bijector(bijector)
super().__init__(
event_ndims_in=self._bijector.event_ndims_out,
event_ndims_out=self._bijector.event_ndims_in,
is_constant_jacobian=self._bijector.is_constant_jacobian,
is_constant_log_det=self._bijector.is_constant_log_det)
|
(self, bijector: Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]])
|
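A short sketch of the `event_ndims` swap performed here, assuming the public `distrax.Block` and `distrax.Tanh` wrappers: the inverted bijector takes its `event_ndims_in` from the wrapped bijector's `event_ndims_out`, and vice versa.

import distrax

block_tanh = distrax.Block(distrax.Tanh(), ndims=1)  # vector-valued: event_ndims_in == event_ndims_out == 1
inv = distrax.Inverse(block_tanh)
assert inv.event_ndims_in == block_tanh.event_ndims_out
assert inv.event_ndims_out == block_tanh.event_ndims_in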
55,814 |
distrax._src.bijectors.inverse
|
forward
|
Computes y = f(x).
|
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
return self._bijector.inverse(x)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,815 |
distrax._src.bijectors.inverse
|
forward_and_log_det
|
Computes y = f(x) and log|det J(f)(x)|.
|
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self._bijector.inverse_and_log_det(x)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,816 |
distrax._src.bijectors.inverse
|
forward_log_det_jacobian
|
Computes log|det J(f)(x)|.
|
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
return self._bijector.inverse_log_det_jacobian(x)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,817 |
distrax._src.bijectors.inverse
|
inverse
|
Computes x = f^{-1}(y).
|
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
return self._bijector.forward(y)
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,818 |
distrax._src.bijectors.inverse
|
inverse_and_log_det
|
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
|
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self._bijector.forward_and_log_det(y)
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,819 |
distrax._src.bijectors.inverse
|
inverse_log_det_jacobian
|
Computes log|det J(f^{-1})(y)|.
|
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
return self._bijector.forward_log_det_jacobian(y)
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,820 |
distrax._src.bijectors.inverse
|
same_as
|
Returns True if this bijector is guaranteed to be the same as `other`.
|
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Inverse: # pylint: disable=unidiomatic-typecheck
return self.bijector.same_as(other.bijector)
return False
|
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
|
55,822 |
distrax._src.distributions.joint
|
Joint
|
Joint distribution over a tree of statistically independent distributions.
Samples from the Joint distribution take the form of a tree structure that
matches the structure of the underlying distributions. Log-probabilities
are summed over the tree.
All distributions in the tree must have the same `batch_shape` in order for
log-probabilities to be computed correctly and for the `batch_shape` of the
Joint distribution to be correct.
|
class Joint(distribution.Distribution):
"""Joint distribution over a tree of statistically independent distributions.
Samples from the Joint distribution take the form of a tree structure that
matches the structure of the underlying distributions. Log-probabilities
are summed over the tree.
All distributions in the tree must have the same `batch_shape` in order for
log-probabilities to be computed correctly and for the `batch_shape` of the
Joint distribution to be correct.
"""
def __init__(self, distributions: DistributionT):
"""Initializes a Joint distribution over a tree of distributions.
Args:
distributions: Tree of distributions that must have the same batch shape.
"""
super().__init__()
self._distributions = tree.map_structure(conversion.as_distribution,
distributions)
batch_shape = None
first_path = None
for path, dist in tree.flatten_with_path(self._distributions):
batch_shape = batch_shape or dist.batch_shape
first_path = '.'.join(map(str, path))
if dist.batch_shape != batch_shape:
path = '.'.join(map(str, path))
raise ValueError(
f'Joint distributions must have the same batch shape, but '
f'distribution "{dist.name}" at location {path} had batch shape '
f'{dist.batch_shape} which is not equal to the batch shape '
f'{batch_shape} of the distribution at location {first_path}.')
def _sample_n(
self,
key: chex.PRNGKey,
n: int) -> distribution.EventT:
keys = list(jax.random.split(key, len(tree.flatten(self._distributions))))
keys = tree.unflatten_as(self._distributions, keys)
return tree.map_structure(lambda d, k: d.sample(seed=k, sample_shape=n),
self._distributions, keys)
def _sample_n_and_log_prob(
self,
key: chex.PRNGKey,
n: int) -> Tuple[distribution.EventT, chex.Array]:
keys = list(jax.random.split(key, len(tree.flatten(self._distributions))))
keys = tree.unflatten_as(self._distributions, keys)
samples_and_log_probs = tree.map_structure(
lambda d, k: d.sample_and_log_prob(seed=k, sample_shape=n),
self._distributions, keys)
samples = tree.map_structure_up_to(
self._distributions, lambda p: p[0], samples_and_log_probs)
log_probs = tree.map_structure_up_to(
self._distributions, lambda p: p[1], samples_and_log_probs)
log_probs = jnp.stack(tree.flatten(log_probs))
log_probs = jnp.sum(log_probs, axis=0)
return samples, log_probs
def log_prob(self, value: distribution.EventT) -> chex.Array:
"""Compute the total log probability of the distributions in the tree."""
log_probs = tree.map_structure(lambda dist, value: dist.log_prob(value),
self._distributions, value)
log_probs = jnp.stack(tree.flatten(log_probs))
return jnp.sum(log_probs, axis=0)
@property
def distributions(self) -> DistributionT:
return self._distributions
@property
def event_shape(self) -> distribution.ShapeT:
return tree.map_structure(lambda dist: dist.event_shape,
self._distributions)
@property
def batch_shape(self) -> Tuple[int, ...]:
return tree.flatten(self._distributions)[0].batch_shape
@property
def dtype(self) -> distribution.DTypeT:
return tree.map_structure(lambda dist: dist.dtype, self._distributions)
def entropy(self) -> chex.Array:
return sum(dist.entropy() for dist in tree.flatten(self._distributions))
def log_cdf(self, value: distribution.EventT) -> chex.Array:
return sum(dist.log_cdf(v)
for dist, v in zip(tree.flatten(self._distributions),
tree.flatten(value)))
def mean(self) -> distribution.EventT:
"""Calculates the mean."""
return tree.map_structure(lambda dist: dist.mean(), self._distributions)
def median(self) -> distribution.EventT:
"""Calculates the median."""
return tree.map_structure(lambda dist: dist.median(), self._distributions)
def mode(self) -> distribution.EventT:
"""Calculates the mode."""
return tree.map_structure(lambda dist: dist.mode(), self._distributions)
def __getitem__(self, index) -> 'Joint':
"""See `Distribution.__getitem__`."""
return Joint(tree.map_structure(lambda dist: dist[index],
self._distributions))
|
(distributions: ~DistributionT)
|
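A minimal sketch of how the tree structure appears in samples and log-probabilities, assuming the class is exposed as `distrax.Joint` alongside `distrax.Normal` and `distrax.Categorical`:

import jax
import jax.numpy as jnp
import distrax

# Both components have the same (empty) batch shape, as required.
joint = distrax.Joint({
    'latent': distrax.Normal(loc=0., scale=1.),
    'label': distrax.Categorical(logits=jnp.zeros(3)),
})
sample = joint.sample(seed=jax.random.PRNGKey(0))  # dict with 'latent' and 'label' leaves
log_p = joint.log_prob(sample)                     # scalar: sum of the per-leaf log-probs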
55,823 |
distrax._src.distributions.joint
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Joint':
"""See `Distribution.__getitem__`."""
return Joint(tree.map_structure(lambda dist: dist[index],
self._distributions))
|
(self, index) -> distrax._src.distributions.joint.Joint
|
55,824 |
distrax._src.distributions.joint
|
__init__
|
Initializes a Joint distribution over a tree of distributions.
Args:
distributions: Tree of distributions that must have the same batch shape.
|
def __init__(self, distributions: DistributionT):
"""Initializes a Joint distribution over a tree of distributions.
Args:
distributions: Tree of distributions that must have the same batch shape.
"""
super().__init__()
self._distributions = tree.map_structure(conversion.as_distribution,
distributions)
batch_shape = None
first_path = None
for path, dist in tree.flatten_with_path(self._distributions):
batch_shape = batch_shape or dist.batch_shape
first_path = '.'.join(map(str, path))
if dist.batch_shape != batch_shape:
path = '.'.join(map(str, path))
raise ValueError(
f'Joint distributions must have the same batch shape, but '
f'distribution "{dist.name}" at location {path} had batch shape '
f'{dist.batch_shape} which is not equal to the batch shape '
f'{batch_shape} of the distribution at location {first_path}.')
|
(self, distributions: ~DistributionT)
|
55,827 |
distrax._src.distributions.joint
|
_sample_n
| null |
def _sample_n(
self,
key: chex.PRNGKey,
n: int) -> distribution.EventT:
keys = list(jax.random.split(key, len(tree.flatten(self._distributions))))
keys = tree.unflatten_as(self._distributions, keys)
return tree.map_structure(lambda d, k: d.sample(seed=k, sample_shape=n),
self._distributions, keys)
|
(self, key: jax.Array, n: int) -> ~EventT
|
55,828 |
distrax._src.distributions.joint
|
_sample_n_and_log_prob
| null |
def _sample_n_and_log_prob(
self,
key: chex.PRNGKey,
n: int) -> Tuple[distribution.EventT, chex.Array]:
keys = list(jax.random.split(key, len(tree.flatten(self._distributions))))
keys = tree.unflatten_as(self._distributions, keys)
samples_and_log_probs = tree.map_structure(
lambda d, k: d.sample_and_log_prob(seed=k, sample_shape=n),
self._distributions, keys)
samples = tree.map_structure_up_to(
self._distributions, lambda p: p[0], samples_and_log_probs)
log_probs = tree.map_structure_up_to(
self._distributions, lambda p: p[1], samples_and_log_probs)
log_probs = jnp.stack(tree.flatten(log_probs))
log_probs = jnp.sum(log_probs, axis=0)
return samples, log_probs
|
(self, key: jax.Array, n: int) -> Tuple[~EventT, Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,831 |
distrax._src.distributions.joint
|
entropy
| null |
def entropy(self) -> chex.Array:
return sum(dist.entropy() for dist in tree.flatten(self._distributions))
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,833 |
distrax._src.distributions.joint
|
log_cdf
| null |
def log_cdf(self, value: distribution.EventT) -> chex.Array:
return sum(dist.log_cdf(v)
for dist, v in zip(tree.flatten(self._distributions),
tree.flatten(value)))
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,834 |
distrax._src.distributions.joint
|
log_prob
|
Compute the total log probability of the distributions in the tree.
|
def log_prob(self, value: distribution.EventT) -> chex.Array:
"""Compute the total log probability of the distributions in the tree."""
log_probs = tree.map_structure(lambda dist, value: dist.log_prob(value),
self._distributions, value)
log_probs = jnp.stack(tree.flatten(log_probs))
return jnp.sum(log_probs, axis=0)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,836 |
distrax._src.distributions.joint
|
mean
|
Calculates the mean.
|
def mean(self) -> distribution.EventT:
"""Calculates the mean."""
return tree.map_structure(lambda dist: dist.mean(), self._distributions)
|
(self) -> ~EventT
|
55,837 |
distrax._src.distributions.joint
|
median
|
Calculates the median.
|
def median(self) -> distribution.EventT:
"""Calculates the median."""
return tree.map_structure(lambda dist: dist.median(), self._distributions)
|
(self) -> ~EventT
|
55,838 |
distrax._src.distributions.joint
|
mode
|
Calculates the mode.
|
def mode(self) -> distribution.EventT:
"""Calculates the mode."""
return tree.map_structure(lambda dist: dist.mode(), self._distributions)
|
(self) -> ~EventT
|
55,846 |
distrax._src.bijectors.lambda_bijector
|
Lambda
|
Wrapper to automatically turn JAX functions into fully fledged bijectors.
This class takes in JAX functions that implement bijector methods (such as
`forward`, `inverse`, `forward_log_det_jacobian`, etc.), and constructs a
bijector out of them. Any functions not explicitly specified by the user will
be automatically derived from the existing functions where possible, by
tracing their JAXPR representation. Missing functions will be derived on
demand: if a missing function is not used, it will not be derived. At a
minimum, either `forward` or `inverse` must be given; all other methods will
be derived (where possible).
The Lambda bijector can be useful for creating simple one-line bijectors that
would otherwise be tedious to define. Examples of scalar bijectors that can be
easily constructed with Lambda are:
- Identity: `Lambda(lambda x: x)`
- Affine: `Lambda(lambda x: a*x + b)`
- Tanh: `Lambda(jnp.tanh)`
- Composite: `Lambda(lambda x: jnp.tanh(a*x + b))`
Requirements and limitations:
- Only functions composed entirely of invertible primitives can be
automatically inverted (see `bijection_utils.py` for a list of invertible
primitives). If the inverse is needed but is not automatically derivable,
the user must provide it explicitly.
- If log-determinant functions are not provided, Lambda will assume that
`forward` and `inverse` are scalar functions applied elementwise. If the
bijector is not meant to be scalar, its log-determinant functions must be
provided explicitly by the user.
|
class Lambda(base.Bijector):
"""Wrapper to automatically turn JAX functions into fully fledged bijectors.
This class takes in JAX functions that implement bijector methods (such as
`forward`, `inverse`, `forward_log_det_jacobian`, etc.), and constructs a
bijector out of them. Any functions not explicitly specified by the user will
be automatically derived from the existing functions where possible, by
tracing their JAXPR representation. Missing functions will be derived on
demand: if a missing function is not used, it will not be derived. At a
minimum, either `forward` or `inverse` must be given; all other methods will
be derived (where possible).
The Lambda bijector can be useful for creating simple one-line bijectors that
would otherwise be tedious to define. Examples of scalar bijectors that can be
easily constructed with Lambda are:
- Identity: `Lambda(lambda x: x)`
- Affine: `Lambda(lambda x: a*x + b)`
- Tanh: `Lambda(jnp.tanh)`
- Composite: `Lambda(lambda x: jnp.tanh(a*x + b))`
Requirements and limitations:
- Only functions composed entirely of invertible primitives can be
automatically inverted (see `bijection_utils.py` for a list of invertible
primitives). If the inverse is needed but is not automatically derivable,
the user must provide it explicitly.
- If log-determinant functions are not provided, Lambda will assume that
`forward` and `inverse` are scalar functions applied elementwise. If the
bijector is not meant to be scalar, its log-determinant functions must be
provided explicitly by the user.
"""
def __init__(
self,
forward: Optional[Callable[[Array], Array]] = None,
inverse: Optional[Callable[[Array], Array]] = None,
forward_log_det_jacobian: Optional[Callable[[Array], Array]] = None,
inverse_log_det_jacobian: Optional[Callable[[Array], Array]] = None,
event_ndims_in: Optional[int] = None,
event_ndims_out: Optional[int] = None,
is_constant_jacobian: Optional[bool] = None):
"""Initializes a Lambda bijector with methods specified as args."""
if forward is None and inverse is None:
raise ValueError("The Lambda bijector requires at least one of `forward` "
"or `inverse` to be specified, but neither is.")
jac_functions_specified = (forward_log_det_jacobian is not None
or inverse_log_det_jacobian is not None)
if jac_functions_specified:
if event_ndims_in is None:
raise ValueError("When log det Jacobian functions are specified, you "
"must also specify `event_ndims_in`.")
else:
if event_ndims_in is not None or event_ndims_out is not None:
raise ValueError("When log det Jacobian functions are unspecified, you "
"must leave `event_ndims_in` and `event_ndims_out` "
"unspecified; they will default to 0.")
event_ndims_in = 0
if is_constant_jacobian is None:
fn = inverse if forward is None else forward
is_constant_jacobian = transformations.is_constant_jacobian(fn)
super().__init__(
event_ndims_in=event_ndims_in,
event_ndims_out=event_ndims_out,
is_constant_jacobian=is_constant_jacobian)
self._forward = forward
self._inverse = inverse
self._forward_log_det_jacobian = forward_log_det_jacobian
self._inverse_log_det_jacobian = inverse_log_det_jacobian
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
self._check_forward_input_shape(x)
if self._forward is None:
self._forward = transformations.inv(self._inverse)
return self._forward(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
self._check_inverse_input_shape(y)
if self._inverse is None:
self._inverse = transformations.inv(self._forward)
return self._inverse(y)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
self._check_forward_input_shape(x)
if self._forward_log_det_jacobian is None:
self._forward_log_det_jacobian = transformations.log_det_scalar(
self.forward)
return self._forward_log_det_jacobian(x)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
if self._inverse_log_det_jacobian is None:
self._inverse_log_det_jacobian = transformations.log_det_scalar(
self.inverse)
return self._inverse_log_det_jacobian(y)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self.forward(x), self.forward_log_det_jacobian(x)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self.inverse(y), self.inverse_log_det_jacobian(y)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Lambda: # pylint: disable=unidiomatic-typecheck
return all((
self.forward is other.forward,
self.inverse is other.inverse,
self.forward_log_det_jacobian is other.forward_log_det_jacobian,
self.inverse_log_det_jacobian is other.inverse_log_det_jacobian,
self.forward_and_log_det is other.forward_and_log_det,
self.inverse_and_log_det is other.inverse_and_log_det,
))
return False
|
(forward: Optional[Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]] = None, inverse: Optional[Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]] = None, forward_log_det_jacobian: Optional[Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]] = None, inverse_log_det_jacobian: Optional[Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]] = None, event_ndims_in: Optional[int] = None, event_ndims_out: Optional[int] = None, is_constant_jacobian: Optional[bool] = None)
|
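A minimal sketch of the one-liner use case, assuming the public `distrax.Lambda` re-export: an affine map defined only by its forward function, with the inverse and log-determinant derived automatically because `mul` and `add` are invertible primitives.

import jax.numpy as jnp
import distrax

scale_shift = distrax.Lambda(lambda x: 2. * x + 1.)
y = scale_shift.forward(jnp.array([0., 1.]))     # [1., 3.]
x = scale_shift.inverse(y)                       # [0., 1.], derived by tracing the forward function
ldj = scale_shift.forward_log_det_jacobian(x)    # log(2) per element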
55,847 |
distrax._src.bijectors.lambda_bijector
|
__init__
|
Initializes a Lambda bijector with methods specified as args.
|
def __init__(
self,
forward: Optional[Callable[[Array], Array]] = None,
inverse: Optional[Callable[[Array], Array]] = None,
forward_log_det_jacobian: Optional[Callable[[Array], Array]] = None,
inverse_log_det_jacobian: Optional[Callable[[Array], Array]] = None,
event_ndims_in: Optional[int] = None,
event_ndims_out: Optional[int] = None,
is_constant_jacobian: Optional[bool] = None):
"""Initializes a Lambda bijector with methods specified as args."""
if forward is None and inverse is None:
raise ValueError("The Lambda bijector requires at least one of `forward` "
"or `inverse` to be specified, but neither is.")
jac_functions_specified = (forward_log_det_jacobian is not None
or inverse_log_det_jacobian is not None)
if jac_functions_specified:
if event_ndims_in is None:
raise ValueError("When log det Jacobian functions are specified, you "
"must also specify `event_ndims_in`.")
else:
if event_ndims_in is not None or event_ndims_out is not None:
raise ValueError("When log det Jacobian functions are unspecified, you "
"must leave `event_ndims_in` and `event_ndims_out` "
"unspecified; they will default to 0.")
event_ndims_in = 0
if is_constant_jacobian is None:
fn = inverse if forward is None else forward
is_constant_jacobian = transformations.is_constant_jacobian(fn)
super().__init__(
event_ndims_in=event_ndims_in,
event_ndims_out=event_ndims_out,
is_constant_jacobian=is_constant_jacobian)
self._forward = forward
self._inverse = inverse
self._forward_log_det_jacobian = forward_log_det_jacobian
self._inverse_log_det_jacobian = inverse_log_det_jacobian
|
(self, forward: Optional[Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]] = None, inverse: Optional[Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]] = None, forward_log_det_jacobian: Optional[Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]] = None, inverse_log_det_jacobian: Optional[Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]] = None, event_ndims_in: Optional[int] = None, event_ndims_out: Optional[int] = None, is_constant_jacobian: Optional[bool] = None)
|
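A sketch of the argument-validation rules above (the exp/log pair is purely illustrative): explicit log-det functions must be accompanied by `event_ndims_in`, whereas without them both `event_ndims` arguments must be left unset and default to 0.

import jax.numpy as jnp
import distrax

# Explicit log-det functions => `event_ndims_in` is required.
exp_bij = distrax.Lambda(
    forward=jnp.exp,
    inverse=jnp.log,
    forward_log_det_jacobian=lambda x: x,             # log|d exp(x)/dx| = x
    inverse_log_det_jacobian=lambda y: -jnp.log(y),
    event_ndims_in=0)

# No log-det functions => leave event_ndims unspecified (defaults to 0).
tanh_bij = distrax.Lambda(forward=jnp.tanh)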
55,851 |
distrax._src.bijectors.lambda_bijector
|
forward
|
Computes y = f(x).
|
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
self._check_forward_input_shape(x)
if self._forward is None:
self._forward = transformations.inv(self._inverse)
return self._forward(x)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,852 |
distrax._src.bijectors.lambda_bijector
|
forward_and_log_det
|
Computes y = f(x) and log|det J(f)(x)|.
|
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self.forward(x), self.forward_log_det_jacobian(x)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,853 |
distrax._src.bijectors.lambda_bijector
|
forward_log_det_jacobian
|
Computes log|det J(f)(x)|.
|
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
self._check_forward_input_shape(x)
if self._forward_log_det_jacobian is None:
self._forward_log_det_jacobian = transformations.log_det_scalar(
self.forward)
return self._forward_log_det_jacobian(x)
|
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,854 |
distrax._src.bijectors.lambda_bijector
|
inverse
|
Computes x = f^{-1}(y).
|
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
self._check_inverse_input_shape(y)
if self._inverse is None:
self._inverse = transformations.inv(self._forward)
return self._inverse(y)
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,855 |
distrax._src.bijectors.lambda_bijector
|
inverse_and_log_det
|
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
|
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self.inverse(y), self.inverse_log_det_jacobian(y)
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,856 |
distrax._src.bijectors.lambda_bijector
|
inverse_log_det_jacobian
|
Computes log|det J(f^{-1})(y)|.
|
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
if self._inverse_log_det_jacobian is None:
self._inverse_log_det_jacobian = transformations.log_det_scalar(
self.inverse)
return self._inverse_log_det_jacobian(y)
|
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,857 |
distrax._src.bijectors.lambda_bijector
|
same_as
|
Returns True if this bijector is guaranteed to be the same as `other`.
|
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Lambda: # pylint: disable=unidiomatic-typecheck
return all((
self.forward is other.forward,
self.inverse is other.inverse,
self.forward_log_det_jacobian is other.forward_log_det_jacobian,
self.inverse_log_det_jacobian is other.inverse_log_det_jacobian,
self.forward_and_log_det is other.forward_and_log_det,
self.inverse_and_log_det is other.inverse_and_log_det,
))
return False
|
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
|
55,859 |
distrax._src.distributions.laplace
|
Laplace
|
Laplace distribution with location `loc` and `scale` parameters.
|
class Laplace(distribution.Distribution):
"""Laplace distribution with location `loc` and `scale` parameters."""
equiv_tfp_cls = tfd.Laplace
def __init__(self, loc: Numeric, scale: Numeric):
"""Initializes a Laplace distribution.
Args:
loc: Mean of the distribution.
scale: Spread of the distribution.
"""
super().__init__()
self._loc = conversion.as_float_array(loc)
self._scale = conversion.as_float_array(scale)
self._batch_shape = jax.lax.broadcast_shapes(
self._loc.shape, self._scale.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
@property
def loc(self) -> Array:
"""Mean of the distribution."""
return jnp.broadcast_to(self._loc, self.batch_shape)
@property
def scale(self) -> Array:
"""Scale of the distribution."""
return jnp.broadcast_to(self._scale, self.batch_shape)
def _sample_from_std_laplace(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._loc, self._scale)
return jax.random.laplace(key, shape=out_shape, dtype=dtype)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_laplace(key, n)
return self._loc + self._scale * rnd
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
rnd = self._sample_from_std_laplace(key, n)
samples = self._loc + self._scale * rnd
log_prob = -jnp.abs(rnd) - math.log(2.) - jnp.log(self._scale)
return samples, log_prob
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
norm_value = self._standardize(value)
return -jnp.abs(norm_value) - math.log(2.) - jnp.log(self._scale)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return math.log(2.) + 1. + jnp.log(self.scale)
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
norm_value = self._standardize(value)
return 0.5 - 0.5 * jnp.sign(norm_value) * jnp.expm1(-jnp.abs(norm_value))
def _standardize(self, value: Array) -> Array:
return (value - self._loc) / self._scale
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
norm_value = self._standardize(value)
return _log_cdf_laplace(norm_value)
def log_survival_function(self, value: EventT) -> Array:
"""See `Distribution.log_survival_function`."""
norm_value = self._standardize(value)
return _log_cdf_laplace(-norm_value)
def mean(self) -> Array:
"""Calculates the mean."""
return self.loc
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return math.sqrt(2.) * self.scale
def variance(self) -> Array:
"""Calculates the variance."""
return 2. * jnp.square(self.scale)
def mode(self) -> Array:
"""Calculates the mode."""
return self.mean()
def median(self) -> Array:
"""Calculates the median."""
return self.mean()
def __getitem__(self, index) -> 'Laplace':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Laplace(loc=self.loc[index], scale=self.scale[index])
|
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
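A minimal sampling sketch, assuming the public `distrax.Laplace` re-export:

import jax
import distrax

dist = distrax.Laplace(loc=0., scale=1.)
samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(3,))
log_p = dist.log_prob(samples)   # -|x - loc| / scale - log(2 * scale)
h = dist.entropy()               # log(2) + 1 for scale = 1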
55,860 |
distrax._src.distributions.laplace
|
__getitem__
|
See `Distribution.__getitem__`.
|
def __getitem__(self, index) -> 'Laplace':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Laplace(loc=self.loc[index], scale=self.scale[index])
|
(self, index) -> distrax._src.distributions.laplace.Laplace
|
55,861 |
distrax._src.distributions.laplace
|
__init__
|
Initializes a Laplace distribution.
Args:
loc: Mean of the distribution.
scale: Spread of the distribution.
|
def __init__(self, loc: Numeric, scale: Numeric):
"""Initializes a Laplace distribution.
Args:
loc: Mean of the distribution.
scale: Spread of the distribution.
"""
super().__init__()
self._loc = conversion.as_float_array(loc)
self._scale = conversion.as_float_array(scale)
self._batch_shape = jax.lax.broadcast_shapes(
self._loc.shape, self._scale.shape)
|
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
|
55,864 |
distrax._src.distributions.laplace
|
_sample_from_std_laplace
| null |
def _sample_from_std_laplace(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._loc, self._scale)
return jax.random.laplace(key, shape=out_shape, dtype=dtype)
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,865 |
distrax._src.distributions.laplace
|
_sample_n
|
See `Distribution._sample_n`.
|
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_laplace(key, n)
return self._loc + self._scale * rnd
|
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,866 |
distrax._src.distributions.laplace
|
_sample_n_and_log_prob
|
See `Distribution._sample_n_and_log_prob`.
|
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
rnd = self._sample_from_std_laplace(key, n)
samples = self._loc + self._scale * rnd
log_prob = -jnp.abs(rnd) - math.log(2.) - jnp.log(self._scale)
return samples, log_prob
|
(self, key: jax.Array, n: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
|
55,867 |
distrax._src.distributions.laplace
|
_standardize
| null |
def _standardize(self, value: Array) -> Array:
return (value - self._loc) / self._scale
|
(self, value: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,868 |
distrax._src.distributions.laplace
|
cdf
|
See `Distribution.cdf`.
|
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
norm_value = self._standardize(value)
return 0.5 - 0.5 * jnp.sign(norm_value) * jnp.expm1(-jnp.abs(norm_value))
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
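The branch-free expression above is the two-sided Laplace CDF: for standardized value z >= 0 it reduces to 1 - 0.5 * exp(-z), and for z < 0 to 0.5 * exp(z); using `expm1` preserves precision near z = 0. A quick numeric check:

import jax.numpy as jnp

z = jnp.array([-1., 0., 1.])
cdf = 0.5 - 0.5 * jnp.sign(z) * jnp.expm1(-jnp.abs(z))
# [0.5 * exp(-1), 0.5, 1 - 0.5 * exp(-1)] ≈ [0.184, 0.5, 0.816]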
55,870 |
distrax._src.distributions.laplace
|
entropy
|
Calculates the Shannon entropy (in nats).
|
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return math.log(2.) + 1. + jnp.log(self.scale)
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
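The closed form follows from the density f(x) = exp(-|x - loc| / scale) / (2 * scale): the differential entropy is H = 1 + log(2 * scale) = log(2) + 1 + log(scale), so for scale = 1 it is approximately 1.693 nats.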
55,872 |
distrax._src.distributions.laplace
|
log_cdf
|
See `Distribution.log_cdf`.
|
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
norm_value = self._standardize(value)
return _log_cdf_laplace(norm_value)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,873 |
distrax._src.distributions.laplace
|
log_prob
|
See `Distribution.log_prob`.
|
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
norm_value = self._standardize(value)
return -jnp.abs(norm_value) - math.log(2.) - jnp.log(self._scale)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,874 |
distrax._src.distributions.laplace
|
log_survival_function
|
See `Distribution.log_survival_function`.
|
def log_survival_function(self, value: EventT) -> Array:
"""See `Distribution.log_survival_function`."""
norm_value = self._standardize(value)
return _log_cdf_laplace(-norm_value)
|
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|
55,876 |
distrax._src.distributions.laplace
|
median
|
Calculates the median.
|
def median(self) -> Array:
"""Calculates the median."""
return self.mean()
|
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
|