Dataset columns (name, dtype, value/length range):
  index       int64    values 0 to 731k
  package     string   lengths 2 to 98
  name        string   lengths 1 to 76
  docstring   string   lengths 0 to 281k
  code        string   lengths 4 to 1.07M
  signature   string   lengths 2 to 42.8k

Each record below lists these fields in order: index, package, name, docstring, code, signature.
56,221
distrax._src.distributions.one_hot_categorical
log_prob
See `Distribution.log_prob`.
def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" return jnp.sum(math.multiply_no_nan(self.logits, value), axis=-1)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,226
distrax._src.distributions.one_hot_categorical
mode
Calculates the mode.
def mode(self) -> Array: """Calculates the mode.""" preferences = self._probs if self._logits is None else self._logits assert preferences is not None greedy_index = jnp.argmax(preferences, axis=-1) return jax.nn.one_hot(greedy_index, self.num_categories).astype(self._dtype)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,227
distrax._src.distributions.one_hot_categorical
prob
See `Distribution.prob`.
def prob(self, value: EventT) -> Array: """See `Distribution.prob`.""" return jnp.sum(math.multiply_no_nan(self.probs, value), axis=-1)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
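A minimal usage sketch tying together the three `OneHotCategorical` methods above (`log_prob`, `mode`, `prob`). It assumes `distrax` and `jax` are installed; the logits are illustrative and not taken from the source.

import jax.numpy as jnp
import distrax

# Three categories with unnormalized log-probabilities.
dist = distrax.OneHotCategorical(logits=jnp.array([0.1, 1.5, -0.3]))

one_hot_event = jnp.array([0., 1., 0.])  # one-hot encoding of the second category
print(dist.log_prob(one_hot_event))      # log P(category 1)
print(dist.prob(one_hot_event))          # P(category 1)
print(dist.mode())                       # one-hot vector of the most likely category: [0., 1., 0.]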
56,234
distrax._src.distributions.quantized
Quantized
Distribution representing the quantization `Y = ceil(X)`. Given an input distribution `p(x)` over a univariate random variable `X`, sampling from a `Quantized` distribution amounts to sampling `x ~ p(x)` and then setting `y = ceil(x)`. The returned samples are integer-valued and of the same `dtype` as the base distribution.
class Quantized( base_distribution.Distribution[Array, Tuple[int, ...], jnp.dtype],): """Distribution representing the quantization `Y = ceil(X)`. Given an input distribution `p(x)` over a univariate random variable `X`, sampling from a `Quantized` distribution amounts to sampling `x ~ p(x)` and then setting `y = ceil(x)`. The returned samples are integer-valued and of the same `dtype` as the base distribution. """ equiv_tfp_cls = tfd.QuantizedDistribution def __init__(self, distribution: DistributionLike, low: Optional[Numeric] = None, high: Optional[Numeric] = None, eps: Optional[Numeric] = None): """Initializes a Quantized distribution. Args: distribution: The base distribution to be quantized. low: Lowest possible quantized value, such that samples are `y >= ceil(low)`. Its shape must broadcast with the shape of samples from `distribution` and must not result in additional batch dimensions after broadcasting. high: Highest possible quantized value, such that samples are `y <= floor(high)`. Its shape must broadcast with the shape of samples from `distribution` and must not result in additional batch dimensions after broadcasting. eps: An optional gap to enforce between "big" and "small". Useful for avoiding NANs in computing log_probs, when "big" and "small" are too close. """ self._dist: base_distribution.Distribution[Array, Tuple[ int, ...], jnp.dtype] = conversion.as_distribution(distribution) self._eps = eps if self._dist.event_shape: raise ValueError(f'The base distribution must be univariate, but its ' f'`event_shape` is {self._dist.event_shape}.') dtype = self._dist.dtype if low is None: self._low = None else: self._low = jnp.asarray(jnp.ceil(low), dtype=dtype) if len(self._low.shape) > len(self._dist.batch_shape): raise ValueError('The parameter `low` must not result in additional ' 'batch dimensions.') if high is None: self._high = None else: self._high = jnp.asarray(jnp.floor(high), dtype=dtype) if len(self._high.shape) > len(self._dist.batch_shape): raise ValueError('The parameter `high` must not result in additional ' 'batch dimensions.') super().__init__() @property def distribution( self ) -> base_distribution.Distribution[Array, Tuple[int, ...], jnp.dtype]: """Base distribution `p(x)`.""" return self._dist @property def low(self) -> Optional[Array]: """Lowest value that quantization returns.""" if self._low is None: return None return jnp.broadcast_to(self._low, self.batch_shape + self.event_shape) @property def high(self) -> Optional[Array]: """Highest value that quantization returns.""" if self._high is None: return None return jnp.broadcast_to(self._high, self.batch_shape + self.event_shape) @property def event_shape(self) -> Tuple[int, ...]: """Shape of event of distribution samples.""" event_shape = self.distribution.event_shape # TODO(b/149413467): Remove explicit casting when resolved. return cast(Tuple[int, ...], event_shape) @property def batch_shape(self) -> Tuple[int, ...]: """Shape of batch of distribution samples.""" return self.distribution.batch_shape def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" samples = self.distribution.sample(seed=key, sample_shape=n) samples = jnp.ceil(samples) # Apply overflow and underflow conditions. 
if self.low is not None: samples = jnp.where(samples < self.low, self.low, samples) if self.high is not None: samples = jnp.where(samples > self.high, self.high, samples) return samples def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]: """See `Distribution._sample_n_and_log_prob`.""" samples = self._sample_n(key, n) log_cdf = self.distribution.log_cdf(samples) log_cdf_m1 = self.distribution.log_cdf(samples - 1.) log_sf = self.distribution.log_survival_function(samples) log_sf_m1 = self.distribution.log_survival_function(samples - 1.) if self.high is not None: # `samples - 1.` is definitely lower than `high`. log_cdf = jnp.where(samples < self.high, log_cdf, 0.) log_sf = jnp.where(samples < self.high, log_sf, -jnp.inf) if self.low is not None: # `samples` is definitely greater than or equal to `low`. log_cdf_m1 = jnp.where(samples - 1. < self.low, -jnp.inf, log_cdf_m1) log_sf_m1 = jnp.where(samples - 1. < self.low, 0., log_sf_m1) # Use the survival function instead of the CDF when its value is smaller, # which happens to the right of the median of the distribution. big = jnp.where(log_sf < log_cdf, log_sf_m1, log_cdf) small = jnp.where(log_sf < log_cdf, log_sf, log_cdf_m1) log_probs = math.log_expbig_minus_expsmall(big, small) return samples, log_probs def log_prob(self, value: EventT) -> Array: """Calculates the log probability of an event. This implementation differs slightly from the one in TFP, as it returns `-jnp.inf` on non-integer values instead of returning the log prob of the floor of the input. In addition, this implementation also returns `-jnp.inf` on inputs that are outside the support of the distribution (as opposed to `nan`, like TFP does). On other integer values, both implementations are identical. Similar to TFP, the log prob is computed using either the CDF or the survival function to improve numerical stability. With infinite precision the two computations would be equal. Args: value: An event. Returns: The log probability log P(value). """ log_cdf = self.log_cdf(value) log_cdf_m1 = self.log_cdf(value - 1.) log_sf = self.log_survival_function(value) log_sf_m1 = self.log_survival_function(value - 1.) # Use the survival function instead of the CDF when its value is smaller, # which happens to the right of the median of the distribution. big = jnp.where(log_sf < log_cdf, log_sf_m1, log_cdf) small = jnp.where(log_sf < log_cdf, log_sf, log_cdf_m1) if self._eps is not None: # use stop_gradient to block updating in this case big = jnp.where(big - small > self._eps, big, jax.lax.stop_gradient(small) + self._eps) log_probs = math.log_expbig_minus_expsmall(big, small) # Return -inf when evaluating on non-integer value. is_integer = jnp.where(value > jnp.floor(value), False, True) log_probs = jnp.where(is_integer, log_probs, -jnp.inf) # Return -inf and not NaN when outside of [low, high]. # If the CDF is used, `value > high` is already treated correctly; # to fix the return value for `value < low` we test whether `log_cdf` is # finite; `log_sf_m1` will be `0.` in this regime. # If the survival function is used the reverse case applies; to fix the # case `value > high` we test whether `log_sf_m1` is finite; `log_cdf` will # be `0.` in this regime. is_outside = jnp.logical_or(jnp.isinf(log_cdf), jnp.isinf(log_sf_m1)) log_probs = jnp.where(is_outside, -jnp.inf, log_probs) return log_probs def prob(self, value: EventT) -> Array: """Calculates the probability of an event. 
This implementation differs slightly from the one in TFP, as it returns 0 on non-integer values instead of returning the prob of the floor of the input. It is identical for integer values. Similar to TFP, the probability is computed using either the CDF or the survival function to improve numerical stability. With infinite precision the two computations would be equal. Args: value: An event. Returns: The probability P(value). """ cdf = self.cdf(value) cdf_m1 = self.cdf(value - 1.) sf = self.survival_function(value) sf_m1 = self.survival_function(value - 1.) # Use the survival function instead of the CDF when its value is smaller, # which happens to the right of the median of the distribution. probs = jnp.where(sf < cdf, sf_m1 - sf, cdf - cdf_m1) # Return 0. when evaluating on non-integer value. is_integer = jnp.where(value > jnp.floor(value), False, True) probs = jnp.where(is_integer, probs, 0.) return probs def log_cdf(self, value: EventT) -> Array: """See `Distribution.log_cdf`.""" # The log CDF of a quantized distribution is piecewise constant on half-open # intervals: # ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ... # with log CDF(n) <= log CDF(n+1), because the distribution only has mass on # integer values. Therefore: P[Y <= value] = P[Y <= floor(value)]. y = jnp.floor(value) result = self.distribution.log_cdf(y) # Update result outside of the interval [low, high]. if self.low is not None: result = jnp.where(y < self.low, -jnp.inf, result) if self.high is not None: result = jnp.where(y < self.high, result, 0.) return result def cdf(self, value: EventT) -> Array: """See `Distribution.cdf`.""" # The CDF of a quantized distribution is piecewise constant on half-open # intervals: # ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ... # with CDF(n) <= CDF(n+1), because the distribution only has mass on integer # values. Therefore: P[Y <= value] = P[Y <= floor(value)]. y = jnp.floor(value) result = self.distribution.cdf(y) # Update result outside of the interval [low, high]. if self.low is not None: result = jnp.where(y < self.low, 0., result) if self.high is not None: result = jnp.where(y < self.high, result, 1.) return result def log_survival_function(self, value: EventT) -> Array: """Calculates the log of the survival function of an event. This implementation differs slightly from TFP, in that it returns the correct log of the survival function for non-integer values, that is, it always equates to `log(1 - CDF(value))`. It is identical for integer values. Args: value: An event. Returns: The log of the survival function `log P[Y > value]`. """ # The log of the survival function of a quantized distribution is piecewise # constant on half-open intervals: # ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ... # with log sf(n) >= log sf(n+1), because the distribution only has mass on # integer values. Therefore: log P[Y > value] = log P[Y > floor(value)]. y = jnp.floor(value) result = self.distribution.log_survival_function(y) # Update result outside of the interval [low, high]. if self._low is not None: result = jnp.where(y < self._low, 0., result) if self._high is not None: result = jnp.where(y < self._high, result, -jnp.inf) return result def survival_function(self, value: EventT) -> Array: """Calculates the survival function of an event. This implementation differs slightly from TFP, in that it returns the correct survival function for non-integer values, that is, it always equates to `1 - CDF(value)`. It is identical for integer values. Args: value: An event. Returns: The survival function `P[Y > value]`. 
""" # The survival function of a quantized distribution is piecewise # constant on half-open intervals: # ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ... # with sf(n) >= sf(n+1), because the distribution only has mass on # integer values. Therefore: P[Y > value] = P[Y > floor(value)]. y = jnp.floor(value) result = self.distribution.survival_function(y) # Update result outside of the interval [low, high]. if self._low is not None: result = jnp.where(y < self._low, 1., result) if self._high is not None: result = jnp.where(y < self._high, result, 0.) return result def __getitem__(self, index) -> 'Quantized': """See `Distribution.__getitem__`.""" index = base_distribution.to_batch_shape_index(self.batch_shape, index) low = None if self._low is None else self.low[index] high = None if self._high is None else self.high[index] return Quantized(distribution=self.distribution[index], low=low, high=high)
(distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], low: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None, high: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None, eps: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None)
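A short, hedged sketch of constructing and using a `Quantized` distribution. The `Normal` base distribution and the bounds are illustrative assumptions, not taken from the source.

import jax
import jax.numpy as jnp
import distrax

base = distrax.Normal(loc=0., scale=3.)                   # univariate base distribution p(x)
quantized = distrax.Quantized(base, low=-5., high=5.)

key = jax.random.PRNGKey(0)
samples = quantized.sample(seed=key, sample_shape=(4,))   # integer-valued floats in [ceil(low), floor(high)]
log_probs = quantized.log_prob(samples)
print(samples, log_probs)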
56,235
distrax._src.distributions.quantized
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'Quantized': """See `Distribution.__getitem__`.""" index = base_distribution.to_batch_shape_index(self.batch_shape, index) low = None if self._low is None else self.low[index] high = None if self._high is None else self.high[index] return Quantized(distribution=self.distribution[index], low=low, high=high)
(self, index) -> distrax._src.distributions.quantized.Quantized
56,236
distrax._src.distributions.quantized
__init__
Initializes a Quantized distribution. Args: distribution: The base distribution to be quantized. low: Lowest possible quantized value, such that samples are `y >= ceil(low)`. Its shape must broadcast with the shape of samples from `distribution` and must not result in additional batch dimensions after broadcasting. high: Highest possible quantized value, such that samples are `y <= floor(high)`. Its shape must broadcast with the shape of samples from `distribution` and must not result in additional batch dimensions after broadcasting. eps: An optional gap to enforce between "big" and "small". Useful for avoiding NANs in computing log_probs, when "big" and "small" are too close.
def __init__(self, distribution: DistributionLike, low: Optional[Numeric] = None, high: Optional[Numeric] = None, eps: Optional[Numeric] = None): """Initializes a Quantized distribution. Args: distribution: The base distribution to be quantized. low: Lowest possible quantized value, such that samples are `y >= ceil(low)`. Its shape must broadcast with the shape of samples from `distribution` and must not result in additional batch dimensions after broadcasting. high: Highest possible quantized value, such that samples are `y <= floor(high)`. Its shape must broadcast with the shape of samples from `distribution` and must not result in additional batch dimensions after broadcasting. eps: An optional gap to enforce between "big" and "small". Useful for avoiding NANs in computing log_probs, when "big" and "small" are too close. """ self._dist: base_distribution.Distribution[Array, Tuple[ int, ...], jnp.dtype] = conversion.as_distribution(distribution) self._eps = eps if self._dist.event_shape: raise ValueError(f'The base distribution must be univariate, but its ' f'`event_shape` is {self._dist.event_shape}.') dtype = self._dist.dtype if low is None: self._low = None else: self._low = jnp.asarray(jnp.ceil(low), dtype=dtype) if len(self._low.shape) > len(self._dist.batch_shape): raise ValueError('The parameter `low` must not result in additional ' 'batch dimensions.') if high is None: self._high = None else: self._high = jnp.asarray(jnp.floor(high), dtype=dtype) if len(self._high.shape) > len(self._dist.batch_shape): raise ValueError('The parameter `high` must not result in additional ' 'batch dimensions.') super().__init__()
(self, distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], low: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None, high: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None, eps: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int, NoneType] = None)
56,239
distrax._src.distributions.quantized
_sample_n
See `Distribution._sample_n`.
def _sample_n(self, key: PRNGKey, n: int) -> Array: """See `Distribution._sample_n`.""" samples = self.distribution.sample(seed=key, sample_shape=n) samples = jnp.ceil(samples) # Apply overflow and underflow conditions. if self.low is not None: samples = jnp.where(samples < self.low, self.low, samples) if self.high is not None: samples = jnp.where(samples > self.high, self.high, samples) return samples
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,240
distrax._src.distributions.quantized
_sample_n_and_log_prob
See `Distribution._sample_n_and_log_prob`.
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]: """See `Distribution._sample_n_and_log_prob`.""" samples = self._sample_n(key, n) log_cdf = self.distribution.log_cdf(samples) log_cdf_m1 = self.distribution.log_cdf(samples - 1.) log_sf = self.distribution.log_survival_function(samples) log_sf_m1 = self.distribution.log_survival_function(samples - 1.) if self.high is not None: # `samples - 1.` is definitely lower than `high`. log_cdf = jnp.where(samples < self.high, log_cdf, 0.) log_sf = jnp.where(samples < self.high, log_sf, -jnp.inf) if self.low is not None: # `samples` is definitely greater than or equal to `low`. log_cdf_m1 = jnp.where(samples - 1. < self.low, -jnp.inf, log_cdf_m1) log_sf_m1 = jnp.where(samples - 1. < self.low, 0., log_sf_m1) # Use the survival function instead of the CDF when its value is smaller, # which happens to the right of the median of the distribution. big = jnp.where(log_sf < log_cdf, log_sf_m1, log_cdf) small = jnp.where(log_sf < log_cdf, log_sf, log_cdf_m1) log_probs = math.log_expbig_minus_expsmall(big, small) return samples, log_probs
(self, key: jax.Array, n: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,241
distrax._src.distributions.quantized
cdf
See `Distribution.cdf`.
def cdf(self, value: EventT) -> Array: """See `Distribution.cdf`.""" # The CDF of a quantized distribution is piecewise constant on half-open # intervals: # ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ... # with CDF(n) <= CDF(n+1), because the distribution only has mass on integer # values. Therefore: P[Y <= value] = P[Y <= floor(value)]. y = jnp.floor(value) result = self.distribution.cdf(y) # Update result outside of the interval [low, high]. if self.low is not None: result = jnp.where(y < self.low, 0., result) if self.high is not None: result = jnp.where(y < self.high, result, 1.) return result
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,245
distrax._src.distributions.quantized
log_cdf
See `Distribution.log_cdf`.
def log_cdf(self, value: EventT) -> Array: """See `Distribution.log_cdf`.""" # The log CDF of a quantized distribution is piecewise constant on half-open # intervals: # ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ... # with log CDF(n) <= log CDF(n+1), because the distribution only has mass on # integer values. Therefore: P[Y <= value] = P[Y <= floor(value)]. y = jnp.floor(value) result = self.distribution.log_cdf(y) # Update result outside of the interval [low, high]. if self.low is not None: result = jnp.where(y < self.low, -jnp.inf, result) if self.high is not None: result = jnp.where(y < self.high, result, 0.) return result
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,246
distrax._src.distributions.quantized
log_prob
Calculates the log probability of an event. This implementation differs slightly from the one in TFP, as it returns `-jnp.inf` on non-integer values instead of returning the log prob of the floor of the input. In addition, this implementation also returns `-jnp.inf` on inputs that are outside the support of the distribution (as opposed to `nan`, like TFP does). On other integer values, both implementations are identical. Similar to TFP, the log prob is computed using either the CDF or the survival function to improve numerical stability. With infinite precision the two computations would be equal. Args: value: An event. Returns: The log probability log P(value).
def log_prob(self, value: EventT) -> Array: """Calculates the log probability of an event. This implementation differs slightly from the one in TFP, as it returns `-jnp.inf` on non-integer values instead of returning the log prob of the floor of the input. In addition, this implementation also returns `-jnp.inf` on inputs that are outside the support of the distribution (as opposed to `nan`, like TFP does). On other integer values, both implementations are identical. Similar to TFP, the log prob is computed using either the CDF or the survival function to improve numerical stability. With infinite precision the two computations would be equal. Args: value: An event. Returns: The log probability log P(value). """ log_cdf = self.log_cdf(value) log_cdf_m1 = self.log_cdf(value - 1.) log_sf = self.log_survival_function(value) log_sf_m1 = self.log_survival_function(value - 1.) # Use the survival function instead of the CDF when its value is smaller, # which happens to the right of the median of the distribution. big = jnp.where(log_sf < log_cdf, log_sf_m1, log_cdf) small = jnp.where(log_sf < log_cdf, log_sf, log_cdf_m1) if self._eps is not None: # use stop_gradient to block updating in this case big = jnp.where(big - small > self._eps, big, jax.lax.stop_gradient(small) + self._eps) log_probs = math.log_expbig_minus_expsmall(big, small) # Return -inf when evaluating on non-integer value. is_integer = jnp.where(value > jnp.floor(value), False, True) log_probs = jnp.where(is_integer, log_probs, -jnp.inf) # Return -inf and not NaN when outside of [low, high]. # If the CDF is used, `value > high` is already treated correctly; # to fix the return value for `value < low` we test whether `log_cdf` is # finite; `log_sf_m1` will be `0.` in this regime. # If the survival function is used the reverse case applies; to fix the # case `value > high` we test whether `log_sf_m1` is finite; `log_cdf` will # be `0.` in this regime. is_outside = jnp.logical_or(jnp.isinf(log_cdf), jnp.isinf(log_sf_m1)) log_probs = jnp.where(is_outside, -jnp.inf, log_probs) return log_probs
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
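As a sanity check of the behaviour described in the `log_prob` docstring, for an integer `n` inside `[low, high]` the result should agree (up to floating-point error) with `log(cdf(n) - cdf(n - 1))`, while non-integer inputs yield `-inf`. A minimal sketch, with an illustrative `Logistic` base distribution:

import jax.numpy as jnp
import distrax

quantized = distrax.Quantized(distrax.Logistic(loc=0., scale=1.), low=-3., high=3.)

n = jnp.array(1.)
direct = quantized.log_prob(n)
via_cdf = jnp.log(quantized.cdf(n) - quantized.cdf(n - 1.))
print(direct, via_cdf)                     # approximately equal
print(quantized.log_prob(jnp.array(0.5)))  # -inf: non-integer values carry no mass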
56,247
distrax._src.distributions.quantized
log_survival_function
Calculates the log of the survival function of an event. This implementation differs slightly from TFP, in that it returns the correct log of the survival function for non-integer values, that is, it always equates to `log(1 - CDF(value))`. It is identical for integer values. Args: value: An event. Returns: The log of the survival function `log P[Y > value]`.
def log_survival_function(self, value: EventT) -> Array: """Calculates the log of the survival function of an event. This implementation differs slightly from TFP, in that it returns the correct log of the survival function for non-integer values, that is, it always equates to `log(1 - CDF(value))`. It is identical for integer values. Args: value: An event. Returns: The log of the survival function `log P[Y > value]`. """ # The log of the survival function of a quantized distribution is piecewise # constant on half-open intervals: # ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ... # with log sf(n) >= log sf(n+1), because the distribution only has mass on # integer values. Therefore: log P[Y > value] = log P[Y > floor(value)]. y = jnp.floor(value) result = self.distribution.log_survival_function(y) # Update result outside of the interval [low, high]. if self._low is not None: result = jnp.where(y < self._low, 0., result) if self._high is not None: result = jnp.where(y < self._high, result, -jnp.inf) return result
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,251
distrax._src.distributions.quantized
prob
Calculates the probability of an event. This implementation differs slightly from the one in TFP, as it returns 0 on non-integer values instead of returning the prob of the floor of the input. It is identical for integer values. Similar to TFP, the probability is computed using either the CDF or the survival function to improve numerical stability. With infinite precision the two computations would be equal. Args: value: An event. Returns: The probability P(value).
def prob(self, value: EventT) -> Array: """Calculates the probability of an event. This implementation differs slightly from the one in TFP, as it returns 0 on non-integer values instead of returning the prob of the floor of the input. It is identical for integer values. Similar to TFP, the probability is computed using either the CDF or the survival function to improve numerical stability. With infinite precision the two computations would be equal. Args: value: An event. Returns: The probability P(value). """ cdf = self.cdf(value) cdf_m1 = self.cdf(value - 1.) sf = self.survival_function(value) sf_m1 = self.survival_function(value - 1.) # Use the survival function instead of the CDF when its value is smaller, # which happens to the right of the median of the distribution. probs = jnp.where(sf < cdf, sf_m1 - sf, cdf - cdf_m1) # Return 0. when evaluating on non-integer value. is_integer = jnp.where(value > jnp.floor(value), False, True) probs = jnp.where(is_integer, probs, 0.) return probs
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,255
distrax._src.distributions.quantized
survival_function
Calculates the survival function of an event. This implementation differs slightly from TFP, in that it returns the correct survival function for non-integer values, that is, it always equates to `1 - CDF(value)`. It is identical for integer values. Args: value: An event. Returns: The survival function `P[Y > value]`.
def survival_function(self, value: EventT) -> Array: """Calculates the survival function of an event. This implementation differs slightly from TFP, in that it returns the correct survival function for non-integer values, that is, it always equates to `1 - CDF(value)`. It is identical for integer values. Args: value: An event. Returns: The survival function `P[Y > value]`. """ # The survival function of a quantized distribution is piecewise # constant on half-open intervals: # ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ... # with sf(n) >= sf(n+1), because the distribution only has mass on # integer values. Therefore: P[Y > value] = P[Y > floor(value)]. y = jnp.floor(value) result = self.distribution.survival_function(y) # Update result outside of the interval [low, high]. if self._low is not None: result = jnp.where(y < self._low, 1., result) if self._high is not None: result = jnp.where(y < self._high, result, 0.) return result
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,258
distrax._src.bijectors.rational_quadratic_spline
RationalQuadraticSpline
A rational-quadratic spline bijector. Implements the spline bijector introduced by: > Durkan et al., Neural Spline Flows, https://arxiv.org/abs/1906.04032, 2019. This bijector is a monotonically increasing spline operating on an interval [a, b], such that f(a) = a and f(b) = b. Outside the interval [a, b], the bijector defaults to a linear transformation whose slope matches that of the spline at the nearest boundary (either a or b). The range boundaries a and b are hyperparameters passed to the constructor. The spline on the interval [a, b] consists of `num_bins` segments, on each of which the spline takes the form of a rational quadratic (ratio of two quadratic polynomials). The first derivative of the bijector is guaranteed to be continuous on the whole real line. The second derivative is generally not continuous at the knot points (bin boundaries). The spline is parameterized by the bin sizes on the x and y axis, and by the slopes at the knot points. All spline parameters are passed to the constructor as an unconstrained array `params` of shape `[..., 3 * num_bins + 1]`. The spline parameters are extracted from `params`, and are reparameterized internally as appropriate. The number of bins is a hyperparameter, and is implicitly defined by the last dimension of `params`. This bijector is applied elementwise. Given some input `x`, the parameters `params` and the input `x` are broadcast against each other. For example, suppose `x` is of shape `[N, D]`. Then: - If `params` is of shape `[3 * num_bins + 1]`, the same spline is identically applied to each element of `x`. - If `params` is of shape `[D, 3 * num_bins + 1]`, the same spline is applied along the first axis of `x` but a different spline is applied along the second axis of `x`. - If `params` is of shape `[N, D, 3 * num_bins + 1]`, a different spline is applied to each element of `x`. - If `params` is of shape `[M, N, D, 3 * num_bins + 1]`, `M` different splines are applied to each element of `x`, and the output is of shape `[M, N, D]`.
class RationalQuadraticSpline(base.Bijector): """A rational-quadratic spline bijector. Implements the spline bijector introduced by: > Durkan et al., Neural Spline Flows, https://arxiv.org/abs/1906.04032, 2019. This bijector is a monotonically increasing spline operating on an interval [a, b], such that f(a) = a and f(b) = b. Outside the interval [a, b], the bijector defaults to a linear transformation whose slope matches that of the spline at the nearest boundary (either a or b). The range boundaries a and b are hyperparameters passed to the constructor. The spline on the interval [a, b] consists of `num_bins` segments, on each of which the spline takes the form of a rational quadratic (ratio of two quadratic polynomials). The first derivative of the bijector is guaranteed to be continuous on the whole real line. The second derivative is generally not continuous at the knot points (bin boundaries). The spline is parameterized by the bin sizes on the x and y axis, and by the slopes at the knot points. All spline parameters are passed to the constructor as an unconstrained array `params` of shape `[..., 3 * num_bins + 1]`. The spline parameters are extracted from `params`, and are reparameterized internally as appropriate. The number of bins is a hyperparameter, and is implicitly defined by the last dimension of `params`. This bijector is applied elementwise. Given some input `x`, the parameters `params` and the input `x` are broadcast against each other. For example, suppose `x` is of shape `[N, D]`. Then: - If `params` is of shape `[3 * num_bins + 1]`, the same spline is identically applied to each element of `x`. - If `params` is of shape `[D, 3 * num_bins + 1]`, the same spline is applied along the first axis of `x` but a different spline is applied along the second axis of `x`. - If `params` is of shape `[N, D, 3 * num_bins + 1]`, a different spline is applied to each element of `x`. - If `params` is of shape `[M, N, D, 3 * num_bins + 1]`, `M` different splines are applied to each element of `x`, and the output is of shape `[M, N, D]`. """ def __init__(self, params: Array, range_min: float, range_max: float, boundary_slopes: str = 'unconstrained', min_bin_size: float = 1e-4, min_knot_slope: float = 1e-4): """Initializes a RationalQuadraticSpline bijector. Args: params: array of shape `[..., 3 * num_bins + 1]`, the unconstrained parameters of the bijector. The number of bins is implicitly defined by the last dimension of `params`. The parameters can take arbitrary unconstrained values; the bijector will reparameterize them internally and make sure they obey appropriate constraints. If `params` is the all-zeros array, the bijector becomes the identity function everywhere on the real line. range_min: the lower bound of the spline's range. Below `range_min`, the bijector defaults to a linear transformation. range_max: the upper bound of the spline's range. Above `range_max`, the bijector defaults to a linear transformation. boundary_slopes: controls the behaviour of the slope of the spline at the range boundaries (`range_min` and `range_max`). It is used to enforce certain boundary conditions on the spline. Available options are: - 'unconstrained': no boundary conditions are imposed; the slopes at the boundaries can vary freely. - 'lower_identity': the slope of the spline is set equal to 1 at the lower boundary (`range_min`). This makes the bijector equal to the identity function for values less than `range_min`. 
- 'upper_identity': similar to `lower_identity`, but now the slope of the spline is set equal to 1 at the upper boundary (`range_max`). This makes the bijector equal to the identity function for values greater than `range_max`. - 'identity': combines the effects of 'lower_identity' and 'upper_identity' together. The slope of the spline is set equal to 1 at both boundaries (`range_min` and `range_max`). This makes the bijector equal to the identity function outside the interval `[range_min, range_max]`. - 'circular': makes the slope at `range_min` and `range_max` be the same. This implements the "circular spline" introduced by: > Rezende et al., Normalizing Flows on Tori and Spheres, > https://arxiv.org/abs/2002.02428, 2020. This option should be used when the spline operates on a circle parameterized by an angle in the interval `[range_min, range_max]`, where `range_min` and `range_max` correspond to the same point on the circle. min_bin_size: The minimum bin size, in either the x or the y axis. Should be a small positive number, chosen for numerical stability. Guarantees that no bin in either the x or the y axis will be less than this value. min_knot_slope: The minimum slope at each knot point. Should be a small positive number, chosen for numerical stability. Guarantess that no knot will have a slope less than this value. """ super().__init__(event_ndims_in=0) if params.shape[-1] % 3 != 1 or params.shape[-1] < 4: raise ValueError(f'The last dimension of `params` must have size' f' `3 * num_bins + 1` and `num_bins` must be at least 1.' f' Got size {params.shape[-1]}.') if range_min >= range_max: raise ValueError(f'`range_min` must be less than `range_max`. Got' f' `range_min={range_min}` and `range_max={range_max}`.') if min_bin_size <= 0.: raise ValueError(f'The minimum bin size must be positive; got' f' {min_bin_size}.') if min_knot_slope <= 0.: raise ValueError(f'The minimum knot slope must be positive; got' f' {min_knot_slope}.') self._dtype = params.dtype self._num_bins = (params.shape[-1] - 1) // 3 # Extract unnormalized parameters. unnormalized_bin_widths = params[..., :self._num_bins] unnormalized_bin_heights = params[..., self._num_bins : 2 * self._num_bins] unnormalized_knot_slopes = params[..., 2 * self._num_bins:] # Normalize bin sizes and compute bin positions on the x and y axis. range_size = range_max - range_min bin_widths = _normalize_bin_sizes(unnormalized_bin_widths, range_size, min_bin_size) bin_heights = _normalize_bin_sizes(unnormalized_bin_heights, range_size, min_bin_size) x_pos = range_min + jnp.cumsum(bin_widths[..., :-1], axis=-1) y_pos = range_min + jnp.cumsum(bin_heights[..., :-1], axis=-1) pad_shape = params.shape[:-1] + (1,) pad_below = jnp.full(pad_shape, range_min, dtype=self._dtype) pad_above = jnp.full(pad_shape, range_max, dtype=self._dtype) self._x_pos = jnp.concatenate([pad_below, x_pos, pad_above], axis=-1) self._y_pos = jnp.concatenate([pad_below, y_pos, pad_above], axis=-1) # Normalize knot slopes and enforce requested boundary conditions. 
knot_slopes = _normalize_knot_slopes(unnormalized_knot_slopes, min_knot_slope) if boundary_slopes == 'unconstrained': self._knot_slopes = knot_slopes elif boundary_slopes == 'lower_identity': ones = jnp.ones(pad_shape, self._dtype) self._knot_slopes = jnp.concatenate([ones, knot_slopes[..., 1:]], axis=-1) elif boundary_slopes == 'upper_identity': ones = jnp.ones(pad_shape, self._dtype) self._knot_slopes = jnp.concatenate( [knot_slopes[..., :-1], ones], axis=-1) elif boundary_slopes == 'identity': ones = jnp.ones(pad_shape, self._dtype) self._knot_slopes = jnp.concatenate( [ones, knot_slopes[..., 1:-1], ones], axis=-1) elif boundary_slopes == 'circular': self._knot_slopes = jnp.concatenate( [knot_slopes[..., :-1], knot_slopes[..., :1]], axis=-1) else: raise ValueError(f'Unknown option for boundary slopes:' f' `{boundary_slopes}`.') @property def num_bins(self) -> int: """The number of segments on the interval.""" return self._num_bins @property def knot_slopes(self) -> Array: """The slopes at the knot points.""" return self._knot_slopes @property def x_pos(self) -> Array: """The bin boundaries on the `x`-axis.""" return self._x_pos @property def y_pos(self) -> Array: """The bin boundaries on the `y`-axis.""" return self._y_pos def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" fn = jnp.vectorize( _rational_quadratic_spline_fwd, signature='(),(n),(n),(n)->(),()') y, logdet = fn(x, self._x_pos, self._y_pos, self._knot_slopes) return y, logdet def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" fn = jnp.vectorize( _rational_quadratic_spline_inv, signature='(),(n),(n),(n)->(),()') x, logdet = fn(y, self._x_pos, self._y_pos, self._knot_slopes) return x, logdet
(params: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], range_min: float, range_max: float, boundary_slopes: str = 'unconstrained', min_bin_size: float = 0.0001, min_knot_slope: float = 0.0001)
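A minimal sketch of constructing and applying the spline bijector. The parameter values are illustrative; as the docstring notes, an all-zeros `params` array makes the bijector the identity function.

import jax.numpy as jnp
import distrax

num_bins = 4
params = jnp.zeros((3 * num_bins + 1,))  # all-zeros parameters -> identity function
spline = distrax.RationalQuadraticSpline(params, range_min=-1., range_max=1.)

x = jnp.array([-0.5, 0.0, 0.7, 2.0])     # the last value lies outside [range_min, range_max]
y, fwd_logdet = spline.forward_and_log_det(x)
x_back, inv_logdet = spline.inverse_and_log_det(y)
print(y)                                  # equals x for the all-zeros parameterization
print(fwd_logdet + inv_logdet)            # approximately zero: forward and inverse log-dets cancel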
56,259
distrax._src.bijectors.rational_quadratic_spline
__init__
Initializes a RationalQuadraticSpline bijector. Args: params: array of shape `[..., 3 * num_bins + 1]`, the unconstrained parameters of the bijector. The number of bins is implicitly defined by the last dimension of `params`. The parameters can take arbitrary unconstrained values; the bijector will reparameterize them internally and make sure they obey appropriate constraints. If `params` is the all-zeros array, the bijector becomes the identity function everywhere on the real line. range_min: the lower bound of the spline's range. Below `range_min`, the bijector defaults to a linear transformation. range_max: the upper bound of the spline's range. Above `range_max`, the bijector defaults to a linear transformation. boundary_slopes: controls the behaviour of the slope of the spline at the range boundaries (`range_min` and `range_max`). It is used to enforce certain boundary conditions on the spline. Available options are: - 'unconstrained': no boundary conditions are imposed; the slopes at the boundaries can vary freely. - 'lower_identity': the slope of the spline is set equal to 1 at the lower boundary (`range_min`). This makes the bijector equal to the identity function for values less than `range_min`. - 'upper_identity': similar to `lower_identity`, but now the slope of the spline is set equal to 1 at the upper boundary (`range_max`). This makes the bijector equal to the identity function for values greater than `range_max`. - 'identity': combines the effects of 'lower_identity' and 'upper_identity' together. The slope of the spline is set equal to 1 at both boundaries (`range_min` and `range_max`). This makes the bijector equal to the identity function outside the interval `[range_min, range_max]`. - 'circular': makes the slope at `range_min` and `range_max` be the same. This implements the "circular spline" introduced by: > Rezende et al., Normalizing Flows on Tori and Spheres, > https://arxiv.org/abs/2002.02428, 2020. This option should be used when the spline operates on a circle parameterized by an angle in the interval `[range_min, range_max]`, where `range_min` and `range_max` correspond to the same point on the circle. min_bin_size: The minimum bin size, in either the x or the y axis. Should be a small positive number, chosen for numerical stability. Guarantees that no bin in either the x or the y axis will be less than this value. min_knot_slope: The minimum slope at each knot point. Should be a small positive number, chosen for numerical stability. Guarantess that no knot will have a slope less than this value.
def __init__(self, params: Array, range_min: float, range_max: float, boundary_slopes: str = 'unconstrained', min_bin_size: float = 1e-4, min_knot_slope: float = 1e-4): """Initializes a RationalQuadraticSpline bijector. Args: params: array of shape `[..., 3 * num_bins + 1]`, the unconstrained parameters of the bijector. The number of bins is implicitly defined by the last dimension of `params`. The parameters can take arbitrary unconstrained values; the bijector will reparameterize them internally and make sure they obey appropriate constraints. If `params` is the all-zeros array, the bijector becomes the identity function everywhere on the real line. range_min: the lower bound of the spline's range. Below `range_min`, the bijector defaults to a linear transformation. range_max: the upper bound of the spline's range. Above `range_max`, the bijector defaults to a linear transformation. boundary_slopes: controls the behaviour of the slope of the spline at the range boundaries (`range_min` and `range_max`). It is used to enforce certain boundary conditions on the spline. Available options are: - 'unconstrained': no boundary conditions are imposed; the slopes at the boundaries can vary freely. - 'lower_identity': the slope of the spline is set equal to 1 at the lower boundary (`range_min`). This makes the bijector equal to the identity function for values less than `range_min`. - 'upper_identity': similar to `lower_identity`, but now the slope of the spline is set equal to 1 at the upper boundary (`range_max`). This makes the bijector equal to the identity function for values greater than `range_max`. - 'identity': combines the effects of 'lower_identity' and 'upper_identity' together. The slope of the spline is set equal to 1 at both boundaries (`range_min` and `range_max`). This makes the bijector equal to the identity function outside the interval `[range_min, range_max]`. - 'circular': makes the slope at `range_min` and `range_max` be the same. This implements the "circular spline" introduced by: > Rezende et al., Normalizing Flows on Tori and Spheres, > https://arxiv.org/abs/2002.02428, 2020. This option should be used when the spline operates on a circle parameterized by an angle in the interval `[range_min, range_max]`, where `range_min` and `range_max` correspond to the same point on the circle. min_bin_size: The minimum bin size, in either the x or the y axis. Should be a small positive number, chosen for numerical stability. Guarantees that no bin in either the x or the y axis will be less than this value. min_knot_slope: The minimum slope at each knot point. Should be a small positive number, chosen for numerical stability. Guarantess that no knot will have a slope less than this value. """ super().__init__(event_ndims_in=0) if params.shape[-1] % 3 != 1 or params.shape[-1] < 4: raise ValueError(f'The last dimension of `params` must have size' f' `3 * num_bins + 1` and `num_bins` must be at least 1.' f' Got size {params.shape[-1]}.') if range_min >= range_max: raise ValueError(f'`range_min` must be less than `range_max`. Got' f' `range_min={range_min}` and `range_max={range_max}`.') if min_bin_size <= 0.: raise ValueError(f'The minimum bin size must be positive; got' f' {min_bin_size}.') if min_knot_slope <= 0.: raise ValueError(f'The minimum knot slope must be positive; got' f' {min_knot_slope}.') self._dtype = params.dtype self._num_bins = (params.shape[-1] - 1) // 3 # Extract unnormalized parameters. 
unnormalized_bin_widths = params[..., :self._num_bins] unnormalized_bin_heights = params[..., self._num_bins : 2 * self._num_bins] unnormalized_knot_slopes = params[..., 2 * self._num_bins:] # Normalize bin sizes and compute bin positions on the x and y axis. range_size = range_max - range_min bin_widths = _normalize_bin_sizes(unnormalized_bin_widths, range_size, min_bin_size) bin_heights = _normalize_bin_sizes(unnormalized_bin_heights, range_size, min_bin_size) x_pos = range_min + jnp.cumsum(bin_widths[..., :-1], axis=-1) y_pos = range_min + jnp.cumsum(bin_heights[..., :-1], axis=-1) pad_shape = params.shape[:-1] + (1,) pad_below = jnp.full(pad_shape, range_min, dtype=self._dtype) pad_above = jnp.full(pad_shape, range_max, dtype=self._dtype) self._x_pos = jnp.concatenate([pad_below, x_pos, pad_above], axis=-1) self._y_pos = jnp.concatenate([pad_below, y_pos, pad_above], axis=-1) # Normalize knot slopes and enforce requested boundary conditions. knot_slopes = _normalize_knot_slopes(unnormalized_knot_slopes, min_knot_slope) if boundary_slopes == 'unconstrained': self._knot_slopes = knot_slopes elif boundary_slopes == 'lower_identity': ones = jnp.ones(pad_shape, self._dtype) self._knot_slopes = jnp.concatenate([ones, knot_slopes[..., 1:]], axis=-1) elif boundary_slopes == 'upper_identity': ones = jnp.ones(pad_shape, self._dtype) self._knot_slopes = jnp.concatenate( [knot_slopes[..., :-1], ones], axis=-1) elif boundary_slopes == 'identity': ones = jnp.ones(pad_shape, self._dtype) self._knot_slopes = jnp.concatenate( [ones, knot_slopes[..., 1:-1], ones], axis=-1) elif boundary_slopes == 'circular': self._knot_slopes = jnp.concatenate( [knot_slopes[..., :-1], knot_slopes[..., :1]], axis=-1) else: raise ValueError(f'Unknown option for boundary slopes:' f' `{boundary_slopes}`.')
(self, params: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], range_min: float, range_max: float, boundary_slopes: str = 'unconstrained', min_bin_size: float = 0.0001, min_knot_slope: float = 0.0001)
56,264
distrax._src.bijectors.rational_quadratic_spline
forward_and_log_det
Computes y = f(x) and log|det J(f)(x)|.
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" fn = jnp.vectorize( _rational_quadratic_spline_fwd, signature='(),(n),(n),(n)->(),()') y, logdet = fn(x, self._x_pos, self._y_pos, self._knot_slopes) return y, logdet
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,267
distrax._src.bijectors.rational_quadratic_spline
inverse_and_log_det
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" fn = jnp.vectorize( _rational_quadratic_spline_inv, signature='(),(n),(n),(n)->(),()') x, logdet = fn(y, self._x_pos, self._y_pos, self._knot_slopes) return x, logdet
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,271
distrax._src.bijectors.scalar_affine
ScalarAffine
An affine bijector that acts elementwise. The bijector is defined as follows: - Forward: `y = scale * x + shift` - Forward Jacobian determinant: `log|det J(x)| = log|scale|` - Inverse: `x = (y - shift) / scale` - Inverse Jacobian determinant: `log|det J(y)| = -log|scale|` where `scale` and `shift` are the bijector's parameters.
class ScalarAffine(base.Bijector): """An affine bijector that acts elementwise. The bijector is defined as follows: - Forward: `y = scale * x + shift` - Forward Jacobian determinant: `log|det J(x)| = log|scale|` - Inverse: `x = (y - shift) / scale` - Inverse Jacobian determinant: `log|det J(y)| = -log|scale|` where `scale` and `shift` are the bijector's parameters. """ def __init__(self, shift: Numeric, scale: Optional[Numeric] = None, log_scale: Optional[Numeric] = None): """Initializes a ScalarAffine bijector. Args: shift: the bijector's shift parameter. Can also be batched. scale: the bijector's scale parameter. Can also be batched. NOTE: `scale` must be non-zero, otherwise the bijector is not invertible. It is the user's responsibility to make sure `scale` is non-zero; the class will make no attempt to verify this. log_scale: the log of the scale parameter. Can also be batched. If specified, the bijector's scale is set equal to `exp(log_scale)`. Unlike `scale`, `log_scale` is an unconstrained parameter. NOTE: either `scale` or `log_scale` can be specified, but not both. If neither is specified, the bijector's scale will default to 1. Raises: ValueError: if both `scale` and `log_scale` are not None. """ super().__init__(event_ndims_in=0, is_constant_jacobian=True) self._shift = shift if scale is None and log_scale is None: self._scale = 1. self._inv_scale = 1. self._log_scale = 0. elif log_scale is None: self._scale = scale self._inv_scale = 1. / scale self._log_scale = jnp.log(jnp.abs(scale)) elif scale is None: self._scale = jnp.exp(log_scale) self._inv_scale = jnp.exp(jnp.negative(log_scale)) self._log_scale = log_scale else: raise ValueError( 'Only one of `scale` and `log_scale` can be specified, not both.') self._batch_shape = jax.lax.broadcast_shapes( jnp.shape(self._shift), jnp.shape(self._scale)) @property def shift(self) -> Numeric: """The bijector's shift.""" return self._shift @property def log_scale(self) -> Numeric: """The log of the bijector's scale.""" return self._log_scale @property def scale(self) -> Numeric: """The bijector's scale.""" assert self._scale is not None # By construction. 
return self._scale def forward(self, x: Array) -> Array: """Computes y = f(x).""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape) batched_scale = jnp.broadcast_to(self._scale, batch_shape) batched_shift = jnp.broadcast_to(self._shift, batch_shape) return batched_scale * x + batched_shift def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape) return jnp.broadcast_to(self._log_scale, batch_shape) def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" return self.forward(x), self.forward_log_det_jacobian(x) def inverse(self, y: Array) -> Array: """Computes x = f^{-1}(y).""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, y.shape) batched_inv_scale = jnp.broadcast_to(self._inv_scale, batch_shape) batched_shift = jnp.broadcast_to(self._shift, batch_shape) return batched_inv_scale * (y - batched_shift) def inverse_log_det_jacobian(self, y: Array) -> Array: """Computes log|det J(f^{-1})(y)|.""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, y.shape) return jnp.broadcast_to(jnp.negative(self._log_scale), batch_shape) def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" return self.inverse(y), self.inverse_log_det_jacobian(y) def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" if type(other) is ScalarAffine: # pylint: disable=unidiomatic-typecheck return all(( self.shift is other.shift, self.scale is other.scale, self.log_scale is other.log_scale, )) else: return False
(shift: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, NoneType] = None, log_scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, NoneType] = None)
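A minimal sketch of the `ScalarAffine` bijector with illustrative parameters, including the alternative `log_scale` parameterization mentioned in the docstring.

import jax.numpy as jnp
import distrax

bij = distrax.ScalarAffine(shift=1., scale=2.)  # y = 2 * x + 1

x = jnp.array([0., 1., 2.])
y = bij.forward(x)                       # [1., 3., 5.]
x_back = bij.inverse(y)                  # recovers x
print(bij.forward_log_det_jacobian(x))   # log|scale| = log(2), broadcast to x's shape

# Equivalent bijector, parameterized by the (unconstrained) log of the scale.
bij_log = distrax.ScalarAffine(shift=1., log_scale=jnp.log(2.))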
56,272
distrax._src.bijectors.scalar_affine
__init__
Initializes a ScalarAffine bijector. Args: shift: the bijector's shift parameter. Can also be batched. scale: the bijector's scale parameter. Can also be batched. NOTE: `scale` must be non-zero, otherwise the bijector is not invertible. It is the user's responsibility to make sure `scale` is non-zero; the class will make no attempt to verify this. log_scale: the log of the scale parameter. Can also be batched. If specified, the bijector's scale is set equal to `exp(log_scale)`. Unlike `scale`, `log_scale` is an unconstrained parameter. NOTE: either `scale` or `log_scale` can be specified, but not both. If neither is specified, the bijector's scale will default to 1. Raises: ValueError: if both `scale` and `log_scale` are not None.
def __init__(self, shift: Numeric, scale: Optional[Numeric] = None, log_scale: Optional[Numeric] = None): """Initializes a ScalarAffine bijector. Args: shift: the bijector's shift parameter. Can also be batched. scale: the bijector's scale parameter. Can also be batched. NOTE: `scale` must be non-zero, otherwise the bijector is not invertible. It is the user's responsibility to make sure `scale` is non-zero; the class will make no attempt to verify this. log_scale: the log of the scale parameter. Can also be batched. If specified, the bijector's scale is set equal to `exp(log_scale)`. Unlike `scale`, `log_scale` is an unconstrained parameter. NOTE: either `scale` or `log_scale` can be specified, but not both. If neither is specified, the bijector's scale will default to 1. Raises: ValueError: if both `scale` and `log_scale` are not None. """ super().__init__(event_ndims_in=0, is_constant_jacobian=True) self._shift = shift if scale is None and log_scale is None: self._scale = 1. self._inv_scale = 1. self._log_scale = 0. elif log_scale is None: self._scale = scale self._inv_scale = 1. / scale self._log_scale = jnp.log(jnp.abs(scale)) elif scale is None: self._scale = jnp.exp(log_scale) self._inv_scale = jnp.exp(jnp.negative(log_scale)) self._log_scale = log_scale else: raise ValueError( 'Only one of `scale` and `log_scale` can be specified, not both.') self._batch_shape = jax.lax.broadcast_shapes( jnp.shape(self._shift), jnp.shape(self._scale))
(self, shift: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float], scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, NoneType] = None, log_scale: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, NoneType] = None)
56,276
distrax._src.bijectors.scalar_affine
forward
Computes y = f(x).
def forward(self, x: Array) -> Array: """Computes y = f(x).""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape) batched_scale = jnp.broadcast_to(self._scale, batch_shape) batched_shift = jnp.broadcast_to(self._shift, batch_shape) return batched_scale * x + batched_shift
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,278
distrax._src.bijectors.scalar_affine
forward_log_det_jacobian
Computes log|det J(f)(x)|.
def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape) return jnp.broadcast_to(self._log_scale, batch_shape)
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,279
distrax._src.bijectors.scalar_affine
inverse
Computes x = f^{-1}(y).
def inverse(self, y: Array) -> Array: """Computes x = f^{-1}(y).""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, y.shape) batched_inv_scale = jnp.broadcast_to(self._inv_scale, batch_shape) batched_shift = jnp.broadcast_to(self._shift, batch_shape) return batched_inv_scale * (y - batched_shift)
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,281
distrax._src.bijectors.scalar_affine
inverse_log_det_jacobian
Computes log|det J(f^{-1})(y)|.
def inverse_log_det_jacobian(self, y: Array) -> Array: """Computes log|det J(f^{-1})(y)|.""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, y.shape) return jnp.broadcast_to(jnp.negative(self._log_scale), batch_shape)
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,282
distrax._src.bijectors.scalar_affine
same_as
Returns True if this bijector is guaranteed to be the same as `other`.
def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" if type(other) is ScalarAffine: # pylint: disable=unidiomatic-typecheck return all(( self.shift is other.shift, self.scale is other.scale, self.log_scale is other.log_scale, )) else: return False
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
56,284
distrax._src.bijectors.shift
Shift
Bijector that translates its input elementwise. The bijector is defined as follows: - Forward: `y = x + shift` - Forward Jacobian determinant: `log|det J(x)| = 0` - Inverse: `x = y - shift` - Inverse Jacobian determinant: `log|det J(y)| = 0` where `shift` parameterizes the bijector.
class Shift(base.Bijector): """Bijector that translates its input elementwise. The bijector is defined as follows: - Forward: `y = x + shift` - Forward Jacobian determinant: `log|det J(x)| = 0` - Inverse: `x = y - shift` - Inverse Jacobian determinant: `log|det J(y)| = 0` where `shift` parameterizes the bijector. """ def __init__(self, shift: Numeric): """Initializes a `Shift` bijector. Args: shift: the bijector's shift parameter. Can also be batched. """ super().__init__(event_ndims_in=0, is_constant_jacobian=True) self._shift = shift self._batch_shape = jnp.shape(self._shift) @property def shift(self) -> Numeric: """The bijector's shift.""" return self._shift def forward(self, x: Array) -> Array: """Computes y = f(x).""" return x + self._shift def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape) return jnp.zeros(batch_shape, dtype=x.dtype) def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" return self.forward(x), self.forward_log_det_jacobian(x) def inverse(self, y: Array) -> Array: """Computes x = f^{-1}(y).""" return y - self._shift def inverse_log_det_jacobian(self, y: Array) -> Array: """Computes log|det J(f^{-1})(y)|.""" return self.forward_log_det_jacobian(y) def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" return self.inverse(y), self.inverse_log_det_jacobian(y) def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" if type(other) is Shift: # pylint: disable=unidiomatic-typecheck return self.shift is other.shift return False
(shift: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float])
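A short usage sketch (illustrative values, not from the source) for the `Shift` bijector above, showing that the shift can be batched and that the log-determinant is zero because a translation is volume-preserving.
```
import jax.numpy as jnp
import distrax

bij = distrax.Shift(jnp.array([1.0, -2.0, 0.5]))  # batched shift
x = jnp.zeros(3)
y, logdet = bij.forward_and_log_det(x)
print(y)               # [ 1.  -2.   0.5]
print(logdet)          # zeros: translations have log|det J| = 0
print(bij.inverse(y))  # recovers x
```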
56,285
distrax._src.bijectors.shift
__init__
Initializes a `Shift` bijector. Args: shift: the bijector's shift parameter. Can also be batched.
def __init__(self, shift: Numeric): """Initializes a `Shift` bijector. Args: shift: the bijector's shift parameter. Can also be batched. """ super().__init__(event_ndims_in=0, is_constant_jacobian=True) self._shift = shift self._batch_shape = jnp.shape(self._shift)
(self, shift: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float])
56,289
distrax._src.bijectors.shift
forward
Computes y = f(x).
def forward(self, x: Array) -> Array: """Computes y = f(x).""" return x + self._shift
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,291
distrax._src.bijectors.shift
forward_log_det_jacobian
Computes log|det J(f)(x)|.
def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape) return jnp.zeros(batch_shape, dtype=x.dtype)
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,292
distrax._src.bijectors.shift
inverse
Computes x = f^{-1}(y).
def inverse(self, y: Array) -> Array: """Computes x = f^{-1}(y).""" return y - self._shift
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,294
distrax._src.bijectors.shift
inverse_log_det_jacobian
Computes log|det J(f^{-1})(y)|.
def inverse_log_det_jacobian(self, y: Array) -> Array: """Computes log|det J(f^{-1})(y)|.""" return self.forward_log_det_jacobian(y)
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,295
distrax._src.bijectors.shift
same_as
Returns True if this bijector is guaranteed to be the same as `other`.
def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" if type(other) is Shift: # pylint: disable=unidiomatic-typecheck return self.shift is other.shift return False
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
56,297
distrax._src.bijectors.sigmoid
Sigmoid
A bijector that computes the logistic sigmoid. The log-determinant implementation in this bijector is more numerically stable than relying on the automatic differentiation approach used by Lambda, so this bijector should be preferred over Lambda(jax.nn.sigmoid) where possible. See `tfp.bijectors.Sigmoid` for details. Note that the underlying implementation of `jax.nn.sigmoid` used by the `forward` function of this bijector does not support inputs of integer type. To invoke the forward function of this bijector on an argument of integer type, it should first be cast explicitly to a floating point type. When the absolute value of the input is large, `Sigmoid` becomes close to a constant, so that it is not possible to recover the input `x` from the output `y` within machine precision. In cases where it is needed to compute both the forward mapping and the backward mapping one after the other to recover the original input `x`, it is the user's responsibility to simplify the operation to avoid numerical issues; this is unlike the `tfp.bijectors.Sigmoid`. One example of such a case is to use the bijector within a `Transformed` distribution and to obtain the log-probability of samples obtained from the distribution's `sample` method. For values of the samples for which it is not possible to apply the inverse bijector accurately, `log_prob` returns NaN. This can be avoided by using `sample_and_log_prob` instead of `sample` followed by `log_prob`.
class Sigmoid(base.Bijector): """A bijector that computes the logistic sigmoid. The log-determinant implementation in this bijector is more numerically stable than relying on the automatic differentiation approach used by Lambda, so this bijector should be preferred over Lambda(jax.nn.sigmoid) where possible. See `tfp.bijectors.Sigmoid` for details. Note that the underlying implementation of `jax.nn.sigmoid` used by the `forward` function of this bijector does not support inputs of integer type. To invoke the forward function of this bijector on an argument of integer type, it should first be cast explicitly to a floating point type. When the absolute value of the input is large, `Sigmoid` becomes close to a constant, so that it is not possible to recover the input `x` from the output `y` within machine precision. In cases where it is needed to compute both the forward mapping and the backward mapping one after the other to recover the original input `x`, it is the user's responsibility to simplify the operation to avoid numerical issues; this is unlike the `tfp.bijectors.Sigmoid`. One example of such case is to use the bijector within a `Transformed` distribution and to obtain the log-probability of samples obtained from the distribution's `sample` method. For values of the samples for which it is not possible to apply the inverse bijector accurately, `log_prob` returns NaN. This can be avoided by using `sample_and_log_prob` instead of `sample` followed by `log_prob`. """ def __init__(self): """Initializes a Sigmoid bijector.""" super().__init__(event_ndims_in=0) def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" # pylint:disable=invalid-unary-operand-type return -_more_stable_softplus(-x) - _more_stable_softplus(x) def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" return _more_stable_sigmoid(x), self.forward_log_det_jacobian(x) def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" x = jnp.log(y) - jnp.log1p(-y) return x, -self.forward_log_det_jacobian(x) def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" return type(other) is Sigmoid # pylint: disable=unidiomatic-typecheck
()
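A small sketch (not from the source; distribution parameters are illustrative) of the `sample_and_log_prob` advice in the docstring above: pushing a wide Normal through `Sigmoid` and evaluating log-probabilities without ever inverting the sigmoid.
```
import jax
import distrax

# A Normal pushed through Sigmoid gives a logit-normal on (0, 1).
dist = distrax.Transformed(distrax.Normal(loc=0., scale=5.), distrax.Sigmoid())

key = jax.random.PRNGKey(0)
samples, log_probs = dist.sample_and_log_prob(seed=key, sample_shape=4)
# `sample_and_log_prob` only uses the forward mapping, so it stays accurate
# even when a sample is so close to 0 or 1 that inverting the sigmoid
# (as `sample` followed by `log_prob` would require) loses precision.
print(samples, log_probs)
```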
56,298
distrax._src.bijectors.sigmoid
__init__
Initializes a Sigmoid bijector.
def __init__(self): """Initializes a Sigmoid bijector.""" super().__init__(event_ndims_in=0)
(self)
56,303
distrax._src.bijectors.sigmoid
forward_and_log_det
Computes y = f(x) and log|det J(f)(x)|.
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" return _more_stable_sigmoid(x), self.forward_log_det_jacobian(x)
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,304
distrax._src.bijectors.sigmoid
forward_log_det_jacobian
Computes log|det J(f)(x)|.
def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" # pylint:disable=invalid-unary-operand-type return -_more_stable_softplus(-x) - _more_stable_softplus(x)
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
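A numerical check (a sketch, not from the source) of the identity used above: `-softplus(-x) - softplus(x)` equals `log(sigmoid(x) * (1 - sigmoid(x)))`, the log-derivative of the sigmoid, but stays finite where the naive form underflows. The class itself uses its own `_more_stable_softplus` helper; plain `jax.nn.softplus` is used here just to illustrate the identity.
```
import jax
import jax.numpy as jnp

x = jnp.array([-40.0, 0.0, 40.0])
stable = -jax.nn.softplus(-x) - jax.nn.softplus(x)
naive = jnp.log(jax.nn.sigmoid(x) * (1.0 - jax.nn.sigmoid(x)))
print(stable)  # approximately [-40., -1.386, -40.], finite everywhere
print(naive)   # -inf at x = 40, where sigmoid(x) rounds to 1 in float32
```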
56,306
distrax._src.bijectors.sigmoid
inverse_and_log_det
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" x = jnp.log(y) - jnp.log1p(-y) return x, -self.forward_log_det_jacobian(x)
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,308
distrax._src.bijectors.sigmoid
same_as
Returns True if this bijector is guaranteed to be the same as `other`.
def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" return type(other) is Sigmoid # pylint: disable=unidiomatic-typecheck
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
56,310
distrax._src.distributions.softmax
Softmax
Categorical implementing a softmax over logits, with given temperature. Given a set of logits, the probability mass is distributed such that each index `i` has probability `exp(logits[i]/τ)/Σ(exp(logits/τ))` where τ is a scalar `temperature` parameter such that for τ→0, the distribution becomes fully greedy, and for τ→∞ the distribution becomes fully uniform.
class Softmax(categorical.Categorical): """Categorical implementing a softmax over logits, with given temperature. Given a set of logits, the probability mass is distributed such that each index `i` has probability `exp(logits[i]/τ)/Σ(exp(logits/τ)` where τ is a scalar `temperature` parameter such that for τ→0, the distribution becomes fully greedy, and for τ→∞ the distribution becomes fully uniform. """ def __init__(self, logits: Array, temperature: float = 1., dtype: Union[jnp.dtype, type[Any]] = int): """Initializes a Softmax distribution. Args: logits: Logit transform of the probability of each category. temperature: Softmax temperature τ. dtype: The type of event samples. """ self._temperature = temperature self._unscaled_logits = logits scaled_logits = logits / temperature super().__init__(logits=scaled_logits, dtype=dtype) @property def temperature(self) -> float: """The softmax temperature parameter.""" return self._temperature @property def unscaled_logits(self) -> Array: """The logits of the distribution before the temperature scaling.""" return self._unscaled_logits def __getitem__(self, index) -> 'Softmax': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return Softmax( logits=self.unscaled_logits[index], temperature=self.temperature, dtype=self.dtype)
(logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], temperature: float = 1.0, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
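An illustrative sketch (logits and temperatures are made up, not from the source) of the temperature behaviour described above: small τ concentrates the mass on the largest logit, large τ flattens it towards uniform.
```
import jax.numpy as jnp
import distrax

logits = jnp.array([2.0, 1.0, 0.0])
cold = distrax.Softmax(logits, temperature=0.1)
warm = distrax.Softmax(logits, temperature=10.0)
print(cold.probs)            # close to one-hot on the largest logit
print(warm.probs)            # close to uniform
print(cold.unscaled_logits)  # the original, untempered logits
```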
56,311
distrax._src.distributions.softmax
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'Softmax': """See `Distribution.__getitem__`.""" index = distribution.to_batch_shape_index(self.batch_shape, index) return Softmax( logits=self.unscaled_logits[index], temperature=self.temperature, dtype=self.dtype)
(self, index) -> distrax._src.distributions.softmax.Softmax
56,312
distrax._src.distributions.softmax
__init__
Initializes a Softmax distribution. Args: logits: Logit transform of the probability of each category. temperature: Softmax temperature τ. dtype: The type of event samples.
def __init__(self, logits: Array, temperature: float = 1., dtype: Union[jnp.dtype, type[Any]] = int): """Initializes a Softmax distribution. Args: logits: Logit transform of the probability of each category. temperature: Softmax temperature τ. dtype: The type of event samples. """ self._temperature = temperature self._unscaled_logits = logits scaled_logits = logits / temperature super().__init__(logits=scaled_logits, dtype=dtype)
(self, logits: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], temperature: float = 1.0, dtype: Union[numpy.dtype, type[Any]] = <class 'int'>)
56,335
distrax._src.bijectors.split_coupling
SplitCoupling
Split coupling bijector, with arbitrary conditioner & inner bijector. This coupling bijector splits the input array into two parts along a specified axis. One part remains unchanged, whereas the other part is transformed by an inner bijector conditioned on the unchanged part. Let `f` be a conditional bijector (the inner bijector) and `g` be a function (the conditioner). For `swap=False`, the split coupling bijector is defined as follows: - Forward: ``` x = [x1, x2] y1 = x1 y2 = f(x2; g(x1)) y = [y1, y2] ``` - Forward Jacobian log determinant: ``` x = [x1, x2] log|det J(x)| = log|det df/dx2(x2; g(x1))| ``` - Inverse: ``` y = [y1, y2] x1 = y1 x2 = f^{-1}(y2; g(y1)) x = [x1, x2] ``` - Inverse Jacobian log determinant: ``` y = [y1, y2] log|det J(y)| = log|det df^{-1}/dy2(y2; g(y1))| ``` Here, `[x1, x2]` is a partition of `x` along some axis. By default, `x1` remains unchanged and `x2` is transformed. If `swap=True`, `x2` will remain unchanged and `x1` will be transformed.
class SplitCoupling(base.Bijector): """Split coupling bijector, with arbitrary conditioner & inner bijector. This coupling bijector splits the input array into two parts along a specified axis. One part remains unchanged, whereas the other part is transformed by an inner bijector conditioned on the unchanged part. Let `f` be a conditional bijector (the inner bijector) and `g` be a function (the conditioner). For `swap=False`, the split coupling bijector is defined as follows: - Forward: ``` x = [x1, x2] y1 = x1 y2 = f(x2; g(x1)) y = [y1, y2] ``` - Forward Jacobian log determinant: ``` x = [x1, x2] log|det J(x)| = log|det df/dx2(x2; g(x1))| ``` - Inverse: ``` y = [y1, y2] x1 = y1 x2 = f^{-1}(y2; g(y1)) x = [x1, x2] ``` - Inverse Jacobian log determinant: ``` y = [y1, y2] log|det J(y)| = log|det df^{-1}/dy2(y2; g(y1))| ``` Here, `[x1, x2]` is a partition of `x` along some axis. By default, `x1` remains unchanged and `x2` is transformed. If `swap=True`, `x2` will remain unchanged and `x1` will be transformed. """ def __init__(self, split_index: int, event_ndims: int, conditioner: Callable[[Array], BijectorParams], bijector: Callable[[BijectorParams], base.BijectorLike], swap: bool = False, split_axis: int = -1): """Initializes a SplitCoupling bijector. Args: split_index: the index used to split the input. The input array will be split along the axis specified by `split_axis` into two parts. The first part will correspond to indices up to `split_index` (non-inclusive), whereas the second part will correspond to indices starting from `split_index` (inclusive). event_ndims: the number of event dimensions the bijector operates on. The `event_ndims_in` and `event_ndims_out` of the coupling bijector are both equal to `event_ndims`. conditioner: a function that computes the parameters of the inner bijector as a function of the unchanged part of the input. The output of the conditioner will be passed to `bijector` in order to obtain the inner bijector. bijector: a callable that returns the inner bijector that will be used to transform one of the two parts. The input to `bijector` is a set of parameters that can be used to configure the inner bijector. The `event_ndims_in` and `event_ndims_out` of the inner bijector must be equal, and less than or equal to `event_ndims`. If they are less than `event_ndims`, the remaining dimensions will be converted to event dimensions using `distrax.Block`. swap: by default, the part of the input up to `split_index` is the one that remains unchanged. If `swap` is True, then the other part remains unchanged and the first one is transformed instead. split_axis: the axis along which to split the input. Must be negative, that is, it must index from the end. By default, it's the last axis. """ if split_index < 0: raise ValueError( f'The split index must be non-negative; got {split_index}.') if split_axis >= 0: raise ValueError(f'The split axis must be negative; got {split_axis}.') if event_ndims < 0: raise ValueError( f'`event_ndims` must be non-negative; got {event_ndims}.') if split_axis < -event_ndims: raise ValueError( f'The split axis points to an axis outside the event. With ' f'`event_ndims == {event_ndims}`, the split axis must be between -1 ' f'and {-event_ndims}. 
Got `split_axis == {split_axis}`.') self._split_index = split_index self._conditioner = conditioner self._bijector = bijector self._swap = swap self._split_axis = split_axis super().__init__(event_ndims_in=event_ndims) @property def bijector(self) -> Callable[[BijectorParams], base.BijectorLike]: """The callable that returns the inner bijector of `SplitCoupling`.""" return self._bijector @property def conditioner(self) -> Callable[[Array], BijectorParams]: """The conditioner function.""" return self._conditioner @property def split_index(self) -> int: """The index used to split the input.""" return self._split_index @property def swap(self) -> bool: """The flag that determines which part of the input remains unchanged.""" return self._swap @property def split_axis(self) -> int: """The axis along which to split the input.""" return self._split_axis def _split(self, x: Array) -> Tuple[Array, Array]: x1, x2 = jnp.split(x, [self._split_index], self._split_axis) if self._swap: x1, x2 = x2, x1 return x1, x2 def _recombine(self, x1: Array, x2: Array) -> Array: if self._swap: x1, x2 = x2, x1 return jnp.concatenate([x1, x2], self._split_axis) def _inner_bijector(self, params: BijectorParams) -> base.Bijector: """Returns an inner bijector for the passed params.""" bijector = conversion.as_bijector(self._bijector(params)) if bijector.event_ndims_in != bijector.event_ndims_out: raise ValueError( f'The inner bijector must have `event_ndims_in==event_ndims_out`. ' f'Instead, it has `event_ndims_in=={bijector.event_ndims_in}` and ' f'`event_ndims_out=={bijector.event_ndims_out}`.') extra_ndims = self.event_ndims_in - bijector.event_ndims_in if extra_ndims < 0: raise ValueError( f'The inner bijector can\'t have more event dimensions than the ' f'coupling bijector. Got {bijector.event_ndims_in} for the inner ' f'bijector and {self.event_ndims_in} for the coupling bijector.') elif extra_ndims > 0: bijector = block.Block(bijector, extra_ndims) return bijector def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" self._check_forward_input_shape(x) x1, x2 = self._split(x) params = self._conditioner(x1) y2, logdet = self._inner_bijector(params).forward_and_log_det(x2) return self._recombine(x1, y2), logdet def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" self._check_inverse_input_shape(y) y1, y2 = self._split(y) params = self._conditioner(y1) x2, logdet = self._inner_bijector(params).inverse_and_log_det(y2) return self._recombine(y1, x2), logdet
(split_index: int, event_ndims: int, conditioner: Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Any], bijector: Callable[[Any], Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]]], swap: bool = False, split_axis: int = -1)
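A minimal affine-coupling sketch (not from the source) using the constructor above. The conditioner here is a toy closed-form function standing in for the neural network a real model would use, and all values are illustrative.
```
import jax.numpy as jnp
import distrax

def conditioner(x1):
  # Toy conditioner: any function of the unchanged part works; real models
  # would typically use a neural network here.
  shift = jnp.tanh(x1)
  log_scale = 0.1 * x1
  return shift, log_scale

def inner_bijector(params):
  shift, log_scale = params
  # ScalarAffine has event_ndims_in == 0, so SplitCoupling wraps it in a Block.
  return distrax.ScalarAffine(shift=shift, log_scale=log_scale)

coupling = distrax.SplitCoupling(
    split_index=2, event_ndims=1,
    conditioner=conditioner, bijector=inner_bijector)

x = jnp.arange(4.0)                 # event of size 4, split into x[:2] and x[2:]
y, logdet = coupling.forward_and_log_det(x)
x_back, _ = coupling.inverse_and_log_det(y)
print(y, logdet, x_back)            # x_back matches x
```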
56,336
distrax._src.bijectors.split_coupling
__init__
Initializes a SplitCoupling bijector. Args: split_index: the index used to split the input. The input array will be split along the axis specified by `split_axis` into two parts. The first part will correspond to indices up to `split_index` (non-inclusive), whereas the second part will correspond to indices starting from `split_index` (inclusive). event_ndims: the number of event dimensions the bijector operates on. The `event_ndims_in` and `event_ndims_out` of the coupling bijector are both equal to `event_ndims`. conditioner: a function that computes the parameters of the inner bijector as a function of the unchanged part of the input. The output of the conditioner will be passed to `bijector` in order to obtain the inner bijector. bijector: a callable that returns the inner bijector that will be used to transform one of the two parts. The input to `bijector` is a set of parameters that can be used to configure the inner bijector. The `event_ndims_in` and `event_ndims_out` of the inner bijector must be equal, and less than or equal to `event_ndims`. If they are less than `event_ndims`, the remaining dimensions will be converted to event dimensions using `distrax.Block`. swap: by default, the part of the input up to `split_index` is the one that remains unchanged. If `swap` is True, then the other part remains unchanged and the first one is transformed instead. split_axis: the axis along which to split the input. Must be negative, that is, it must index from the end. By default, it's the last axis.
def __init__(self, split_index: int, event_ndims: int, conditioner: Callable[[Array], BijectorParams], bijector: Callable[[BijectorParams], base.BijectorLike], swap: bool = False, split_axis: int = -1): """Initializes a SplitCoupling bijector. Args: split_index: the index used to split the input. The input array will be split along the axis specified by `split_axis` into two parts. The first part will correspond to indices up to `split_index` (non-inclusive), whereas the second part will correspond to indices starting from `split_index` (inclusive). event_ndims: the number of event dimensions the bijector operates on. The `event_ndims_in` and `event_ndims_out` of the coupling bijector are both equal to `event_ndims`. conditioner: a function that computes the parameters of the inner bijector as a function of the unchanged part of the input. The output of the conditioner will be passed to `bijector` in order to obtain the inner bijector. bijector: a callable that returns the inner bijector that will be used to transform one of the two parts. The input to `bijector` is a set of parameters that can be used to configure the inner bijector. The `event_ndims_in` and `event_ndims_out` of the inner bijector must be equal, and less than or equal to `event_ndims`. If they are less than `event_ndims`, the remaining dimensions will be converted to event dimensions using `distrax.Block`. swap: by default, the part of the input up to `split_index` is the one that remains unchanged. If `swap` is True, then the other part remains unchanged and the first one is transformed instead. split_axis: the axis along which to split the input. Must be negative, that is, it must index from the end. By default, it's the last axis. """ if split_index < 0: raise ValueError( f'The split index must be non-negative; got {split_index}.') if split_axis >= 0: raise ValueError(f'The split axis must be negative; got {split_axis}.') if event_ndims < 0: raise ValueError( f'`event_ndims` must be non-negative; got {event_ndims}.') if split_axis < -event_ndims: raise ValueError( f'The split axis points to an axis outside the event. With ' f'`event_ndims == {event_ndims}`, the split axis must be between -1 ' f'and {-event_ndims}. Got `split_axis == {split_axis}`.') self._split_index = split_index self._conditioner = conditioner self._bijector = bijector self._swap = swap self._split_axis = split_axis super().__init__(event_ndims_in=event_ndims)
(self, split_index: int, event_ndims: int, conditioner: Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Any], bijector: Callable[[Any], Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]]], swap: bool = False, split_axis: int = -1)
56,340
distrax._src.bijectors.split_coupling
_inner_bijector
Returns an inner bijector for the passed params.
def _inner_bijector(self, params: BijectorParams) -> base.Bijector: """Returns an inner bijector for the passed params.""" bijector = conversion.as_bijector(self._bijector(params)) if bijector.event_ndims_in != bijector.event_ndims_out: raise ValueError( f'The inner bijector must have `event_ndims_in==event_ndims_out`. ' f'Instead, it has `event_ndims_in=={bijector.event_ndims_in}` and ' f'`event_ndims_out=={bijector.event_ndims_out}`.') extra_ndims = self.event_ndims_in - bijector.event_ndims_in if extra_ndims < 0: raise ValueError( f'The inner bijector can\'t have more event dimensions than the ' f'coupling bijector. Got {bijector.event_ndims_in} for the inner ' f'bijector and {self.event_ndims_in} for the coupling bijector.') elif extra_ndims > 0: bijector = block.Block(bijector, extra_ndims) return bijector
(self, params: Any) -> distrax._src.bijectors.bijector.Bijector
56,341
distrax._src.bijectors.split_coupling
_recombine
null
def _recombine(self, x1: Array, x2: Array) -> Array: if self._swap: x1, x2 = x2, x1 return jnp.concatenate([x1, x2], self._split_axis)
(self, x1: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], x2: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,342
distrax._src.bijectors.split_coupling
_split
null
def _split(self, x: Array) -> Tuple[Array, Array]: x1, x2 = jnp.split(x, [self._split_index], self._split_axis) if self._swap: x1, x2 = x2, x1 return x1, x2
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,344
distrax._src.bijectors.split_coupling
forward_and_log_det
Computes y = f(x) and log|det J(f)(x)|.
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" self._check_forward_input_shape(x) x1, x2 = self._split(x) params = self._conditioner(x1) y2, logdet = self._inner_bijector(params).forward_and_log_det(x2) return self._recombine(x1, y2), logdet
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,347
distrax._src.bijectors.split_coupling
inverse_and_log_det
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" self._check_inverse_input_shape(y) y1, y2 = self._split(y) params = self._conditioner(y1) x2, logdet = self._inner_bijector(params).inverse_and_log_det(y2) return self._recombine(y1, x2), logdet
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,351
distrax._src.bijectors.tanh
Tanh
A bijector that computes the hyperbolic tangent. The log-determinant implementation in this bijector is more numerically stable than relying on the automatic differentiation approach used by Lambda, so this bijector should be preferred over Lambda(jnp.tanh) where possible. See `tfp.bijectors.Tanh` for details. When the absolute value of the input is large, `Tanh` becomes close to a constant, so that it is not possible to recover the input `x` from the output `y` within machine precision. In cases where it is needed to compute both the forward mapping and the backward mapping one after the other to recover the original input `x`, it is the user's responsibility to simplify the operation to avoid numerical issues; this is unlike the `tfp.bijectors.Tanh`. One example of such a case is to use the bijector within a `Transformed` distribution and to obtain the log-probability of samples obtained from the distribution's `sample` method. For values of the samples for which it is not possible to apply the inverse bijector accurately, `log_prob` returns NaN. This can be avoided by using `sample_and_log_prob` instead of `sample` followed by `log_prob`.
class Tanh(base.Bijector): """A bijector that computes the hyperbolic tangent. The log-determinant implementation in this bijector is more numerically stable than relying on the automatic differentiation approach used by Lambda, so this bijector should be preferred over Lambda(jnp.tanh) where possible. See `tfp.bijectors.Tanh` for details. When the absolute value of the input is large, `Tanh` becomes close to a constant, so that it is not possible to recover the input `x` from the output `y` within machine precision. In cases where it is needed to compute both the forward mapping and the backward mapping one after the other to recover the original input `x`, it is the user's responsibility to simplify the operation to avoid numerical issues; this is unlike the `tfp.bijectors.Tanh`. One example of such case is to use the bijector within a `Transformed` distribution and to obtain the log-probability of samples obtained from the distribution's `sample` method. For values of the samples for which it is not possible to apply the inverse bijector accurately, `log_prob` returns NaN. This can be avoided by using `sample_and_log_prob` instead of `sample` followed by `log_prob`. """ def __init__(self): """Initializes a Tanh bijector.""" super().__init__(event_ndims_in=0) def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" return 2 * (jnp.log(2) - x - jax.nn.softplus(-2 * x)) def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" return jnp.tanh(x), self.forward_log_det_jacobian(x) def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" x = jnp.arctanh(y) return x, -self.forward_log_det_jacobian(x) def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" return type(other) is Tanh # pylint: disable=unidiomatic-typecheck
()
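A usage sketch (not from the source; parameters are illustrative) of the common tanh-squashed Gaussian construction, following the docstring's recommendation to use `sample_and_log_prob` rather than `sample` followed by `log_prob`.
```
import jax
import distrax

# A tanh-squashed Gaussian, e.g. for bounded actions in (-1, 1).
dist = distrax.Transformed(distrax.Normal(loc=0., scale=1.), distrax.Tanh())

key = jax.random.PRNGKey(42)
actions, log_probs = dist.sample_and_log_prob(seed=key, sample_shape=3)
# Samples lie in (-1, 1); the log probs come from the forward log-det,
# avoiding the arctanh inversion that `log_prob` on saturated samples needs.
print(actions, log_probs)
```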
56,352
distrax._src.bijectors.tanh
__init__
Initializes a Tanh bijector.
def __init__(self): """Initializes a Tanh bijector.""" super().__init__(event_ndims_in=0)
(self)
56,357
distrax._src.bijectors.tanh
forward_and_log_det
Computes y = f(x) and log|det J(f)(x)|.
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" return jnp.tanh(x), self.forward_log_det_jacobian(x)
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,358
distrax._src.bijectors.tanh
forward_log_det_jacobian
Computes log|det J(f)(x)|.
def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" return 2 * (jnp.log(2) - x - jax.nn.softplus(-2 * x))
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,360
distrax._src.bijectors.tanh
inverse_and_log_det
Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" x = jnp.arctanh(y) return x, -self.forward_log_det_jacobian(x)
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,362
distrax._src.bijectors.tanh
same_as
Returns True if this bijector is guaranteed to be the same as `other`.
def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" return type(other) is Tanh # pylint: disable=unidiomatic-typecheck
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
56,364
distrax._src.distributions.transformed
Transformed
Distribution of a random variable transformed by a bijective function. Let `X` be a continuous random variable and `Y = f(X)` be a random variable transformed by a differentiable bijection `f` (a "bijector"). Given the distribution of `X` (the "base distribution") and the bijector `f`, this class implements the distribution of `Y` (also known as the pushforward of the base distribution through `f`). The probability density of `Y` can be computed by: `log p(y) = log p(x) - log|det J(f)(x)|` where `p(x)` is the probability density of `X` (the "base density") and `J(f)(x)` is the Jacobian matrix of `f`, both evaluated at `x = f^{-1}(y)`. Sampling from a Transformed distribution involves two steps: sampling from the base distribution `x ~ p(x)` and then evaluating `y = f(x)`. The first step is agnostic to the possible batch dimensions of the bijector `f(x)`. For example: ``` dist = distrax.Normal(loc=0., scale=1.) bij = distrax.ScalarAffine(shift=jnp.asarray([3., 3., 3.])) transformed_dist = distrax.Transformed(distribution=dist, bijector=bij) samples = transformed_dist.sample(seed=0, sample_shape=()) print(samples) # [2.7941577, 2.7941577, 2.7941577] ``` Note: the `batch_shape`, `event_shape`, and `dtype` properties of the transformed distribution, as well as the `kl_divergence` method, are computed on-demand via JAX tracing when requested. This assumes that the `forward` function of the bijector is traceable; that is, it is a pure function that does not contain run-time branching. Functions that do not strictly meet this requirement can still be used, but we cannot guarantee that the shapes, dtype, and KL computations involving the transformed distribution can be correctly obtained.
class Transformed(dist_base.Distribution): """Distribution of a random variable transformed by a bijective function. Let `X` be a continuous random variable and `Y = f(X)` be a random variable transformed by a differentiable bijection `f` (a "bijector"). Given the distribution of `X` (the "base distribution") and the bijector `f`, this class implements the distribution of `Y` (also known as the pushforward of the base distribution through `f`). The probability density of `Y` can be computed by: `log p(y) = log p(x) - log|det J(f)(x)|` where `p(x)` is the probability density of `X` (the "base density") and `J(f)(x)` is the Jacobian matrix of `f`, both evaluated at `x = f^{-1}(y)`. Sampling from a Transformed distribution involves two steps: sampling from the base distribution `x ~ p(x)` and then evaluating `y = f(x)`. The first step is agnostic to the possible batch dimensions of the bijector `f(x)`. For example: ``` dist = distrax.Normal(loc=0., scale=1.) bij = distrax.ScalarAffine(shift=jnp.asarray([3., 3., 3.])) transformed_dist = distrax.Transformed(distribution=dist, bijector=bij) samples = transformed_dist.sample(seed=0, sample_shape=()) print(samples) # [2.7941577, 2.7941577, 2.7941577] ``` Note: the `batch_shape`, `event_shape`, and `dtype` properties of the transformed distribution, as well as the `kl_divergence` method, are computed on-demand via JAX tracing when requested. This assumes that the `forward` function of the bijector is traceable; that is, it is a pure function that does not contain run-time branching. Functions that do not strictly meet this requirement can still be used, but we cannot guarantee that the shapes, dtype, and KL computations involving the transformed distribution can be correctly obtained. """ equiv_tfp_cls = tfd.TransformedDistribution def __init__(self, distribution: DistributionLike, bijector: BijectorLike): """Initializes a Transformed distribution. Args: distribution: the base distribution. Can be either a Distrax distribution or a TFP distribution. bijector: a differentiable bijective transformation. Can be a Distrax bijector, a TFP bijector, or a callable to be wrapped by `Lambda`. """ super().__init__() distribution = conversion.as_distribution(distribution) bijector = conversion.as_bijector(bijector) event_shape = distribution.event_shape # Check if event shape is a tuple of integers (i.e. not nested). if not (isinstance(event_shape, tuple) and all(isinstance(i, int) for i in event_shape)): raise ValueError( f"'Transformed' currently only supports distributions with Array " f"events (i.e. not nested). Received '{distribution.name}' with " f"event shape '{distribution.event_shape}'.") if len(event_shape) != bijector.event_ndims_in: raise ValueError( f"Base distribution '{distribution.name}' has event shape " f"{distribution.event_shape}, but bijector '{bijector.name}' expects " f"events to have {bijector.event_ndims_in} dimensions. 
Perhaps use " f"`distrax.Block` or `distrax.Independent`?") self._distribution = distribution self._bijector = bijector self._batch_shape = None self._event_shape = None self._dtype = None @property def distribution(self): """The base distribution.""" return self._distribution @property def bijector(self): """The bijector representing the transformation.""" return self._bijector def _infer_shapes_and_dtype(self): """Infer the batch shape, event shape, and dtype by tracing `forward`.""" dummy_shape = self.distribution.batch_shape + self.distribution.event_shape dummy = jnp.zeros(dummy_shape, dtype=self.distribution.dtype) shape_dtype = jax.eval_shape(self.bijector.forward, dummy) self._dtype = shape_dtype.dtype if self.bijector.event_ndims_out == 0: self._event_shape = () self._batch_shape = shape_dtype.shape else: # pylint: disable-next=invalid-unary-operand-type self._event_shape = shape_dtype.shape[-self.bijector.event_ndims_out:] # pylint: disable-next=invalid-unary-operand-type self._batch_shape = shape_dtype.shape[:-self.bijector.event_ndims_out] @property def dtype(self) -> jnp.dtype: """See `Distribution.dtype`.""" if self._dtype is None: self._infer_shapes_and_dtype() assert self._dtype is not None # By _infer_shapes_and_dtype() return self._dtype @property def event_shape(self) -> Tuple[int, ...]: """See `Distribution.event_shape`.""" if self._event_shape is None: self._infer_shapes_and_dtype() assert self._event_shape is not None # By _infer_shapes_and_dtype() return self._event_shape @property def batch_shape(self) -> Tuple[int, ...]: """See `Distribution.batch_shape`.""" if self._batch_shape is None: self._infer_shapes_and_dtype() assert self._batch_shape is not None # By _infer_shapes_and_dtype() return self._batch_shape def log_prob(self, value: EventT) -> Array: """See `Distribution.log_prob`.""" x, ildj_y = self.bijector.inverse_and_log_det(value) lp_x = self.distribution.log_prob(x) lp_y = lp_x + ildj_y return lp_y def _sample_n(self, key: PRNGKey, n: int) -> Array: """Returns `n` samples.""" x = self.distribution.sample(seed=key, sample_shape=n) y = jax.vmap(self.bijector.forward)(x) return y def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]: """Returns `n` samples and their log probs. This function is more efficient than calling `sample` and `log_prob` separately, because it uses only the forward methods of the bijector. It also works for bijectors that don't implement inverse methods. Args: key: PRNG key. n: Number of samples to generate. Returns: A tuple of `n` samples and their log probs. """ x, lp_x = self.distribution.sample_and_log_prob(seed=key, sample_shape=n) y, fldj = jax.vmap(self.bijector.forward_and_log_det)(x) lp_y = jax.vmap(jnp.subtract)(lp_x, fldj) return y, lp_y def mean(self) -> Array: """Calculates the mean.""" if self.bijector.is_constant_jacobian: return self.bijector.forward(self.distribution.mean()) else: raise NotImplementedError( "`mean` is not implemented for this transformed distribution, " "because its bijector's Jacobian is not known to be constant.") def mode(self) -> Array: """Calculates the mode.""" if self.bijector.is_constant_log_det: return self.bijector.forward(self.distribution.mode()) else: raise NotImplementedError( "`mode` is not implemented for this transformed distribution, " "because its bijector's Jacobian determinant is not known to be " "constant.") def entropy( # pylint: disable=arguments-differ self, input_hint: Optional[Array] = None) -> Array: """Calculates the Shannon entropy (in Nats). 
Only works for bijectors with constant Jacobian determinant. Args: input_hint: an example sample from the base distribution, used to compute the constant forward log-determinant. If not specified, it is computed using a zero array of the shape and dtype of a sample from the base distribution. Returns: the entropy of the distribution. Raises: NotImplementedError: if bijector's Jacobian determinant is not known to be constant. """ if self.bijector.is_constant_log_det: if input_hint is None: shape = self.distribution.batch_shape + self.distribution.event_shape input_hint = jnp.zeros(shape, dtype=self.distribution.dtype) entropy = self.distribution.entropy() fldj = self.bijector.forward_log_det_jacobian(input_hint) return entropy + fldj else: raise NotImplementedError( "`entropy` is not implemented for this transformed distribution, " "because its bijector's Jacobian determinant is not known to be " "constant.")
(distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], bijector: Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]])
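A small consistency check (a sketch, not from the source; `mu` and `sigma` are illustrative) of the change-of-variables formula above: affinely transforming a standard Normal reproduces `Normal(mu, sigma)`, and `mean` is available because the affine bijector has a constant Jacobian.
```
import jax.numpy as jnp
import distrax

mu, sigma = 1.5, 2.0
transformed = distrax.Transformed(
    distrax.Normal(loc=0., scale=1.),
    distrax.ScalarAffine(shift=mu, scale=sigma))
direct = distrax.Normal(loc=mu, scale=sigma)

y = jnp.array([-1.0, 1.5, 4.0])
print(transformed.log_prob(y))  # matches direct.log_prob(y)
print(direct.log_prob(y))
print(transformed.mean())       # 1.5, since the Jacobian is constant
```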
56,366
distrax._src.distributions.transformed
__init__
Initializes a Transformed distribution. Args: distribution: the base distribution. Can be either a Distrax distribution or a TFP distribution. bijector: a differentiable bijective transformation. Can be a Distrax bijector, a TFP bijector, or a callable to be wrapped by `Lambda`.
def __init__(self, distribution: DistributionLike, bijector: BijectorLike): """Initializes a Transformed distribution. Args: distribution: the base distribution. Can be either a Distrax distribution or a TFP distribution. bijector: a differentiable bijective transformation. Can be a Distrax bijector, a TFP bijector, or a callable to be wrapped by `Lambda`. """ super().__init__() distribution = conversion.as_distribution(distribution) bijector = conversion.as_bijector(bijector) event_shape = distribution.event_shape # Check if event shape is a tuple of integers (i.e. not nested). if not (isinstance(event_shape, tuple) and all(isinstance(i, int) for i in event_shape)): raise ValueError( f"'Transformed' currently only supports distributions with Array " f"events (i.e. not nested). Received '{distribution.name}' with " f"event shape '{distribution.event_shape}'.") if len(event_shape) != bijector.event_ndims_in: raise ValueError( f"Base distribution '{distribution.name}' has event shape " f"{distribution.event_shape}, but bijector '{bijector.name}' expects " f"events to have {bijector.event_ndims_in} dimensions. Perhaps use " f"`distrax.Block` or `distrax.Independent`?") self._distribution = distribution self._bijector = bijector self._batch_shape = None self._event_shape = None self._dtype = None
(self, distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], bijector: Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]])
56,379
distrax._src.distributions.transformed
mean
Calculates the mean.
def mean(self) -> Array: """Calculates the mean.""" if self.bijector.is_constant_jacobian: return self.bijector.forward(self.distribution.mean()) else: raise NotImplementedError( "`mean` is not implemented for this transformed distribution, " "because its bijector's Jacobian is not known to be constant.")
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,381
distrax._src.distributions.transformed
mode
Calculates the mode.
def mode(self) -> Array: """Calculates the mode.""" if self.bijector.is_constant_log_det: return self.bijector.forward(self.distribution.mode()) else: raise NotImplementedError( "`mode` is not implemented for this transformed distribution, " "because its bijector's Jacobian determinant is not known to be " "constant.")
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,389
distrax._src.bijectors.triangular_linear
TriangularLinear
A linear bijector whose weight matrix is triangular. The bijector is defined as `f(x) = Ax` where `A` is a DxD triangular matrix. The Jacobian determinant can be computed in O(D) as follows: log|det J(x)| = log|det A| = sum(log|diag(A)|) The inverse is computed in O(D^2) by solving the triangular system `Ax = y`. The bijector is invertible if and only if all diagonal elements of `A` are non-zero. It is the responsibility of the user to make sure that this is the case; the class will make no attempt to verify that the bijector is invertible.
class TriangularLinear(linear.Linear): """A linear bijector whose weight matrix is triangular. The bijector is defined as `f(x) = Ax` where `A` is a DxD triangular matrix. The Jacobian determinant can be computed in O(D) as follows: log|det J(x)| = log|det A| = sum(log|diag(A)|) The inverse is computed in O(D^2) by solving the triangular system `Ax = y`. The bijector is invertible if and only if all diagonal elements of `A` are non-zero. It is the responsibility of the user to make sure that this is the case; the class will make no attempt to verify that the bijector is invertible. """ def __init__(self, matrix: Array, is_lower: bool = True): """Initializes a `TriangularLinear` bijector. Args: matrix: a square matrix whose triangular part defines `A`. Can also be a batch of matrices. Whether `A` is the lower or upper triangular part of `matrix` is determined by `is_lower`. is_lower: if True, `A` is set to the lower triangular part of `matrix`. If False, `A` is set to the upper triangular part of `matrix`. """ if matrix.ndim < 2: raise ValueError(f"`matrix` must have at least 2 dimensions, got" f" {matrix.ndim}.") if matrix.shape[-2] != matrix.shape[-1]: raise ValueError(f"`matrix` must be square; instead, it has shape" f" {matrix.shape[-2:]}.") super().__init__( event_dims=matrix.shape[-1], batch_shape=matrix.shape[:-2], dtype=matrix.dtype) self._matrix = jnp.tril(matrix) if is_lower else jnp.triu(matrix) self._is_lower = is_lower triangular_logdet = jnp.vectorize(_triangular_logdet, signature="(m,m)->()") self._logdet = triangular_logdet(self._matrix) @property def matrix(self) -> Array: """The triangular matrix `A` of the transformation.""" return self._matrix @property def is_lower(self) -> bool: """True if `A` is lower triangular, False if upper triangular.""" return self._is_lower def forward(self, x: Array) -> Array: """Computes y = f(x).""" self._check_forward_input_shape(x) batched = jnp.vectorize(_forward_unbatched, signature="(m),(m,m)->(m)") return batched(x, self._matrix) def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" self._check_forward_input_shape(x) batch_shape = jax.lax.broadcast_shapes(self.batch_shape, x.shape[:-1]) return jnp.broadcast_to(self._logdet, batch_shape) def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" return self.forward(x), self.forward_log_det_jacobian(x) def inverse(self, y: Array) -> Array: """Computes x = f^{-1}(y).""" self._check_inverse_input_shape(y) batched = jnp.vectorize( functools.partial(_inverse_unbatched, is_lower=self._is_lower), signature="(m),(m,m)->(m)") return batched(y, self._matrix) def inverse_log_det_jacobian(self, y: Array) -> Array: """Computes log|det J(f^{-1})(y)|.""" return -self.forward_log_det_jacobian(y) def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" return self.inverse(y), self.inverse_log_det_jacobian(y) def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" if type(other) is TriangularLinear: # pylint: disable=unidiomatic-typecheck return all(( self.matrix is other.matrix, self.is_lower is other.is_lower, )) return False
(matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], is_lower: bool = True)
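A small sketch (not from the source; the matrix is illustrative, and it assumes `TriangularLinear` is exported at the package top level like the other bijectors here) showing the O(D) log-determinant and the triangular solve used by the inverse.
```
import jax.numpy as jnp
import distrax

A = jnp.array([[2.0, 0.0],
               [1.0, 0.5]])
bij = distrax.TriangularLinear(matrix=A, is_lower=True)

x = jnp.array([1.0, 1.0])
y = bij.forward(x)                      # A @ x = [2., 1.5]
print(y)
print(bij.forward_log_det_jacobian(x))  # log(2) + log(0.5) = 0
print(bij.inverse(y))                   # solves the triangular system -> x
```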
56,390
distrax._src.bijectors.triangular_linear
__init__
Initializes a `TriangularLinear` bijector. Args: matrix: a square matrix whose triangular part defines `A`. Can also be a batch of matrices. Whether `A` is the lower or upper triangular part of `matrix` is determined by `is_lower`. is_lower: if True, `A` is set to the lower triangular part of `matrix`. If False, `A` is set to the upper triangular part of `matrix`.
def __init__(self, matrix: Array, is_lower: bool = True): """Initializes a `TriangularLinear` bijector. Args: matrix: a square matrix whose triangular part defines `A`. Can also be a batch of matrices. Whether `A` is the lower or upper triangular part of `matrix` is determined by `is_lower`. is_lower: if True, `A` is set to the lower triangular part of `matrix`. If False, `A` is set to the upper triangular part of `matrix`. """ if matrix.ndim < 2: raise ValueError(f"`matrix` must have at least 2 dimensions, got" f" {matrix.ndim}.") if matrix.shape[-2] != matrix.shape[-1]: raise ValueError(f"`matrix` must be square; instead, it has shape" f" {matrix.shape[-2:]}.") super().__init__( event_dims=matrix.shape[-1], batch_shape=matrix.shape[:-2], dtype=matrix.dtype) self._matrix = jnp.tril(matrix) if is_lower else jnp.triu(matrix) self._is_lower = is_lower triangular_logdet = jnp.vectorize(_triangular_logdet, signature="(m,m)->()") self._logdet = triangular_logdet(self._matrix)
(self, matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], is_lower: bool = True)
56,394
distrax._src.bijectors.triangular_linear
forward
Computes y = f(x).
def forward(self, x: Array) -> Array: """Computes y = f(x).""" self._check_forward_input_shape(x) batched = jnp.vectorize(_forward_unbatched, signature="(m),(m,m)->(m)") return batched(x, self._matrix)
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,396
distrax._src.bijectors.triangular_linear
forward_log_det_jacobian
Computes log|det J(f)(x)|.
def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" self._check_forward_input_shape(x) batch_shape = jax.lax.broadcast_shapes(self.batch_shape, x.shape[:-1]) return jnp.broadcast_to(self._logdet, batch_shape)
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,397
distrax._src.bijectors.triangular_linear
inverse
Computes x = f^{-1}(y).
def inverse(self, y: Array) -> Array: """Computes x = f^{-1}(y).""" self._check_inverse_input_shape(y) batched = jnp.vectorize( functools.partial(_inverse_unbatched, is_lower=self._is_lower), signature="(m),(m,m)->(m)") return batched(y, self._matrix)
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,399
distrax._src.bijectors.triangular_linear
inverse_log_det_jacobian
Computes log|det J(f^{-1})(y)|.
def inverse_log_det_jacobian(self, y: Array) -> Array: """Computes log|det J(f^{-1})(y)|.""" return -self.forward_log_det_jacobian(y)
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,400
distrax._src.bijectors.triangular_linear
same_as
Returns True if this bijector is guaranteed to be the same as `other`.
def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" if type(other) is TriangularLinear: # pylint: disable=unidiomatic-typecheck return all(( self.matrix is other.matrix, self.is_lower is other.is_lower, )) return False
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
56,402
distrax._src.bijectors.unconstrained_affine
UnconstrainedAffine
An unconstrained affine bijection. This bijector is a linear-plus-bias transformation `f(x) = Ax + b`, where `A` is a `D x D` square matrix and `b` is a `D`-dimensional vector. The bijector is invertible if and only if `A` is an invertible matrix. It is the responsibility of the user to make sure that this is the case; the class will make no attempt to verify that the bijector is invertible. The Jacobian determinant is equal to `det(A)`. The inverse is computed by solving the linear system `Ax = y - b`. WARNING: Both the determinant and the inverse cost `O(D^3)` to compute. Thus, this bijector is recommended only for small `D`.
class UnconstrainedAffine(base.Bijector): """An unconstrained affine bijection. This bijector is a linear-plus-bias transformation `f(x) = Ax + b`, where `A` is a `D x D` square matrix and `b` is a `D`-dimensional vector. The bijector is invertible if and only if `A` is an invertible matrix. It is the responsibility of the user to make sure that this is the case; the class will make no attempt to verify that the bijector is invertible. The Jacobian determinant is equal to `det(A)`. The inverse is computed by solving the linear system `Ax = y - b`. WARNING: Both the determinant and the inverse cost `O(D^3)` to compute. Thus, this bijector is recommended only for small `D`. """ def __init__(self, matrix: Array, bias: Array): """Initializes an `UnconstrainedAffine` bijector. Args: matrix: the matrix `A` in `Ax + b`. Must be square and invertible. Can also be a batch of matrices. bias: the vector `b` in `Ax + b`. Can also be a batch of vectors. """ check_affine_parameters(matrix, bias) super().__init__(event_ndims_in=1, is_constant_jacobian=True) self._batch_shape = jnp.broadcast_shapes(matrix.shape[:-2], bias.shape[:-1]) self._matrix = matrix self._bias = bias self._logdet = jnp.linalg.slogdet(matrix)[1] @property def matrix(self) -> Array: """The matrix `A` of the transformation.""" return self._matrix @property def bias(self) -> Array: """The shift `b` of the transformation.""" return self._bias def forward(self, x: Array) -> Array: """Computes y = f(x).""" self._check_forward_input_shape(x) def unbatched(single_x, matrix, bias): return matrix @ single_x + bias batched = jnp.vectorize(unbatched, signature="(m),(m,m),(m)->(m)") return batched(x, self._matrix, self._bias) def forward_log_det_jacobian(self, x: Array) -> Array: """Computes log|det J(f)(x)|.""" self._check_forward_input_shape(x) batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape[:-1]) return jnp.broadcast_to(self._logdet, batch_shape) def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]: """Computes y = f(x) and log|det J(f)(x)|.""" return self.forward(x), self.forward_log_det_jacobian(x) def inverse(self, y: Array) -> Array: """Computes x = f^{-1}(y).""" self._check_inverse_input_shape(y) def unbatched(single_y, matrix, bias): return jnp.linalg.solve(matrix, single_y - bias) batched = jnp.vectorize(unbatched, signature="(m),(m,m),(m)->(m)") return batched(y, self._matrix, self._bias) def inverse_log_det_jacobian(self, y: Array) -> Array: """Computes log|det J(f^{-1})(y)|.""" return -self.forward_log_det_jacobian(y) def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]: """Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|.""" return self.inverse(y), self.inverse_log_det_jacobian(y) def same_as(self, other: base.Bijector) -> bool: """Returns True if this bijector is guaranteed to be the same as `other`.""" if type(other) is UnconstrainedAffine: # pylint: disable=unidiomatic-typecheck return all(( self.matrix is other.matrix, self.bias is other.bias, )) return False
(matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], bias: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
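A minimal usage sketch (not part of the record above; it assumes the bijector is exported at the package top level as `distrax.UnconstrainedAffine`), exercising the forward/inverse round trip and the constant log-determinant:

```python
import jax.numpy as jnp
import distrax  # assumption: UnconstrainedAffine is exported at the top level

A = jnp.array([[2.0, 0.5],
               [0.0, 1.5]])      # invertible 2 x 2 matrix
b = jnp.array([1.0, -1.0])

bij = distrax.UnconstrainedAffine(matrix=A, bias=b)

x = jnp.array([0.3, -0.7])
y = bij.forward(x)            # A @ x + b
x_back = bij.inverse(y)       # solves A x = y - b

# The Jacobian is constant, so log|det J(f)(x)| equals slogdet(A)[1] for any x.
ldj = bij.forward_log_det_jacobian(x)
assert jnp.allclose(x_back, x, atol=1e-5)
assert jnp.allclose(ldj, jnp.linalg.slogdet(A)[1])
```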
56,403
distrax._src.bijectors.unconstrained_affine
__init__
Initializes an `UnconstrainedAffine` bijector. Args: matrix: the matrix `A` in `Ax + b`. Must be square and invertible. Can also be a batch of matrices. bias: the vector `b` in `Ax + b`. Can also be a batch of vectors.
def __init__(self, matrix: Array, bias: Array):
  """Initializes an `UnconstrainedAffine` bijector.

  Args:
    matrix: the matrix `A` in `Ax + b`. Must be square and invertible. Can
      also be a batch of matrices.
    bias: the vector `b` in `Ax + b`. Can also be a batch of vectors.
  """
  check_affine_parameters(matrix, bias)
  super().__init__(event_ndims_in=1, is_constant_jacobian=True)
  self._batch_shape = jnp.broadcast_shapes(matrix.shape[:-2], bias.shape[:-1])
  self._matrix = matrix
  self._bias = bias
  self._logdet = jnp.linalg.slogdet(matrix)[1]
(self, matrix: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], bias: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number])
56,407
distrax._src.bijectors.unconstrained_affine
forward
Computes y = f(x).
def forward(self, x: Array) -> Array:
  """Computes y = f(x)."""
  self._check_forward_input_shape(x)

  def unbatched(single_x, matrix, bias):
    return matrix @ single_x + bias

  batched = jnp.vectorize(unbatched, signature="(m),(m,m),(m)->(m)")
  return batched(x, self._matrix, self._bias)
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,409
distrax._src.bijectors.unconstrained_affine
forward_log_det_jacobian
Computes log|det J(f)(x)|.
def forward_log_det_jacobian(self, x: Array) -> Array:
  """Computes log|det J(f)(x)|."""
  self._check_forward_input_shape(x)
  batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape[:-1])
  return jnp.broadcast_to(self._logdet, batch_shape)
(self, x: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,410
distrax._src.bijectors.unconstrained_affine
inverse
Computes x = f^{-1}(y).
def inverse(self, y: Array) -> Array:
  """Computes x = f^{-1}(y)."""
  self._check_inverse_input_shape(y)

  def unbatched(single_y, matrix, bias):
    return jnp.linalg.solve(matrix, single_y - bias)

  batched = jnp.vectorize(unbatched, signature="(m),(m,m),(m)->(m)")
  return batched(y, self._matrix, self._bias)
(self, y: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,413
distrax._src.bijectors.unconstrained_affine
same_as
Returns True if this bijector is guaranteed to be the same as `other`.
def same_as(self, other: base.Bijector) -> bool:
  """Returns True if this bijector is guaranteed to be the same as `other`."""
  if type(other) is UnconstrainedAffine:  # pylint: disable=unidiomatic-typecheck
    return all((
        self.matrix is other.matrix,
        self.bias is other.bias,
    ))
  return False
(self, other: distrax._src.bijectors.bijector.Bijector) -> bool
56,415
distrax._src.distributions.uniform
Uniform
Uniform distribution with `low` and `high` parameters.
class Uniform(distribution.Distribution):
  """Uniform distribution with `low` and `high` parameters."""

  equiv_tfp_cls = tfd.Uniform

  def __init__(self, low: Numeric = 0., high: Numeric = 1.):
    """Initializes a Uniform distribution.

    Args:
      low: Lower bound.
      high: Upper bound.
    """
    super().__init__()
    self._low = conversion.as_float_array(low)
    self._high = conversion.as_float_array(high)
    self._batch_shape = jax.lax.broadcast_shapes(
        self._low.shape, self._high.shape)

  @property
  def event_shape(self) -> Tuple[int, ...]:
    """Shape of the events."""
    return ()

  @property
  def low(self) -> Array:
    """Lower bound."""
    return jnp.broadcast_to(self._low, self.batch_shape)

  @property
  def high(self) -> Array:
    """Upper bound."""
    return jnp.broadcast_to(self._high, self.batch_shape)

  @property
  def range(self) -> Array:
    return self.high - self.low

  @property
  def batch_shape(self) -> Tuple[int, ...]:
    return self._batch_shape

  def _sample_n(self, key: PRNGKey, n: int) -> Array:
    """See `Distribution._sample_n`."""
    new_shape = (n,) + self.batch_shape
    uniform = jax.random.uniform(
        key=key, shape=new_shape, dtype=self.range.dtype, minval=0., maxval=1.)
    low = jnp.expand_dims(self._low, range(uniform.ndim - self._low.ndim))
    range_ = jnp.expand_dims(self.range, range(uniform.ndim - self.range.ndim))
    return low + range_ * uniform

  def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
    """See `Distribution._sample_n_and_log_prob`."""
    samples = self._sample_n(key, n)
    log_prob = -jnp.log(self.range)
    log_prob = jnp.repeat(log_prob[None], n, axis=0)
    return samples, log_prob

  def log_prob(self, value: EventT) -> Array:
    """See `Distribution.log_prob`."""
    return jnp.log(self.prob(value))

  def prob(self, value: EventT) -> Array:
    """See `Distribution.prob`."""
    return jnp.where(
        jnp.logical_or(value < self.low, value > self.high),
        jnp.zeros_like(value),
        jnp.ones_like(value) / self.range)

  def entropy(self) -> Array:
    """Calculates the entropy."""
    return jnp.log(self.range)

  def mean(self) -> Array:
    """Calculates the mean."""
    return (self.low + self.high) / 2.

  def variance(self) -> Array:
    """Calculates the variance."""
    return jnp.square(self.range) / 12.

  def stddev(self) -> Array:
    """Calculates the standard deviation."""
    return self.range / math.sqrt(12.)

  def median(self) -> Array:
    """Calculates the median."""
    return self.mean()

  def cdf(self, value: EventT) -> Array:
    """See `Distribution.cdf`."""
    ones = jnp.ones_like(self.range)
    zeros = jnp.zeros_like(ones)
    result_if_not_big = jnp.where(
        value < self.low, zeros, (value - self.low) / self.range)
    return jnp.where(value > self.high, ones, result_if_not_big)

  def log_cdf(self, value: EventT) -> Array:
    """See `Distribution.log_cdf`."""
    return jnp.log(self.cdf(value))

  def __getitem__(self, index) -> 'Uniform':
    """See `Distribution.__getitem__`."""
    index = distribution.to_batch_shape_index(self.batch_shape, index)
    return Uniform(low=self.low[index], high=self.high[index])
(low: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int] = 0.0, high: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int] = 1.0)
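A short usage sketch (not part of the record above) showing parameter broadcasting, sampling, and the constant density:

```python
import jax
import jax.numpy as jnp
import distrax

dist = distrax.Uniform(low=0., high=jnp.array([1., 2.]))  # batch_shape == (2,)

samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(3,))  # (3, 2)

dist.log_prob(samples)   # -log(high - low), broadcast against the samples
dist.mean()              # [0.5, 1.0]
dist.entropy()           # [log 1, log 2] == [0., 0.6931...]
```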
56,416
distrax._src.distributions.uniform
__getitem__
See `Distribution.__getitem__`.
def __getitem__(self, index) -> 'Uniform':
  """See `Distribution.__getitem__`."""
  index = distribution.to_batch_shape_index(self.batch_shape, index)
  return Uniform(low=self.low[index], high=self.high[index])
(self, index) -> distrax._src.distributions.uniform.Uniform
56,417
distrax._src.distributions.uniform
__init__
Initializes a Uniform distribution. Args: low: Lower bound. high: Upper bound.
def __init__(self, low: Numeric = 0., high: Numeric = 1.):
  """Initializes a Uniform distribution.

  Args:
    low: Lower bound.
    high: Upper bound.
  """
  super().__init__()
  self._low = conversion.as_float_array(low)
  self._high = conversion.as_float_array(high)
  self._batch_shape = jax.lax.broadcast_shapes(
      self._low.shape, self._high.shape)
(self, low: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int] = 0.0, high: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int] = 1.0)
56,420
distrax._src.distributions.uniform
_sample_n
See `Distribution._sample_n`.
def _sample_n(self, key: PRNGKey, n: int) -> Array:
  """See `Distribution._sample_n`."""
  new_shape = (n,) + self.batch_shape
  uniform = jax.random.uniform(
      key=key, shape=new_shape, dtype=self.range.dtype, minval=0., maxval=1.)
  low = jnp.expand_dims(self._low, range(uniform.ndim - self._low.ndim))
  range_ = jnp.expand_dims(self.range, range(uniform.ndim - self.range.ndim))
  return low + range_ * uniform
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
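A sketch of the inverse-transform step made explicit (illustrative; it assumes `sample` forwards the PRNG key to `_sample_n` unchanged and that the default dtype is float32):

```python
import jax
import distrax

key = jax.random.PRNGKey(42)
dist = distrax.Uniform(low=-1., high=3.)

# `_sample_n` draws u ~ U[0, 1) and returns low + (high - low) * u.
u = jax.random.uniform(key, shape=(5,))
manual = -1. + 4. * u
via_dist = dist.sample(seed=key, sample_shape=(5,))  # same transform of the same draw
```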
56,421
distrax._src.distributions.uniform
_sample_n_and_log_prob
See `Distribution._sample_n_and_log_prob`.
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
  """See `Distribution._sample_n_and_log_prob`."""
  samples = self._sample_n(key, n)
  log_prob = -jnp.log(self.range)
  log_prob = jnp.repeat(log_prob[None], n, axis=0)
  return samples, log_prob
(self, key: jax.Array, n: int) -> Tuple[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]
56,422
distrax._src.distributions.uniform
cdf
See `Distribution.cdf`.
def cdf(self, value: EventT) -> Array:
  """See `Distribution.cdf`."""
  ones = jnp.ones_like(self.range)
  zeros = jnp.zeros_like(ones)
  result_if_not_big = jnp.where(
      value < self.low, zeros, (value - self.low) / self.range)
  return jnp.where(value > self.high, ones, result_if_not_big)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
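Worked example of the piecewise-linear CDF above (illustrative only):

```python
import jax.numpy as jnp
import distrax

dist = distrax.Uniform(low=1., high=3.)
dist.cdf(jnp.array([0., 1., 2., 3., 4.]))
# -> [0. , 0. , 0.5, 1. , 1. ]: zero below `low`, one above `high`,
#    and (value - low) / (high - low) in between.
```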
56,424
distrax._src.distributions.uniform
entropy
Calculates the entropy.
def entropy(self) -> Array:
  """Calculates the entropy."""
  return jnp.log(self.range)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,429
distrax._src.distributions.uniform
mean
Calculates the mean.
def mean(self) -> Array:
  """Calculates the mean."""
  return (self.low + self.high) / 2.
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,432
distrax._src.distributions.uniform
prob
See `Distribution.prob`.
def prob(self, value: EventT) -> Array:
  """See `Distribution.prob`."""
  return jnp.where(
      jnp.logical_or(value < self.low, value > self.high),
      jnp.zeros_like(value),
      jnp.ones_like(value) / self.range)
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,435
distrax._src.distributions.uniform
stddev
Calculates the standard deviation.
def stddev(self) -> Array:
  """Calculates the standard deviation."""
  return self.range / math.sqrt(12.)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,438
distrax._src.distributions.uniform
variance
Calculates the variance.
def variance(self) -> Array:
  """Calculates the variance."""
  return jnp.square(self.range) / 12.
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
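A quick numerical check (illustrative sketch) of the closed-form moments used by `mean`, `variance`, and `stddev`:

```python
import jax
import jax.numpy as jnp
import distrax

dist = distrax.Uniform(low=-2., high=4.)
s = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(100_000,))

dist.mean(), jnp.mean(s)      # both close to (-2 + 4) / 2 = 1.0
dist.variance(), jnp.var(s)   # both close to 6 ** 2 / 12 = 3.0
dist.stddev()                 # sqrt(3) ~= 1.732
```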
56,439
distrax._src.distributions.von_mises
VonMises
The von Mises distribution over angles. The von Mises distribution is a distribution over angles. It is the maximum entropy distribution on the space of angles, given a circular mean and a circular variance. In this implementation, the distribution is defined over the range [-pi, pi), with all samples in this interval and the CDF is constant outside this interval. Do note that the prob and log_prob also accept values outside of [-pi, pi) and will return values as if they are inside the interval. When `concentration=0`, this distribution becomes the uniform distribution over the interval [-pi, pi). When the concentration goes to infinity, this distribution approximates a Normal distribution. #### Details The probability density function (pdf) of this distribution is, ```none pdf(x; loc, concentration) = exp(concentration * cos(x - loc)) / (2 * pi * I_0 (concentration)) ``` where: * `I_0` is the zeroth order modified Bessel function; * `loc` the circular mean of the distribution, a scalar in radians. It can take arbitrary values, also outside of [-pi, pi). * `concentration >= 0` is the concentration parameter. It is the analogue to 1/sigma of the Normal distribution. #### Examples Examples of initialization of this distribution. ```python # Create a batch of two von Mises distributions. dist = distrax.VonMises(loc=[1.0, 2.0], concentration=[3.0, 4.0]) dist.sample(sample_shape=(3,), seed=0) # Sample of shape [3, 2] ``` Arguments are broadcast when possible. ```python dist = distrax.VonMises(loc=1.0, concentration=[3.0, 4.0]) # Evaluating the pdf of both distributions on the point 3.0 returns a length 2 # tensor. dist.prob(3.0) ```
class VonMises(distribution.Distribution):
  """The von Mises distribution over angles.

  The von Mises distribution is a distribution over angles. It is the maximum
  entropy distribution on the space of angles, given a circular mean and a
  circular variance.

  In this implementation, the distribution is defined over the range [-pi, pi),
  with all samples in this interval and the CDF is constant outside this
  interval. Do note that the prob and log_prob also accept values outside of
  [-pi, pi) and will return values as if they are inside the interval.

  When `concentration=0`, this distribution becomes the uniform distribution
  over the interval [-pi, pi). When the concentration goes to infinity, this
  distribution approximates a Normal distribution.

  #### Details

  The probability density function (pdf) of this distribution is,

  ```none
  pdf(x; loc, concentration) = exp(concentration * cos(x - loc))
                               / (2 * pi * I_0 (concentration))
  ```

  where:
  * `I_0` is the zeroth order modified Bessel function;
  * `loc` the circular mean of the distribution, a scalar in radians. It can
    take arbitrary values, also outside of [-pi, pi).
  * `concentration >= 0` is the concentration parameter. It is the analogue
    to 1/sigma of the Normal distribution.

  #### Examples

  Examples of initialization of this distribution.

  ```python
  # Create a batch of two von Mises distributions.
  dist = distrax.VonMises(loc=[1.0, 2.0], concentration=[3.0, 4.0])
  dist.sample(sample_shape=(3,), seed=0)  # Sample of shape [3, 2]
  ```

  Arguments are broadcast when possible.

  ```python
  dist = distrax.VonMises(loc=1.0, concentration=[3.0, 4.0])

  # Evaluating the pdf of both distributions on the point 3.0 returns a length 2
  # tensor.
  dist.prob(3.0)
  ```
  """

  equiv_tfp_cls = tfd.VonMises

  def __init__(self, loc: Numeric, concentration: Numeric):
    super().__init__()
    self._loc = conversion.as_float_array(loc)
    self._concentration = conversion.as_float_array(concentration)
    self._batch_shape = jax.lax.broadcast_shapes(
        self._loc.shape, self._concentration.shape
    )

  @property
  def loc(self) -> Array:
    """The circular mean of the distribution."""
    return jnp.broadcast_to(self._loc, self.batch_shape)

  @property
  def concentration(self) -> Array:
    """The concentration of the distribution."""
    return jnp.broadcast_to(self._concentration, self.batch_shape)

  @property
  def event_shape(self) -> Tuple[int, ...]:
    """Shape of event of distribution samples."""
    return ()

  @property
  def batch_shape(self) -> Tuple[int, ...]:
    """Shape of batch of distribution samples."""
    return self._batch_shape

  def mean(self) -> Array:
    """The circular mean of the distribution."""
    return self.loc

  def variance(self) -> Array:
    """The circular variance of the distribution."""
    conc = self._concentration
    return 1. - jax.scipy.special.i1e(conc) / jax.scipy.special.i0e(conc)

  def prob(self, value: EventT) -> Array:
    """The probability of value under the distribution."""
    conc = self._concentration
    unnormalized_prob = jnp.exp(conc * (jnp.cos(value - self._loc) - 1.))
    normalization = (2. * math.pi) * jax.scipy.special.i0e(conc)
    return unnormalized_prob / normalization

  def log_prob(self, value: EventT) -> Array:
    """The logarithm of the probability of value under the distribution."""
    conc = self._concentration
    i_0 = jax.scipy.special.i0(conc)
    return (
        conc * jnp.cos(value - self._loc) - math.log(2 * math.pi) - jnp.log(i_0)
    )

  def _sample_n(self, key: PRNGKey, n: int) -> Array:
    """Returns `n` samples in [-pi, pi)."""
    out_shape = (n,) + self.batch_shape
    conc = self._concentration
    dtype = jnp.result_type(self._loc, self._concentration)
    sample = _von_mises_sample(out_shape, conc, key, dtype) + self._loc
    return _convert_angle_to_standard(sample)

  def entropy(self) -> Array:
    """Returns the entropy."""
    conc = self._concentration
    i0e = jax.scipy.special.i0e(conc)
    i1e = jax.scipy.special.i1e(conc)
    return conc * (1 - i1e / i0e) + math.log(2 * math.pi) + jnp.log(i0e)

  def mode(self) -> Array:
    """The mode of the distribution."""
    return self.mean()

  def cdf(self, value: EventT) -> Array:
    """The CDF of `value` under the distribution.

    Note that the CDF takes values of 0. or 1. for values outside of
    [-pi, pi). Note that this behaviour is different from
    `tensorflow_probability.VonMises` or `scipy.stats.vonmises`.

    Args:
      value: the angle evaluated under the distribution.

    Returns:
      the circular CDF of value.
    """
    dtype = jnp.result_type(value, self._loc, self._concentration)
    loc = _convert_angle_to_standard(self._loc)
    return jnp.clip(
        _von_mises_cdf(value - loc, self._concentration, dtype)
        - _von_mises_cdf(-math.pi - loc, self._concentration, dtype),
        a_min=0.,
        a_max=1.,
    )

  def log_cdf(self, value: EventT) -> Array:
    """See `Distribution.log_cdf`."""
    return jnp.log(self.cdf(value))

  def survival_function(self, value: EventT) -> Array:
    """See `Distribution.survival_function`."""
    dtype = jnp.result_type(value, self._loc, self._concentration)
    loc = _convert_angle_to_standard(self._loc)
    return jnp.clip(
        _von_mises_cdf(math.pi - loc, self._concentration, dtype)
        - _von_mises_cdf(value - loc, self._concentration, dtype),
        a_min=0.,
        a_max=1.,
    )

  def log_survival_function(self, value: EventT) -> Array:
    """See `Distribution.log_survival_function`."""
    return jnp.log(self.survival_function(value))

  def __getitem__(self, index) -> 'VonMises':
    index = distribution.to_batch_shape_index(self.batch_shape, index)
    return VonMises(
        loc=self.loc[index],
        concentration=self.concentration[index],
    )
(loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], concentration: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
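A usage sketch (not part of the record above; assumes `VonMises` is exported at the distrax top level):

```python
import jax
import jax.numpy as jnp
import distrax

dist = distrax.VonMises(loc=0.5, concentration=2.0)
theta = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(4,))  # in [-pi, pi)

dist.prob(theta)
dist.mean()        # 0.5, the circular mean
dist.variance()    # 1 - I_1(2) / I_0(2), the circular variance

# At zero concentration the density is flat: 1 / (2 * pi) everywhere.
flat = distrax.VonMises(loc=0.0, concentration=0.0)
flat.prob(jnp.array([-3.0, 0.0, 3.0]))   # ~0.159 for every entry
```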
56,440
distrax._src.distributions.von_mises
__getitem__
null
def __getitem__(self, index) -> 'VonMises':
  index = distribution.to_batch_shape_index(self.batch_shape, index)
  return VonMises(
      loc=self.loc[index],
      concentration=self.concentration[index],
  )
(self, index) -> distrax._src.distributions.von_mises.VonMises
56,441
distrax._src.distributions.von_mises
__init__
null
def __init__(self, loc: Numeric, concentration: Numeric):
  super().__init__()
  self._loc = conversion.as_float_array(loc)
  self._concentration = conversion.as_float_array(concentration)
  self._batch_shape = jax.lax.broadcast_shapes(
      self._loc.shape, self._concentration.shape
  )
(self, loc: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int], concentration: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number, float, int])
56,444
distrax._src.distributions.von_mises
_sample_n
Returns `n` samples in [-pi, pi).
def _sample_n(self, key: PRNGKey, n: int) -> Array:
  """Returns `n` samples in [-pi, pi)."""
  out_shape = (n,) + self.batch_shape
  conc = self._concentration
  dtype = jnp.result_type(self._loc, self._concentration)
  sample = _von_mises_sample(out_shape, conc, key, dtype) + self._loc
  return _convert_angle_to_standard(sample)
(self, key: jax.Array, n: int) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,446
distrax._src.distributions.von_mises
cdf
The CDF of `value` under the distribution. Note that the CDF takes values of 0. or 1. for values outside of [-pi, pi). Note that this behaviour is different from `tensorflow_probability.VonMises` or `scipy.stats.vonmises`. Args: value: the angle evaluated under the distribution. Returns: the circular CDF of value.
def cdf(self, value: EventT) -> Array:
  """The CDF of `value` under the distribution.

  Note that the CDF takes values of 0. or 1. for values outside of [-pi, pi).
  Note that this behaviour is different from
  `tensorflow_probability.VonMises` or `scipy.stats.vonmises`.

  Args:
    value: the angle evaluated under the distribution.

  Returns:
    the circular CDF of value.
  """
  dtype = jnp.result_type(value, self._loc, self._concentration)
  loc = _convert_angle_to_standard(self._loc)
  return jnp.clip(
      _von_mises_cdf(value - loc, self._concentration, dtype)
      - _von_mises_cdf(-math.pi - loc, self._concentration, dtype),
      a_min=0.,
      a_max=1.,
  )
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,448
distrax._src.distributions.von_mises
entropy
Returns the entropy.
def entropy(self) -> Array:
  """Returns the entropy."""
  conc = self._concentration
  i0e = jax.scipy.special.i0e(conc)
  i1e = jax.scipy.special.i1e(conc)
  return conc * (1 - i1e / i0e) + math.log(2 * math.pi) + jnp.log(i0e)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,451
distrax._src.distributions.von_mises
log_prob
The logarithm of the probability of value under the distribution.
def log_prob(self, value: EventT) -> Array:
  """The logarithm of the probability of value under the distribution."""
  conc = self._concentration
  i_0 = jax.scipy.special.i0(conc)
  return (
      conc * jnp.cos(value - self._loc) - math.log(2 * math.pi) - jnp.log(i_0)
  )
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
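A Riemann-sum sanity check (illustrative) that the density implied by `log_prob` integrates to one over a single period:

```python
import jax.numpy as jnp
import distrax

dist = distrax.VonMises(loc=1.0, concentration=3.0)

grid = jnp.linspace(-jnp.pi, jnp.pi, 10_000, endpoint=False)
dx = 2 * jnp.pi / 10_000
mass = jnp.sum(jnp.exp(dist.log_prob(grid))) * dx   # ~1.0
```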
56,452
distrax._src.distributions.von_mises
log_survival_function
See `Distribution.log_survival_function`.
def log_survival_function(self, value: EventT) -> Array:
  """See `Distribution.log_survival_function`."""
  return jnp.log(self.survival_function(value))
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,453
distrax._src.distributions.von_mises
mean
The circular mean of the distribution.
def mean(self) -> Array:
  """The circular mean of the distribution."""
  return self.loc
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,455
distrax._src.distributions.von_mises
mode
The mode of the distribution.
def mode(self) -> Array:
  """The mode of the distribution."""
  return self.mean()
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,456
distrax._src.distributions.von_mises
prob
The probability of value under the distribution.
def prob(self, value: EventT) -> Array:
  """The probability of value under the distribution."""
  conc = self._concentration
  unnormalized_prob = jnp.exp(conc * (jnp.cos(value - self._loc) - 1.))
  normalization = (2. * math.pi) * jax.scipy.special.i0e(conc)
  return unnormalized_prob / normalization
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,460
distrax._src.distributions.von_mises
survival_function
See `Distribution.survival_function`.
def survival_function(self, value: EventT) -> Array:
  """See `Distribution.survival_function`."""
  dtype = jnp.result_type(value, self._loc, self._concentration)
  loc = _convert_angle_to_standard(self._loc)
  return jnp.clip(
      _von_mises_cdf(math.pi - loc, self._concentration, dtype)
      - _von_mises_cdf(value - loc, self._concentration, dtype),
      a_min=0.,
      a_max=1.,
  )
(self, value: ~EventT) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,462
distrax._src.distributions.von_mises
variance
The circular variance of the distribution.
def variance(self) -> Array:
  """The circular variance of the distribution."""
  conc = self._concentration
  return 1. - jax.scipy.special.i1e(conc) / jax.scipy.special.i0e(conc)
(self) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
56,464
distrax._src.utils.conversion
as_bijector
Converts a bijector-like object to a Distrax bijector. Bijector-like objects are: Distrax bijectors, TFP bijectors, and callables. Distrax bijectors are returned unchanged. TFP bijectors are converted to a Distrax equivalent. Callables are wrapped by `distrax.Lambda`, with a few exceptions where an explicit implementation already exists and is returned. Args: obj: The bijector-like object to be converted. Returns: A Distrax bijector.
def as_bijector(obj: BijectorLike) -> bijector.BijectorT:
  """Converts a bijector-like object to a Distrax bijector.

  Bijector-like objects are: Distrax bijectors, TFP bijectors, and callables.
  Distrax bijectors are returned unchanged. TFP bijectors are converted to a
  Distrax equivalent. Callables are wrapped by `distrax.Lambda`, with a few
  exceptions where an explicit implementation already exists and is returned.

  Args:
    obj: The bijector-like object to be converted.

  Returns:
    A Distrax bijector.
  """
  if isinstance(obj, bijector.Bijector):
    return obj
  elif isinstance(obj, tfb.Bijector):
    return bijector_from_tfp.BijectorFromTFP(obj)
  elif obj is jax.nn.sigmoid:
    return sigmoid.Sigmoid()
  elif obj is jnp.tanh:
    return tanh.Tanh()
  elif callable(obj):
    return lambda_bijector.Lambda(obj)
  else:
    raise TypeError(
        f"A bijector-like object can be a `distrax.Bijector`, a `tfb.Bijector`,"
        f" or a callable. Got type `{type(obj)}`.")
(obj: Union[distrax._src.bijectors.bijector.Bijector, tensorflow_probability.substrates.jax.bijectors.bijector.Bijector, Callable[[Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]], Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]]]) -> ~BijectorT
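A sketch of the accepted inputs (illustrative; it imports the module by the private path listed in this record, which may differ in released packages):

```python
import jax
import jax.numpy as jnp
from distrax._src.utils import conversion
from tensorflow_probability.substrates import jax as tfp

conversion.as_bijector(jax.nn.sigmoid)          # returns a distrax Sigmoid()
conversion.as_bijector(jnp.tanh)                # returns a distrax Tanh()
conversion.as_bijector(tfp.bijectors.Exp())     # wrapped as BijectorFromTFP
conversion.as_bijector(lambda x: 2. * x + 1.)   # wrapped by distrax Lambda
```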
56,465
distrax._src.utils.conversion
as_distribution
Converts a distribution-like object to a Distrax distribution. Distribution-like objects are: Distrax distributions and TFP distributions. Distrax distributions are returned unchanged. TFP distributions are converted to a Distrax equivalent. Args: obj: A distribution-like object to be converted. Returns: A Distrax distribution.
def as_distribution(obj: DistributionLike) -> distribution.DistributionT:
  """Converts a distribution-like object to a Distrax distribution.

  Distribution-like objects are: Distrax distributions and TFP distributions.
  Distrax distributions are returned unchanged. TFP distributions are
  converted to a Distrax equivalent.

  Args:
    obj: A distribution-like object to be converted.

  Returns:
    A Distrax distribution.
  """
  if isinstance(obj, distribution.Distribution):
    return obj
  elif isinstance(obj, tfd.Distribution):
    return distribution_from_tfp.distribution_from_tfp(obj)
  else:
    raise TypeError(
        f"A distribution-like object can be a `distrax.Distribution` or a"
        f" `tfd.Distribution`. Got type `{type(obj)}`.")
(obj: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution]) -> ~DistributionT
56,466
distrax._src.utils.monte_carlo
estimate_kl_best_effort
Estimates KL(distribution_a, distribution_b) exactly or with DiCE. If the kl_divergence(distribution_a, distribution_b) is not supported, the DiCE estimator is used instead. Args: distribution_a: The first distribution. distribution_b: The second distribution. rng_key: The PRNGKey random key. num_samples: The number of samples, if using the DiCE estimator. proposal_distribution: A proposal distribution for the samples, if using the DiCE estimator. If None, use `distribution_a` as proposal. Returns: The estimated KL divergence.
def estimate_kl_best_effort(
    distribution_a: DistributionLike,
    distribution_b: DistributionLike,
    rng_key: PRNGKey,
    num_samples: int,
    proposal_distribution: Optional[DistributionLike] = None):
  """Estimates KL(distribution_a, distribution_b) exactly or with DiCE.

  If the kl_divergence(distribution_a, distribution_b) is not supported,
  the DiCE estimator is used instead.

  Args:
    distribution_a: The first distribution.
    distribution_b: The second distribution.
    rng_key: The PRNGKey random key.
    num_samples: The number of samples, if using the DiCE estimator.
    proposal_distribution: A proposal distribution for the samples, if using
      the DiCE estimator. If None, use `distribution_a` as proposal.

  Returns:
    The estimated KL divergence.
  """
  distribution_a = conversion.as_distribution(distribution_a)
  distribution_b = conversion.as_distribution(distribution_b)
  # If possible, compute the exact KL.
  try:
    return tfd.kl_divergence(distribution_a, distribution_b)
  except NotImplementedError:
    pass
  return mc_estimate_kl(distribution_a, distribution_b, rng_key,
                        num_samples=num_samples,
                        proposal_distribution=proposal_distribution)
(distribution_a: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], distribution_b: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], rng_key: jax.Array, num_samples: int, proposal_distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution, NoneType] = None)
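Usage sketch (illustrative; assumes the private module path above is importable). For a Normal/Normal pair an analytical KL is registered, so the Monte Carlo fallback should not be used:

```python
import jax
import distrax
from distrax._src.utils import monte_carlo

dist_a = distrax.Normal(loc=0., scale=1.)
dist_b = distrax.Normal(loc=1., scale=2.)

kl = monte_carlo.estimate_kl_best_effort(
    dist_a, dist_b, rng_key=jax.random.PRNGKey(0), num_samples=1024)
# Returns the exact KL; `num_samples` is only used when the exact form
# raises NotImplementedError.
```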
56,467
distrax._src.utils.importance_sampling
importance_sampling_ratios
Compute importance sampling ratios given target and sampling distributions. Args: target_dist: Target probability distribution. sampling_dist: Sampling probability distribution. event: Samples. Returns: Importance sampling ratios.
def importance_sampling_ratios(
    target_dist: DistributionLike,
    sampling_dist: DistributionLike,
    event: Array
) -> Array:
  """Compute importance sampling ratios given target and sampling distributions.

  Args:
    target_dist: Target probability distribution.
    sampling_dist: Sampling probability distribution.
    event: Samples.

  Returns:
    Importance sampling ratios.
  """
  log_pi_a = target_dist.log_prob(event)
  log_mu_a = sampling_dist.log_prob(event)
  rho = jnp.exp(log_pi_a - log_mu_a)
  return rho
(target_dist: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], sampling_dist: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], event: Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]) -> Union[jax.Array, numpy.ndarray, numpy.bool_, numpy.number]
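Usage sketch (illustrative): off-policy ratios pi(a) / mu(a) for a batch of actions drawn from the behaviour policy:

```python
import jax
import jax.numpy as jnp
import distrax
from distrax._src.utils import importance_sampling

target = distrax.Categorical(probs=jnp.array([0.7, 0.2, 0.1]))
behaviour = distrax.Categorical(probs=jnp.array([0.4, 0.4, 0.2]))

actions = behaviour.sample(seed=jax.random.PRNGKey(0), sample_shape=(6,))
rhos = importance_sampling.importance_sampling_ratios(
    target_dist=target, sampling_dist=behaviour, event=actions)
# rhos[i] == target.prob(actions[i]) / behaviour.prob(actions[i])
```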
56,468
distrax._src.utils.monte_carlo
mc_estimate_kl
Estimates KL(distribution_a, distribution_b) with the DiCE estimator. To get correct gradients with respect to `distribution_a`, we use the DiCE estimator, i.e., we stop the gradient with respect to the samples and with respect to the denominator in the importance weights. We then do not need reparametrized distributions. Args: distribution_a: The first distribution. distribution_b: The second distribution. rng_key: The PRNGKey random key. num_samples: The number of samples, if using the DiCE estimator. proposal_distribution: A proposal distribution for the samples, if using the DiCE estimator. If None, use `distribution_a` as proposal. Returns: The estimated KL divergence.
def mc_estimate_kl(
    distribution_a: DistributionLike,
    distribution_b: DistributionLike,
    rng_key: PRNGKey,
    num_samples: int,
    proposal_distribution: Optional[DistributionLike] = None):
  """Estimates KL(distribution_a, distribution_b) with the DiCE estimator.

  To get correct gradients with respect to `distribution_a`, we use the DiCE
  estimator, i.e., we stop the gradient with respect to the samples and with
  respect to the denominator in the importance weights. We then do not need
  reparametrized distributions.

  Args:
    distribution_a: The first distribution.
    distribution_b: The second distribution.
    rng_key: The PRNGKey random key.
    num_samples: The number of samples, if using the DiCE estimator.
    proposal_distribution: A proposal distribution for the samples, if using
      the DiCE estimator. If None, use `distribution_a` as proposal.

  Returns:
    The estimated KL divergence.
  """
  if proposal_distribution is None:
    proposal_distribution = distribution_a
  proposal_distribution = conversion.as_distribution(proposal_distribution)
  distribution_a = conversion.as_distribution(distribution_a)
  distribution_b = conversion.as_distribution(distribution_b)

  samples, logp_proposal = proposal_distribution.sample_and_log_prob(
      seed=rng_key, sample_shape=[num_samples])
  samples = jax.lax.stop_gradient(samples)
  logp_proposal = jax.lax.stop_gradient(logp_proposal)
  logp_a = distribution_a.log_prob(samples)
  logp_b = distribution_b.log_prob(samples)
  importance_weight = jnp.exp(logp_a - logp_proposal)
  log_ratio = logp_b - logp_a
  kl_estimator = -importance_weight * log_ratio
  return jnp.mean(kl_estimator, axis=0)
(distribution_a: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], distribution_b: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution], rng_key: jax.Array, num_samples: int, proposal_distribution: Union[distrax._src.distributions.distribution.Distribution, tensorflow_probability.substrates.jax.distributions.distribution.Distribution, NoneType] = None)
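Sketch (illustrative) comparing the DiCE estimate against the exact KL for a pair of Normals:

```python
import jax
import distrax
from distrax._src.utils import monte_carlo
from tensorflow_probability.substrates import jax as tfp

tfd = tfp.distributions

dist_a = distrax.Normal(loc=0., scale=1.)
dist_b = distrax.Normal(loc=0.5, scale=1.5)

approx = monte_carlo.mc_estimate_kl(
    dist_a, dist_b, rng_key=jax.random.PRNGKey(0), num_samples=100_000)
exact = tfd.kl_divergence(dist_a, dist_b)
# approx ~= exact, up to O(1 / sqrt(num_samples)) Monte Carlo error.
```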