# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper to adapt a TFP distribution."""
from typing import Tuple
import chex
from distrax._src.distributions import distribution
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
PRNGKey = chex.PRNGKey
DistributionT = distribution.DistributionT
EventT = distribution.EventT
def distribution_from_tfp(tfp_distribution: tfd.Distribution) -> DistributionT:
"""Create a Distrax distribution from a TFP distribution.
Given a TFP distribution `tfp_distribution`, this method returns a
distribution of a class that inherits from the class of `tfp_distribution`.
The returned distribution behaves almost identically to the TFP distribution,
except that the common methods (`sample`, `variance`, etc.) are overridden to
return `jnp.ndarrays`. Moreover, the wrapped distribution also implements
Distrax methods inherited from `Distribution`, such as `sample_and_log_prob`.
Args:
tfp_distribution: A TFP distribution.
Returns:
The wrapped distribution.
"""
class DistributionFromTFP(
distribution.Distribution, tfp_distribution.__class__):
"""Class to wrap a TFP distribution.
The wrapped class dynamically inherits from the class of `tfp_distribution`,
so that computations involving the KL divergence remain valid.
"""
def __init__(self):
pass
def __getattr__(self, name: str):
return getattr(tfp_distribution, name)
def sample(self, *a, **k): # pylint: disable=useless-super-delegation
"""See `Distribution.sample`."""
return super().sample(*a, **k)
def _sample_n(self, key: PRNGKey, n: int):
"""See `Distribution._sample_n`."""
return jnp.asarray(
tfp_distribution.sample(seed=key, sample_shape=(n,)),
dtype=tfp_distribution.dtype)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return jnp.asarray(tfp_distribution.log_prob(value))
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
return jnp.asarray(tfp_distribution.prob(value))
@property
def event_shape(self) -> Tuple[int, ...]:
"""See `Distribution.event_shape`."""
return tuple(tfp_distribution.event_shape)
@property
def batch_shape(self) -> Tuple[int, ...]:
"""See `Distribution.batch_shape`."""
return tuple(tfp_distribution.batch_shape)
@property
def name(self) -> str:
"""See `Distribution.name`."""
return tfp_distribution.name
@property
def dtype(self) -> jnp.dtype:
"""See `Distribution.dtype`."""
return tfp_distribution.dtype
def kl_divergence(self, other_dist, *args, **kwargs) -> Array:
"""See `Distribution.kl_divergence`."""
return jnp.asarray(
tfd.kullback_leibler.kl_divergence(self, other_dist, *args, **kwargs))
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
return jnp.asarray(tfp_distribution.entropy())
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.asarray(tfp_distribution.log_cdf(value))
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jnp.asarray(tfp_distribution.cdf(value))
def mean(self) -> Array:
"""See `Distribution.mean`."""
return jnp.asarray(tfp_distribution.mean())
def median(self) -> Array:
"""See `Distribution.median`."""
return jnp.asarray(tfp_distribution.median())
def variance(self) -> Array:
"""See `Distribution.variance`."""
return jnp.asarray(tfp_distribution.variance())
def stddev(self) -> Array:
"""See `Distribution.stddev`."""
return jnp.asarray(tfp_distribution.stddev())
def mode(self) -> Array:
"""See `Distribution.mode`."""
return jnp.asarray(tfp_distribution.mode())
def __getitem__(self, index) -> DistributionT:
"""See `Distribution.__getitem__`."""
return distribution_from_tfp(tfp_distribution[index])
return DistributionFromTFP()
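# Illustrative usage sketch (not part of the module; the `tfd.Normal`
# parameters below are arbitrary):
#   import jax
#   wrapped = distribution_from_tfp(tfd.Normal(loc=0., scale=1.))
#   samples, log_prob = wrapped.sample_and_log_prob(
#       seed=jax.random.PRNGKey(0), sample_shape=(3,))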
# End of file: distrax/_src/distributions/distribution_from_tfp.py (repo: distrax-master)
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MultivariateNormalFromBijector distribution."""
from typing import Callable, Union
import chex
from distrax._src.bijectors import block
from distrax._src.bijectors import chain
from distrax._src.bijectors import diag_linear
from distrax._src.bijectors import linear
from distrax._src.bijectors import shift
from distrax._src.distributions import independent
from distrax._src.distributions import normal
from distrax._src.distributions import transformed
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
def _check_input_parameters_are_valid(scale: linear.Linear, loc: Array) -> None:
"""Raises an error if `scale` and `loc` are not valid."""
if loc.ndim < 1:
raise ValueError('`loc` must have at least 1 dimension.')
if scale.event_dims != loc.shape[-1]:
raise ValueError(
f'`scale` and `loc` have inconsistent dimensionality: '
f'`scale.event_dims = {scale.event_dims} and '
f'`loc.shape[-1] = {loc.shape[-1]}.')
class MultivariateNormalFromBijector(transformed.Transformed):
"""Multivariate normal distribution on `R^k`.
The multivariate normal over `x` is characterized by an invertible affine
transformation `x = f(z) = A @ z + b`, where `z` is a random variable that
follows a standard multivariate normal on `R^k`, i.e., `p(z) = N(0, I_k)`,
`A` is a `k x k` transformation matrix, and `b` is a `k`-dimensional vector.
The resulting PDF on `x` is a multivariate normal, `p(x) = N(b, C)`, where
`C = A @ A.T` is the covariance matrix. Additional leading dimensions (if any)
index batches.
The transformation `x = f(z)` must be specified by a linear scale bijector
implementing the operation `A @ z` and a shift (or location) term `b`.
"""
def __init__(self, loc: Array, scale: linear.Linear):
"""Initializes the distribution.
Args:
loc: The term `b`, i.e., the mean of the multivariate normal distribution.
scale: The bijector specifying the linear transformation `A @ z`, as
described in the class docstring.
"""
_check_input_parameters_are_valid(scale, loc)
batch_shape = jnp.broadcast_shapes(scale.batch_shape, loc.shape[:-1])
dtype = jnp.result_type(scale.dtype, loc.dtype)
# Build a standard multivariate Gaussian with the right `batch_shape`.
std_mvn_dist = independent.Independent(
distribution=normal.Normal(
loc=jnp.zeros(batch_shape + loc.shape[-1:], dtype=dtype),
scale=1.),
reinterpreted_batch_ndims=1)
# Form the bijector `f(x) = Ax + b`.
bijector = chain.Chain([block.Block(shift.Shift(loc), ndims=1), scale])
super().__init__(distribution=std_mvn_dist, bijector=bijector)
self._scale = scale
self._loc = loc
self._event_shape = loc.shape[-1:]
self._batch_shape = batch_shape
self._dtype = dtype
@property
def scale(self) -> linear.Linear:
"""The scale bijector."""
return self._scale
@property
def loc(self) -> Array:
"""The `loc` parameter of the distribution."""
shape = self.batch_shape + self.event_shape
return jnp.broadcast_to(self._loc, shape=shape)
def mean(self) -> Array:
"""Calculates the mean."""
return self.loc
def median(self) -> Array:
"""Calculates the median."""
return self.loc
def mode(self) -> Array:
"""Calculates the mode."""
return self.loc
def covariance(self) -> Array:
"""Calculates the covariance matrix.
Unlike TFP, which would drop leading dimensions, in Distrax the covariance
matrix always has shape `batch_shape + (num_dims, num_dims)`. This helps to
keep things simple and predictable.
Returns:
The covariance matrix, of shape `k x k` (broadcasted to match the batch
shape of the distribution).
"""
if isinstance(self.scale, diag_linear.DiagLinear):
result = jnp.vectorize(jnp.diag, signature='(k)->(k,k)')(self.variance())
else:
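# Each row of the scale matrix `A` is mapped through `x -> A @ x`, which
# yields `A @ A.T` (symmetric, so the row/column order does not matter).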
result = jax.vmap(self.scale.forward, in_axes=-2, out_axes=-2)(
self._scale.matrix)
return jnp.broadcast_to(
result, self.batch_shape + self.event_shape + self.event_shape)
def variance(self) -> Array:
"""Calculates the variance of all one-dimensional marginals."""
if isinstance(self.scale, diag_linear.DiagLinear):
result = jnp.square(self.scale.diag)
else:
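# For a full scale matrix, diag(A @ A.T)[i] = sum_k A[i, k]**2, i.e. the
# row-wise sum of squares of `A` computed below.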
scale_matrix = self._scale.matrix
result = jnp.sum(scale_matrix * scale_matrix, axis=-1)
return jnp.broadcast_to(result, self.batch_shape + self.event_shape)
def stddev(self) -> Array:
"""Calculates the standard deviation (the square root of the variance)."""
if isinstance(self.scale, diag_linear.DiagLinear):
result = jnp.abs(self.scale.diag)
else:
result = jnp.sqrt(self.variance())
return jnp.broadcast_to(result, self.batch_shape + self.event_shape)
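# Minimal construction sketch (values are arbitrary): with a diagonal scale
# A = diag([0.5, 2.0]), the covariance is C = A @ A.T = diag([0.25, 4.0]).
#   scale = diag_linear.DiagLinear(diag=jnp.array([0.5, 2.0]))
#   mvn = MultivariateNormalFromBijector(loc=jnp.zeros(2), scale=scale)
#   mvn.covariance()  # diag([0.25, 4.0])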
MultivariateNormalLike = Union[
MultivariateNormalFromBijector, tfd.MultivariateNormalLinearOperator]
def _squared_frobenius_norm(x: Array) -> Array:
"""Computes the squared Frobenius norm of a matrix."""
return jnp.sum(jnp.square(x), axis=[-2, -1])
def _log_abs_determinant(d: MultivariateNormalLike) -> Array:
"""Obtains `log|det(A)|`."""
if isinstance(d, MultivariateNormalFromBijector):
log_det_scale = d.scale.forward_log_det_jacobian(
jnp.zeros(d.event_shape, dtype=d.dtype))
elif isinstance(d, tfd.MultivariateNormalLinearOperator):
log_det_scale = d.scale.log_abs_determinant()
return log_det_scale
def _inv_scale_operator(d: MultivariateNormalLike) -> Callable[[Array], Array]:
"""Gets the operator that performs `A^-1 * x`."""
if isinstance(d, MultivariateNormalFromBijector):
inverse_fn = jax.vmap(d.scale.inverse, in_axes=-1, out_axes=-1)
elif isinstance(d, tfd.MultivariateNormalLinearOperator):
inverse_fn = d.scale.solve
return inverse_fn
def _scale_matrix(d: MultivariateNormalLike) -> Array:
"""Gets the full scale matrix `A`."""
if isinstance(d, MultivariateNormalFromBijector):
matrix = d.scale.matrix
elif isinstance(d, tfd.MultivariateNormalLinearOperator):
matrix = d.scale.to_dense()
return matrix
def _has_diagonal_scale(d: MultivariateNormalLike) -> bool:
"""Determines if the scale matrix `A` is diagonal."""
if (isinstance(d, MultivariateNormalFromBijector)
and isinstance(d.scale, diag_linear.DiagLinear)):
return True
elif (isinstance(d, tfd.MultivariateNormalDiag) or
(isinstance(d, tfd.MultivariateNormalFullCovariance) and
d.parameters['covariance_matrix'] is None) or
(isinstance(d, tfd.MultivariateNormalTriL) and
not isinstance(d, tfd.MultivariateNormalFullCovariance) and
d.parameters['scale_tril'] is None) or
(isinstance(d, tfd.MultivariateNormalDiagPlusLowRank) and
d.parameters['scale_perturb_factor'] is None)):
return True
return False
def _kl_divergence_mvn_mvn(
dist1: MultivariateNormalLike,
dist2: MultivariateNormalLike,
*unused_args, **unused_kwargs,
) -> Array:
"""Divergence KL(dist1 || dist2) between multivariate normal distributions.
Args:
dist1: A multivariate normal distribution.
dist2: A multivariate normal distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
num_dims = tuple(dist1.event_shape)[-1] # `tuple` needed for TFP distrib.
if num_dims != tuple(dist2.event_shape)[-1]:
raise ValueError(f'Both multivariate normal distributions must have the '
f'same `event_shape`, but they have {num_dims} and '
f'{tuple(dist2.event_shape)[-1]} dimensions.')
# Calculation is based on:
# https://github.com/tensorflow/probability/blob/v0.12.1/tensorflow_probability/python/distributions/mvn_linear_operator.py#L384
# If C_1 = AA.T, C_2 = BB.T, then
# tr[inv(C_2) C_1] = ||inv(B) A||_F^2
# where ||.||_F^2 is the squared Frobenius norm.
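# Writing C_1 = A A.T, C_2 = B B.T, m_i for the means and k for the number of
# dimensions, the closed form evaluated below is
#   KL = (log|det B| - log|det A|)
#        + 0.5 * (||inv(B) A||_F^2 + ||inv(B) (m_2 - m_1)||^2 - k).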
diff_log_abs_det = _log_abs_determinant(dist2) - _log_abs_determinant(dist1)
if _has_diagonal_scale(dist1) and _has_diagonal_scale(dist2):
# This avoids instantiating the full scale matrix when it is diagonal.
b_inv_a = jnp.expand_dims(dist1.stddev() / dist2.stddev(), axis=-1)
else:
b_inv_a = _inv_scale_operator(dist2)(_scale_matrix(dist1))
diff_mean_expanded = jnp.expand_dims(dist2.mean() - dist1.mean(), axis=-1)
b_inv_diff_mean = _inv_scale_operator(dist2)(diff_mean_expanded)
kl_divergence = (
diff_log_abs_det +
0.5 * (-num_dims +
_squared_frobenius_norm(b_inv_a) +
_squared_frobenius_norm(b_inv_diff_mean)))
return kl_divergence
# Register the KL functions with TFP.
tfd.RegisterKL(
MultivariateNormalFromBijector, MultivariateNormalFromBijector)(
_kl_divergence_mvn_mvn)
tfd.RegisterKL(
MultivariateNormalFromBijector, tfd.MultivariateNormalLinearOperator)(
_kl_divergence_mvn_mvn)
tfd.RegisterKL(
tfd.MultivariateNormalLinearOperator, MultivariateNormalFromBijector)(
_kl_divergence_mvn_mvn)
# End of file: distrax/_src/distributions/mvn_from_bijector.py (repo: distrax-master)
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `logistic.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import logistic
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
class Logistic(equivalence.EquivalenceTest):
"""Logistic tests."""
def setUp(self):
super().setUp()
self._init_distr_cls(logistic.Logistic)
@parameterized.named_parameters(
('1d std logistic', (0, 1)),
('2d std logistic', (np.zeros(2), np.ones(2))),
('rank 2 std logistic', (np.zeros((3, 2)), np.ones((3, 2)))),
('broadcasted loc', (0, np.ones(3))),
('broadcasted scale', (np.ones(3), 1)),
)
def test_event_shape(self, distr_params):
super()._test_event_shape(distr_params, dict())
@chex.all_variants
@parameterized.named_parameters(
('1d std logistic, no shape', (0, 1), ()),
('1d std logistic, int shape', (0, 1), 1),
('1d std logistic, 1-tuple shape', (0, 1), (1,)),
('1d std logistic, 2-tuple shape', (0, 1), (2, 2)),
('2d std logistic, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std logistic, int shape', ([0, 0], [1, 1]), 1),
('2d std logistic, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std logistic, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
('rank 2 std logistic, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_shape(distr_params, dict(), sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(
loc=jnp.zeros((), dtype), scale=jnp.ones((), dtype))
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d std logistic, no shape', (0, 1), ()),
('1d std logistic, int shape', (0, 1), 1),
('1d std logistic, 1-tuple shape', (0, 1), (1,)),
('1d std logistic, 2-tuple shape', (0, 1), (2, 2)),
('2d std logistic, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std logistic, int shape', ([0, 0], [1, 1]), 1),
('2d std logistic, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std logistic, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
('rank 2 std logistic, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_and_log_prob(
dist_args=distr_params,
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0., 1.), np.array([1, 2])),
('1d dist, 2d value as list', (0., 1.), [1, 2]),
('2d dist, 1d value', (np.zeros(2), np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 1), 1),
('2d dist, 2d value', (np.zeros(2), np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_method_with_input(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
for method in ['log_prob', 'prob', 'cdf', 'log_cdf', 'survival_function',
'log_survival_function']:
with self.subTest(method):
super()._test_attribute(
attribute_string=method,
dist_args=distr_params,
dist_kwargs={},
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy', (0., 1.), 'entropy'),
('mean', (0, 1), 'mean'),
('mean from list params', ([-1, 1], [1, 2]), 'mean'),
('variance', (0, 1), 'variance'),
('variance from np params', (np.ones(2), np.ones(2)), 'variance'),
('stddev', (0, 1), 'stddev'),
('stddev from rank 2 params', (np.ones((2, 3)), np.ones(
(2, 3))), 'stddev'),
('mode', (0, 1), 'mode'),
)
def test_method(self, distr_params, function_string):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_attribute(
function_string,
distr_params,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', ([0., 1., -0.5], [0.5, 1., 1.5])),
('broadcasted loc', (0.5, [0.5, 1., 1.5])),
('broadcasted scale', ([0., 1., -0.5], 0.8)),
)
def test_median(self, distr_params):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
dist = self.distrax_cls(*distr_params)
self.assertion_fn(rtol=1e-2)(self.variant(dist.median)(), dist.mean())
def test_jittable(self):
super()._test_jittable((0., 1.))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
loc = jnp.array(np.random.randn(3, 4, 5))
scale = jnp.array(np.random.randn(3, 4, 5))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(rtol=1e-2)(dist[slice_].mean(), loc[slice_])
def test_slice_different_parameterization(self):
loc = jnp.array(np.random.randn(4))
scale = jnp.array(np.random.randn(3, 4))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(rtol=1e-2)(dist[0].loc, loc) # Not slicing loc.
self.assertion_fn(rtol=1e-2)(dist[0].scale, scale[0])
if __name__ == '__main__':
absltest.main()
# End of file: distrax/_src/distributions/logistic_test.py (repo: distrax-master)
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normal distribution."""
import math
from typing import Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
_half_log2pi = 0.5 * math.log(2 * math.pi)
class Normal(distribution.Distribution):
"""Normal distribution with location `loc` and `scale` parameters."""
equiv_tfp_cls = tfd.Normal
def __init__(self, loc: Numeric, scale: Numeric):
"""Initializes a Normal distribution.
Args:
loc: Mean of the distribution.
scale: Standard deviation of the distribution.
"""
super().__init__()
self._loc = conversion.as_float_array(loc)
self._scale = conversion.as_float_array(scale)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return jax.lax.broadcast_shapes(self._loc.shape, self._scale.shape)
@property
def loc(self) -> Array:
"""Mean of the distribution."""
return jnp.broadcast_to(self._loc, self.batch_shape)
@property
def scale(self) -> Array:
"""Scale of the distribution."""
return jnp.broadcast_to(self._scale, self.batch_shape)
def _sample_from_std_normal(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._loc, self._scale)
return jax.random.normal(key, shape=out_shape, dtype=dtype)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_normal(key, n)
scale = jnp.expand_dims(self._scale, range(rnd.ndim - self._scale.ndim))
loc = jnp.expand_dims(self._loc, range(rnd.ndim - self._loc.ndim))
return scale * rnd + loc
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
rnd = self._sample_from_std_normal(key, n)
samples = self._scale * rnd + self._loc
log_prob = -0.5 * jnp.square(rnd) - _half_log2pi - jnp.log(self._scale)
return samples, log_prob
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
log_unnormalized = -0.5 * jnp.square(self._standardize(value))
log_normalization = _half_log2pi + jnp.log(self._scale)
return log_unnormalized - log_normalization
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jax.scipy.special.ndtr(self._standardize(value))
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jax.scipy.special.log_ndtr(self._standardize(value))
def survival_function(self, value: EventT) -> Array:
"""See `Distribution.survival_function`."""
return jax.scipy.special.ndtr(-self._standardize(value))
def log_survival_function(self, value: EventT) -> Array:
"""See `Distribution.log_survival_function`."""
return jax.scipy.special.log_ndtr(-self._standardize(value))
def _standardize(self, value: EventT) -> Array:
return (value - self._loc) / self._scale
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
log_normalization = _half_log2pi + jnp.log(self.scale)
entropy = 0.5 + log_normalization
return entropy
def mean(self) -> Array:
"""Calculates the mean."""
return self.loc
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.square(self.scale)
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self.scale
def mode(self) -> Array:
"""Calculates the mode."""
return self.mean()
def median(self) -> Array:
"""Calculates the median."""
return self.mean()
def __getitem__(self, index) -> 'Normal':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Normal(loc=self.loc[index], scale=self.scale[index])
def _kl_divergence_normal_normal(
dist1: Union[Normal, tfd.Normal],
dist2: Union[Normal, tfd.Normal],
*unused_args, **unused_kwargs,
) -> Array:
"""Obtain the batched KL divergence KL(dist1 || dist2) between two Normals.
Args:
dist1: A Normal distribution.
dist2: A Normal distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
diff_log_scale = jnp.log(dist1.scale) - jnp.log(dist2.scale)
return (
0.5 * jnp.square(dist1.loc / dist2.scale - dist2.loc / dist2.scale) +
0.5 * jnp.expm1(2. * diff_log_scale) -
diff_log_scale)
# Register the KL functions with TFP.
tfd.RegisterKL(Normal, Normal)(_kl_divergence_normal_normal)
tfd.RegisterKL(Normal, Normal.equiv_tfp_cls)(_kl_divergence_normal_normal)
tfd.RegisterKL(Normal.equiv_tfp_cls, Normal)(_kl_divergence_normal_normal)
# End of file: distrax/_src/distributions/normal.py (repo: distrax-master)
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `gamma.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import gamma
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
class GammaTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(gamma.Gamma)
@parameterized.named_parameters(
('0d params', (), (), ()),
('1d params', (2,), (2,), (2,)),
('2d params, no broadcast', (3, 2), (3, 2), (3, 2)),
('2d params, broadcasted concentration', (2,), (3, 2), (3, 2)),
('2d params, broadcasted rate', (3, 2), (2,), (3, 2)),
)
def test_properties(self, concentration_shape, rate_shape, batch_shape):
rng = np.random.default_rng(42)
concentration = 0.1 + rng.uniform(size=concentration_shape)
rate = 0.1 + rng.uniform(size=rate_shape)
dist = gamma.Gamma(concentration, rate)
self.assertEqual(dist.event_shape, ())
self.assertEqual(dist.batch_shape, batch_shape)
self.assertion_fn(rtol=2e-2)(
dist.concentration, np.broadcast_to(concentration, batch_shape))
self.assertion_fn(rtol=2e-2)(dist.rate, np.broadcast_to(rate, batch_shape))
@chex.all_variants
@parameterized.named_parameters(
('1d std gamma, no shape', (1, 1), ()),
('1d std gamma, int shape', (1, 1), 1),
('1d std gamma, 1-tuple shape', (1, 1), (1,)),
('1d std gamma, 2-tuple shape', (1, 1), (2, 2)),
('2d std gamma, no shape', (np.ones(2), np.ones(2)), ()),
('2d std gamma, int shape', ([1, 1], [1, 1]), 1),
('2d std gamma, 1-tuple shape', (np.ones(2), np.ones(2)), (1,)),
('2d std gamma, 2-tuple shape', ([1, 1], [1, 1]), (2, 2)),
('rank 2 std gamma, 2-tuple shape', (np.ones((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted concentration', (1, np.ones(3)), (2, 2)),
('broadcasted rate', (np.ones(3), 1), ()),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_shape(distr_params, dict(), sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(
concentration=jnp.ones((), dtype), rate=jnp.ones((), dtype))
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d std gamma, no shape', (11, 3), ()),
('1d std gamma, int shape', (11, 3), 1),
('1d std gamma, 1-tuple shape', (11, 3), (1,)),
('1d std gamma, 2-tuple shape', (1, 1), (2, 2)),
('2d std gamma, no shape', (np.ones(2), np.ones(2)), ()),
('2d std gamma, int shape', ([1, 1], [1, 1]), 1),
('2d std gamma, 1-tuple shape', (np.ones(2), np.ones(2)), (1,)),
('2d std gamma, 2-tuple shape', ([1, 1], [1, 1]), (2, 2)),
('rank 2 std gamma, 2-tuple shape', (np.ones((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted concentration', (1, np.ones(3)), (2, 2)),
('broadcasted rate', (np.ones(3), 1), ()),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_and_log_prob(
dist_args=distr_params,
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (3.1, 1), 1),
('1d dist, 2d value', (0.5, 0.1), np.array([1, 2])),
('2d dist, 1d value', (0.5 + np.zeros(2), 0.3 * np.ones(2)), 1),
('2d broadcasted dist, 1d value', (0.4 + np.zeros(2), 0.8), 1),
('2d dist, 2d value', ([0.1, 0.5], 0.9 * np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (2.1, 1), 200),
)
def test_method_with_value(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
for method in ['log_prob', 'prob', 'cdf', 'log_cdf']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', ([0.1, 1.3, 0.5], [0.5, 1.3, 1.5])),
('broadcasted concentration', (0.5, [0.5, 1.3, 1.5])),
('broadcasted rate', ([0.1, 1.3, 0.5], 0.8)),
)
def test_method(self, distr_params):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
for method in ['entropy', 'mean', 'variance', 'stddev', 'mode']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_args=distr_params,
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax')
)
def test_with_two_distributions(self, function_string, mode_string):
rtol = 1e-3
atol = 1e-4
rng = np.random.default_rng(42)
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'concentration': np.abs(
rng.normal(size=(4, 1, 2))).astype(np.float32),
'rate': np.array(
[[0.8, 0.2], [0.1, 1.2], [1.4, 3.1]], dtype=np.float32),
},
dist2_kwargs={
'concentration': np.abs(rng.normal(size=(3, 2))).astype(np.float32),
'rate': 0.1 + rng.uniform(size=(4, 1, 2)).astype(np.float32),
},
assertion_fn=lambda x, y: np.testing.assert_allclose(x, y, rtol, atol))
def test_jittable(self):
super()._test_jittable(
(0.1, 1.5), assertion_fn=self.assertion_fn(rtol=2e-2))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
concentration = jnp.array(np.abs(np.random.randn(3, 4, 5)))
rate = jnp.array(np.abs(np.random.randn(3, 4, 5)))
dist = self.distrax_cls(concentration, rate)
self.assertion_fn(rtol=2e-2)(
dist[slice_].concentration, concentration[slice_])
self.assertion_fn(rtol=2e-2)(dist[slice_].rate, rate[slice_])
def test_slice_different_parameterization(self):
concentration = jnp.array(np.abs(np.random.randn(3, 4, 5)))
rate = jnp.array(np.abs(np.random.randn(4, 5)))
dist = self.distrax_cls(concentration, rate)
self.assertion_fn(rtol=2e-2)(dist[0].concentration, concentration[0])
self.assertion_fn(rtol=2e-2)(dist[0].rate, rate) # Not slicing rate.
if __name__ == '__main__':
absltest.main()
# End of file: distrax/_src/distributions/gamma_test.py (repo: distrax-master)
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `mvn_full_covariance.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions.mvn_full_covariance import MultivariateNormalFullCovariance
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
def _sample_covariance_matrix(rng, shape):
matrix = rng.normal(size=shape)
matrix_t = np.vectorize(np.transpose, signature='(k,k)->(k,k)')(matrix)
return np.matmul(matrix, matrix_t)
class MultivariateNormalFullCovarianceTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(MultivariateNormalFullCovariance)
@parameterized.named_parameters(
('all inputs are None', {}),
('wrong dimension of loc', {
'loc': np.array(0.),
}),
('covariance_matrix is 0d', {
'covariance_matrix': np.array(1.),
}),
('covariance_matrix is 1d', {
'covariance_matrix': np.ones((4,)),
}),
('covariance_matrix is not square', {
'covariance_matrix': np.ones((4, 3)),
}),
('inconsistent loc and covariance_matrix', {
'loc': np.zeros((4,)),
'covariance_matrix': np.eye(5),
}),
)
def test_raises_on_wrong_inputs(self, dist_kwargs):
with self.assertRaises(ValueError):
self.distrax_cls(**dist_kwargs)
@parameterized.named_parameters(
('loc provided', {'loc': np.zeros((4,))}),
('covariance_matrix provided', {'covariance_matrix': np.eye(4)}),
)
def test_default_properties(self, dist_kwargs):
dist = self.distrax_cls(**dist_kwargs)
self.assertion_fn(rtol=1e-3)(dist.loc, jnp.zeros((4,)))
self.assertion_fn(rtol=1e-3)(dist.covariance_matrix, jnp.eye(4))
@parameterized.named_parameters(
('unbatched', (), (4,), (4, 4)),
('batched loc', (7,), (7, 4), (4, 4)),
('batched covariance_matrix', (7,), (4,), (7, 4, 4)),
)
def test_properties(self, batch_shape, loc_shape, covariance_matrix_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
covariance_matrix = _sample_covariance_matrix(rng, covariance_matrix_shape)
dist = self.distrax_cls(loc=loc, covariance_matrix=covariance_matrix)
self.assertEqual(dist.batch_shape, batch_shape)
self.assertion_fn(rtol=1e-3)(
dist.loc, jnp.broadcast_to(loc, batch_shape + (4,)))
self.assertion_fn(rtol=1e-3)(dist.covariance_matrix, jnp.broadcast_to(
covariance_matrix, batch_shape + (4, 4)))
@chex.all_variants
@parameterized.named_parameters(
('unbatched, no shape', (), (4,), (4, 4)),
('batched loc, no shape', (), (7, 4), (4, 4)),
('batched covariance_matrix, no shape', (), (4,), (7, 4, 4)),
('unbatched, with shape', (3,), (4,), (4, 4)),
('batched loc, with shape', (3,), (7, 4), (4, 4)),
('batched covariance_matrix, with shape', (3,), (4,), (7, 4, 4)),
)
def test_sample_shape(self, sample_shape, loc_shape, covariance_matrix_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
covariance_matrix = _sample_covariance_matrix(rng, covariance_matrix_shape)
dist_kwargs = {'loc': loc, 'covariance_matrix': covariance_matrix}
super()._test_sample_shape(
dist_args=(), dist_kwargs=dist_kwargs, sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist_params = {
'loc': np.array([0., 0.], dtype),
'covariance_matrix': np.array([[1., 0.], [0., 1.]], dtype)}
dist = self.distrax_cls(**dist_params)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('unbatched, unbatched value', (4,), (4,), (4, 4)),
('batched loc, unbatched value', (4,), (7, 4), (4, 4)),
('batched covariance_matrix, unbatched value', (4,), (4,), (7, 4, 4)),
('unbatched, batched value', (3, 7, 4), (4,), (4, 4)),
('batched loc, batched value', (3, 7, 4), (7, 4), (4, 4)),
('batched covariance_matrix, batched value', (3, 7, 4), (4,), (7, 4, 4)),
)
def test_log_prob(self, value_shape, loc_shape, covariance_matrix_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
covariance_matrix = _sample_covariance_matrix(rng, covariance_matrix_shape)
dist_kwargs = {'loc': loc, 'covariance_matrix': covariance_matrix}
value = rng.normal(size=value_shape)
super()._test_attribute(
attribute_string='log_prob',
dist_kwargs=dist_kwargs,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('unbatched', (4,), (4, 4)),
('batched loc', (7, 4), (4, 4)),
('batched covariance_matrix', (4,), (7, 4, 4)),
)
def test_method(self, loc_shape, covariance_matrix_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
covariance_matrix = _sample_covariance_matrix(rng, covariance_matrix_shape)
dist_kwargs = {'loc': loc, 'covariance_matrix': covariance_matrix}
for method in ['entropy', 'mean', 'stddev', 'variance',
'covariance', 'mode']:
if method == 'covariance':
rtol = 2e-2
elif method in ['stddev', 'variance']:
rtol = 6e-3
else:
rtol = 1e-3
with self.subTest(method=method):
super()._test_attribute(
method,
dist_kwargs=dist_kwargs,
assertion_fn=self.assertion_fn(rtol=rtol))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
rng = np.random.default_rng(2022)
dist1_kwargs = {
'loc': rng.normal(size=(5, 1, 4)),
'covariance_matrix': _sample_covariance_matrix(rng, (3, 4, 4)),
}
dist2_kwargs = {
'loc': rng.normal(size=(3, 4)),
'covariance_matrix': _sample_covariance_matrix(rng, (4, 4)),
}
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs=dist1_kwargs,
dist2_kwargs=dist2_kwargs,
assertion_fn=self.assertion_fn(rtol=1e-3))
def test_jittable(self):
super()._test_jittable(
dist_kwargs={'loc': np.zeros((4,))},
assertion_fn=self.assertion_fn(rtol=1e-3))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
rng = np.random.default_rng(2022)
loc = rng.normal(size=(6, 5, 4))
covariance_matrix = _sample_covariance_matrix(rng, (4, 4))
dist_kwargs = {'loc': loc, 'covariance_matrix': covariance_matrix}
dist = self.distrax_cls(**dist_kwargs)
self.assertEqual(dist[slice_].batch_shape, loc[slice_].shape[:-1])
self.assertEqual(dist[slice_].event_shape, dist.event_shape)
self.assertion_fn(rtol=1e-3)(dist[slice_].mean(), loc[slice_])
def test_slice_ellipsis(self):
rng = np.random.default_rng(2022)
loc = rng.normal(size=(6, 5, 4))
covariance_matrix = _sample_covariance_matrix(rng, (4, 4))
dist_kwargs = {'loc': loc, 'covariance_matrix': covariance_matrix}
dist = self.distrax_cls(**dist_kwargs)
self.assertEqual(dist[..., -1].batch_shape, (6,))
self.assertEqual(dist[..., -1].event_shape, dist.event_shape)
self.assertion_fn(rtol=1e-3)(dist[..., -1].mean(), loc[:, -1, :])
if __name__ == '__main__':
absltest.main()
# End of file: distrax/_src/distributions/mvn_full_covariance_test.py (repo: distrax-master)
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tfp_compatible_distribution.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions.categorical import Categorical
from distrax._src.distributions.independent import Independent
from distrax._src.distributions.laplace import Laplace
from distrax._src.distributions.mvn_diag import MultivariateNormalDiag
from distrax._src.distributions.normal import Normal
from distrax._src.distributions.tfp_compatible_distribution import tfp_compatible_distribution
from distrax._src.distributions.transformed import Transformed
from distrax._src.distributions.uniform import Uniform
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
class TFPCompatibleDistributionNormal(parameterized.TestCase):
"""Tests for Normal distribution."""
def setUp(self):
super().setUp()
self._sample_shape = (np.int32(10),)
self._seed = 42
self._key = jax.random.PRNGKey(self._seed)
self.base_dist = Normal(loc=jnp.array([0., 0.]), scale=jnp.array([1., 1.]))
self.values = jnp.array([1., -1.])
self.distrax_second_dist = Normal(loc=-1., scale=0.8)
self.tfp_second_dist = tfd.Normal(loc=-1., scale=0.8)
def assertion_fn(self, rtol):
return lambda x, y: np.testing.assert_allclose(x, y, rtol=rtol)
@property
def wrapped_dist(self):
return tfp_compatible_distribution(self.base_dist)
def test_event_shape(self):
self.assertEqual(self.wrapped_dist.event_shape, self.base_dist.event_shape)
def test_event_shape_types(self):
wrapped_dist = tfp_compatible_distribution(self.distrax_second_dist)
self.assertEqual(
type(wrapped_dist.event_shape), type(self.tfp_second_dist.event_shape))
self.assertEqual(
type(wrapped_dist.event_shape_tensor()),
type(self.tfp_second_dist.event_shape_tensor()))
def test_batch_shape(self):
self.assertEqual(self.wrapped_dist.batch_shape, self.base_dist.batch_shape)
@chex.all_variants
def test_sample(self):
def sample_fn(key):
return self.wrapped_dist.sample(seed=key, sample_shape=self._sample_shape)
sample_fn = self.variant(sample_fn)
self.assertion_fn(rtol=1e-4)(
sample_fn(self._key),
self.base_dist.sample(sample_shape=self._sample_shape, seed=self._key))
def test_experimental_local_measure(self):
samples = self.wrapped_dist.sample(seed=self._key)
expected_log_prob = self.wrapped_dist.log_prob(samples)
log_prob, space = self.wrapped_dist.experimental_local_measure(
samples, backward_compat=True)
self.assertion_fn(rtol=1e-4)(log_prob, expected_log_prob)
self.assertIsInstance(space, tfp.experimental.tangent_spaces.FullSpace)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('mean', 'mean'),
('mode', 'mode'),
('median', 'median'),
('stddev', 'stddev'),
('variance', 'variance'),
('entropy', 'entropy'),
)
def test_method(self, method):
try:
expected_result = self.variant(getattr(self.base_dist, method))()
except NotImplementedError:
return
except AttributeError:
return
result = self.variant(getattr(self.wrapped_dist, method))()
self.assertion_fn(rtol=1e-4)(result, expected_result)
@chex.all_variants
@parameterized.named_parameters(
('log_prob', 'log_prob'),
('prob', 'prob'),
('log_cdf', 'log_cdf'),
('cdf', 'cdf'),
)
def test_method_with_value(self, method):
try:
expected_result = self.variant(
getattr(self.base_dist, method))(self.values)
except NotImplementedError:
return
except AttributeError:
return
result = self.variant(getattr(self.wrapped_dist, method))(self.values)
self.assertion_fn(rtol=1e-4)(result, expected_result)
@chex.all_variants
@parameterized.named_parameters(
('kl_divergence', 'kl_divergence'),
('cross_entropy', 'cross_entropy'),
)
def test_with_two_distributions(self, method):
"""Test methods of the form listed below.
D(distrax_distrib || wrapped_distrib),
D(wrapped_distrib || distrax_distrib),
D(tfp_distrib || wrapped_distrib),
D(wrapped_distrib || tfp_distrib).
Args:
method: the method name to be tested
"""
try:
expected_result1 = self.variant(
getattr(self.distrax_second_dist, method))(self.base_dist)
expected_result2 = self.variant(
getattr(self.base_dist, method))(self.distrax_second_dist)
except NotImplementedError:
return
except AttributeError:
return
distrax_result1 = self.variant(getattr(self.distrax_second_dist, method))(
self.wrapped_dist)
distrax_result2 = self.variant(getattr(self.wrapped_dist, method))(
self.distrax_second_dist)
tfp_result1 = self.variant(getattr(self.tfp_second_dist, method))(
self.wrapped_dist)
tfp_result2 = self.variant(getattr(self.wrapped_dist, method))(
self.tfp_second_dist)
self.assertion_fn(rtol=1e-4)(distrax_result1, expected_result1)
self.assertion_fn(rtol=1e-4)(distrax_result2, expected_result2)
self.assertion_fn(rtol=1e-4)(tfp_result1, expected_result1)
self.assertion_fn(rtol=1e-4)(tfp_result2, expected_result2)
class TFPCompatibleDistributionMvnNormal(TFPCompatibleDistributionNormal):
"""Tests for multivariate normal distribution."""
def setUp(self):
super().setUp()
self.base_dist = MultivariateNormalDiag(loc=jnp.array([0., 1.]))
self.values = jnp.array([1., -1.])
self.distrax_second_dist = MultivariateNormalDiag(
loc=jnp.array([-1., 0.]), scale_diag=jnp.array([0.8, 1.2]))
self.tfp_second_dist = tfd.MultivariateNormalDiag(
loc=jnp.array([-1., 0.]), scale_diag=jnp.array([0.8, 1.2]))
class TFPCompatibleDistributionCategorical(TFPCompatibleDistributionNormal):
"""Tests for categorical distribution."""
def setUp(self):
super().setUp()
self.base_dist = Categorical(logits=jnp.array([0., -1., 1.]))
self.values = jnp.array([0, 1, 2])
self.distrax_second_dist = Categorical(probs=jnp.array([0.2, 0.2, 0.6]))
self.tfp_second_dist = tfd.Categorical(probs=jnp.array([0.2, 0.2, 0.6]))
class TFPCompatibleDistributionTransformed(TFPCompatibleDistributionNormal):
"""Tests for transformed distributions."""
def setUp(self):
super().setUp()
self.base_dist = Transformed(
distribution=Normal(loc=0., scale=1.),
bijector=tfb.Exp())
self.values = jnp.array([0., 1., 2.])
self.distrax_second_dist = Transformed(
distribution=Normal(loc=0.5, scale=0.8),
bijector=tfb.Exp())
self.tfp_second_dist = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0.5, scale=0.8),
bijector=tfb.Exp())
class TfpMetaDistributionsWithWrappedBaseDistribution(parameterized.TestCase):
"""Tests for meta distributions (with wrappper base distr)."""
def setUp(self):
super().setUp()
self._sample_shape = (np.int32(10),)
self._seed = 42
self._key = jax.random.PRNGKey(self._seed)
def assertion_fn(self, rtol):
return lambda x, y: np.testing.assert_allclose(x, y, rtol=rtol)
def test_with_independent(self):
base_dist = Normal(loc=jnp.array([0., 0.]), scale=jnp.array([1., 1.]))
wrapped_dist = tfp_compatible_distribution(base_dist)
meta_dist = tfd.Independent(wrapped_dist, 1, validate_args=True)
samples = meta_dist.sample((), self._key)
log_prob = meta_dist.log_prob(samples)
distrax_meta_dist = Independent(base_dist, 1)
expected_log_prob = distrax_meta_dist.log_prob(samples)
self.assertion_fn(rtol=1e-4)(log_prob, expected_log_prob)
def test_with_transformed_distribution(self):
base_dist = Normal(loc=jnp.array([0., 0.]), scale=jnp.array([1., 1.]))
wrapped_dist = tfp_compatible_distribution(base_dist)
meta_dist = tfd.TransformedDistribution(
distribution=wrapped_dist, bijector=tfb.Exp(), validate_args=True)
samples = meta_dist.sample(seed=self._key)
log_prob = meta_dist.log_prob(samples)
distrax_meta_dist = Transformed(
distribution=base_dist, bijector=tfb.Exp())
expected_log_prob = distrax_meta_dist.log_prob(samples)
self.assertion_fn(rtol=1e-4)(log_prob, expected_log_prob)
def test_with_sample(self):
base_dist = Normal(0., 1.)
wrapped_dist = tfp_compatible_distribution(base_dist)
meta_dist = tfd.Sample(
wrapped_dist, sample_shape=[1, 3], validate_args=True)
meta_dist.log_prob(meta_dist.sample(2, seed=self._key))
def test_with_joint_distribution_named_auto_batched(self):
def laplace(a, b):
return tfp_compatible_distribution(Laplace(a * jnp.ones((2, 1)), b))
meta_dist = tfd.JointDistributionNamedAutoBatched({
'a': tfp_compatible_distribution(Uniform(2. * jnp.ones(3), 4.)),
'b': tfp_compatible_distribution(Uniform(2. * jnp.ones(3), 4.)),
'c': laplace}, validate_args=True)
meta_dist.log_prob(meta_dist.sample(4, seed=self._key))
def test_with_joint_distribution_coroutine_auto_batched(self):
def model_fn():
a = yield tfp_compatible_distribution(Uniform(2. * jnp.ones(3), 4.),
name='a')
b = yield tfp_compatible_distribution(Uniform(2. * jnp.ones(3), 4.),
name='b')
yield tfp_compatible_distribution(Laplace(a * jnp.ones((2, 1)), b),
name='c')
meta_dist = tfd.JointDistributionCoroutineAutoBatched(
model_fn, validate_args=True)
meta_dist.log_prob(meta_dist.sample(7, seed=self._key))
class TFPCompatibleDistributionSlicing(parameterized.TestCase):
"""Class to test the `getitem` method."""
def assertion_fn(self, rtol):
return lambda x, y: np.testing.assert_allclose(x, y, rtol=rtol)
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
loc = np.random.randn(3, 4, 5)
base_dist = Normal(loc=loc, scale=1.)
dist = tfp_compatible_distribution(base_dist)
sliced_dist = dist[slice_]
self.assertIsInstance(sliced_dist, base_dist.__class__)
self.assertIsInstance(sliced_dist.batch_shape, tfp.tf2jax.TensorShape)
self.assertTrue(sliced_dist.allow_nan_stats)
self.assertion_fn(rtol=1e-4)(sliced_dist.loc, loc[slice_])
if __name__ == '__main__':
absltest.main()
# End of file: distrax/_src/distributions/tfp_compatible_distribution_test.py (repo: distrax-master)
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logistic distribution."""
from typing import Tuple
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Logistic(distribution.Distribution):
"""The Logistic distribution with location `loc` and `scale` parameters."""
equiv_tfp_cls = tfd.Logistic
def __init__(self, loc: Numeric, scale: Numeric) -> None:
"""Initializes a Logistic distribution.
Args:
loc: Mean of the distribution.
scale: Spread of the distribution.
"""
super().__init__()
self._loc = conversion.as_float_array(loc)
self._scale = conversion.as_float_array(scale)
self._batch_shape = jax.lax.broadcast_shapes(
self._loc.shape, self._scale.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
@property
def loc(self) -> Array:
"""Mean of the distribution."""
return jnp.broadcast_to(self._loc, self.batch_shape)
@property
def scale(self) -> Array:
"""Spread of the distribution."""
return jnp.broadcast_to(self._scale, self.batch_shape)
def _standardize(self, x: Array) -> Array:
return (x - self.loc) / self.scale
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._loc, self._scale)
uniform = jax.random.uniform(
key,
shape=out_shape,
dtype=dtype,
minval=jnp.finfo(dtype).tiny,
maxval=1.)
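# Inverse-CDF sampling: the logistic quantile function is
# logit(u) = log(u) - log(1 - u), so applying it to Uniform(0, 1) samples
# yields standard logistic samples, which are then shifted and scaled.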
rnd = jnp.log(uniform) - jnp.log1p(-uniform)
return self._scale * rnd + self._loc
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
z = self._standardize(value)
return -z - 2. * jax.nn.softplus(-z) - jnp.log(self._scale)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in Nats)."""
return 2. + jnp.broadcast_to(jnp.log(self._scale), self.batch_shape)
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jax.nn.sigmoid(self._standardize(value))
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return -jax.nn.softplus(-self._standardize(value))
def survival_function(self, value: EventT) -> Array:
"""See `Distribution.survival_function`."""
return jax.nn.sigmoid(-self._standardize(value))
def log_survival_function(self, value: EventT) -> Array:
"""See `Distribution.log_survival_function`."""
return -jax.nn.softplus(self._standardize(value))
def mean(self) -> Array:
"""Calculates the mean."""
return self.loc
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.square(self.scale * jnp.pi) / 3.
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self.scale * jnp.pi / jnp.sqrt(3.)
def mode(self) -> Array:
"""Calculates the mode."""
return self.mean()
def median(self) -> Array:
"""Calculates the median."""
return self.mean()
def __getitem__(self, index) -> 'Logistic':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Logistic(loc=self.loc[index], scale=self.scale[index])
# End of file: distrax/_src/distributions/logistic.py (repo: distrax-master)
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Beta distribution."""
from typing import Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Beta(distribution.Distribution):
"""Beta distribution with parameters `alpha` and `beta`.
The PDF of a Beta distributed random variable `X` is defined on the interval
`0 <= X <= 1` and has the form:
```
p(x; alpha, beta) = x ** {alpha - 1} * (1 - x) ** (beta - 1) / B(alpha, beta)
```
where `B(alpha, beta)` is the beta function, and the `alpha, beta > 0` are the
shape parameters.
Note that the support of the distribution does not include `x = 0` or `x = 1`
if `alpha < 1` or `beta < 1`, respectively.
"""
equiv_tfp_cls = tfd.Beta
def __init__(self, alpha: Numeric, beta: Numeric):
"""Initializes a Beta distribution.
Args:
alpha: Shape parameter `alpha` of the distribution. Must be positive.
beta: Shape parameter `beta` of the distribution. Must be positive.
"""
super().__init__()
self._alpha = conversion.as_float_array(alpha)
self._beta = conversion.as_float_array(beta)
self._batch_shape = jax.lax.broadcast_shapes(
self._alpha.shape, self._beta.shape)
self._log_normalization_constant = math.log_beta(self._alpha, self._beta)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
@property
def alpha(self) -> Array:
"""Shape parameter `alpha` of the distribution."""
return jnp.broadcast_to(self._alpha, self.batch_shape)
@property
def beta(self) -> Array:
"""Shape parameter `beta` of the distribution."""
return jnp.broadcast_to(self._beta, self.batch_shape)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._alpha, self._beta)
rnd = jax.random.beta(
key, a=self._alpha, b=self._beta, shape=out_shape, dtype=dtype)
return rnd
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
result = ((self._alpha - 1.) * jnp.log(value)
+ (self._beta - 1.) * jnp.log(1. - value)
- self._log_normalization_constant)
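    # Guard the boundary cases `value == 0` with `alpha == 1` and `value == 1`
    # with `beta == 1`, where the `0 * log(0)` terms above evaluate to NaN even
    # though the log-density has a finite limit.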
return jnp.where(
jnp.logical_or(jnp.logical_and(self._alpha == 1., value == 0.),
jnp.logical_and(self._beta == 1., value == 1.)),
-self._log_normalization_constant,
result
)
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jax.scipy.special.betainc(self._alpha, self._beta, value)
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return (
self._log_normalization_constant
- (self._alpha - 1.) * jax.lax.digamma(self._alpha)
- (self._beta - 1.) * jax.lax.digamma(self._beta)
+ (self._alpha + self._beta - 2.) * jax.lax.digamma(
self._alpha + self._beta)
)
def mean(self) -> Array:
"""Calculates the mean."""
return self._alpha / (self._alpha + self._beta)
def variance(self) -> Array:
"""Calculates the variance."""
sum_alpha_beta = self._alpha + self._beta
return self._alpha * self._beta / (
jnp.square(sum_alpha_beta) * (sum_alpha_beta + 1.))
def mode(self) -> Array:
"""Calculates the mode.
Returns:
The mode, an array of shape `batch_shape`. The mode is not defined if
`alpha = beta = 1`, or if `alpha < 1` and `beta < 1`. For these cases,
the returned value is `jnp.nan`.
"""
return jnp.where(
jnp.logical_and(self._alpha > 1., self._beta > 1.),
(self._alpha - 1.) / (self._alpha + self._beta - 2.),
jnp.where(
jnp.logical_and(self._alpha <= 1., self._beta > 1.),
0.,
jnp.where(
jnp.logical_and(self._alpha > 1., self._beta <= 1.),
1., jnp.nan)))
def __getitem__(self, index) -> 'Beta':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Beta(alpha=self.alpha[index], beta=self.beta[index])
BetaLike = Union[Beta, tfd.Beta]
def _obtain_alpha_beta(dist: BetaLike) -> Tuple[Array, Array]:
if isinstance(dist, Beta):
alpha, beta = dist.alpha, dist.beta
elif isinstance(dist, tfd.Beta):
alpha, beta = dist.concentration1, dist.concentration0
return alpha, beta
def _kl_divergence_beta_beta(
dist1: BetaLike,
dist2: BetaLike,
*unused_args,
**unused_kwargs,
) -> Array:
"""Batched KL divergence KL(dist1 || dist2) between two Beta distributions.
Args:
dist1: A Beta distribution.
dist2: A Beta distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
alpha1, beta1 = _obtain_alpha_beta(dist1)
alpha2, beta2 = _obtain_alpha_beta(dist2)
t1 = math.log_beta(alpha2, beta2) - math.log_beta(alpha1, beta1)
t2 = (alpha1 - alpha2) * jax.lax.digamma(alpha1)
t3 = (beta1 - beta2) * jax.lax.digamma(beta1)
t4 = (alpha2 - alpha1 + beta2 - beta1) * jax.lax.digamma(alpha1 + beta1)
return t1 + t2 + t3 + t4
# Register the KL functions with TFP.
tfd.RegisterKL(Beta, Beta)(_kl_divergence_beta_beta)
tfd.RegisterKL(Beta, Beta.equiv_tfp_cls)(_kl_divergence_beta_beta)
tfd.RegisterKL(Beta.equiv_tfp_cls, Beta)(_kl_divergence_beta_beta)
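# A minimal sketch (illustrative only; parameters are arbitrary) of the KL
# registrations above: Distrax and TFP Beta instances can be mixed freely.
if __name__ == '__main__':
  _p = Beta(alpha=2., beta=3.)
  _q = tfd.Beta(concentration1=1.5, concentration0=1.5)
  print(_p.kl_divergence(_q))  # Dispatches to `_kl_divergence_beta_beta`.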
| distrax-master | distrax/_src/distributions/beta.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MultivariateNormalTri distribution."""
from typing import Optional
import chex
from distrax._src.bijectors.diag_linear import DiagLinear
from distrax._src.bijectors.triangular_linear import TriangularLinear
from distrax._src.distributions import distribution
from distrax._src.distributions.mvn_from_bijector import MultivariateNormalFromBijector
from distrax._src.utils import conversion
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
def _check_parameters(
loc: Optional[Array], scale_tri: Optional[Array]) -> None:
"""Checks that the inputs are correct."""
if loc is None and scale_tri is None:
raise ValueError(
'At least one of `loc` and `scale_tri` must be specified.')
if loc is not None and loc.ndim < 1:
raise ValueError('The parameter `loc` must have at least one dimension.')
if scale_tri is not None and scale_tri.ndim < 2:
raise ValueError(
f'The parameter `scale_tri` must have at least two dimensions, but '
f'`scale_tri.shape = {scale_tri.shape}`.')
if scale_tri is not None and scale_tri.shape[-1] != scale_tri.shape[-2]:
raise ValueError(
f'The parameter `scale_tri` must be a (batched) square matrix, but '
f'`scale_tri.shape = {scale_tri.shape}`.')
if loc is not None:
num_dims = loc.shape[-1]
if scale_tri is not None and scale_tri.shape[-1] != num_dims:
raise ValueError(
f'Shapes are not compatible: `loc.shape = {loc.shape}` and '
f'`scale_tri.shape = {scale_tri.shape}`.')
class MultivariateNormalTri(MultivariateNormalFromBijector):
"""Multivariate normal distribution on `R^k`.
The `MultivariateNormalTri` distribution is parameterized by a `k`-length
location (mean) vector `b` and a (lower or upper) triangular scale matrix `S`
of size `k x k`. The covariance matrix is `C = S @ S.T`.
"""
equiv_tfp_cls = tfd.MultivariateNormalTriL
def __init__(self,
loc: Optional[Array] = None,
scale_tri: Optional[Array] = None,
is_lower: bool = True):
"""Initializes a MultivariateNormalTri distribution.
Args:
loc: Mean vector of the distribution of shape `k` (can also be a batch of
such vectors). If not specified, it defaults to zeros.
scale_tri: The scale matrix `S`. It must be a `k x k` triangular matrix
(additional dimensions index batches). If `scale_tri` is not triangular,
the entries above or below the main diagonal will be ignored. The
parameter `is_lower` specifies if `scale_tri` is lower or upper
triangular. It is the responsibility of the user to make sure that
`scale_tri` only contains non-zero elements in its diagonal; this class
makes no attempt to verify that. If `scale_tri` is not specified, it
defaults to the identity.
is_lower: Indicates if `scale_tri` is lower (if True) or upper (if False)
triangular.
"""
loc = None if loc is None else conversion.as_float_array(loc)
scale_tri = None if scale_tri is None else conversion.as_float_array(
scale_tri)
_check_parameters(loc, scale_tri)
if loc is not None:
num_dims = loc.shape[-1]
elif scale_tri is not None:
num_dims = scale_tri.shape[-1]
dtype = jnp.result_type(*[x for x in [loc, scale_tri] if x is not None])
if loc is None:
loc = jnp.zeros((num_dims,), dtype=dtype)
if scale_tri is None:
self._scale_tri = jnp.eye(num_dims, dtype=dtype)
scale = DiagLinear(diag=jnp.ones(loc.shape[-1:], dtype=dtype))
else:
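      # Zero out the entries on the other side of the main diagonal, as
      # documented in the constructor docstring.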
tri_fn = jnp.tril if is_lower else jnp.triu
self._scale_tri = tri_fn(scale_tri)
scale = TriangularLinear(matrix=self._scale_tri, is_lower=is_lower)
self._is_lower = is_lower
super().__init__(loc=loc, scale=scale)
@property
def scale_tri(self) -> Array:
"""Triangular scale matrix `S`."""
return jnp.broadcast_to(
self._scale_tri,
self.batch_shape + self.event_shape + self.event_shape)
@property
def is_lower(self) -> bool:
"""Whether the `scale_tri` matrix is lower triangular."""
return self._is_lower
def __getitem__(self, index) -> 'MultivariateNormalTri':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return MultivariateNormalTri(
loc=self.loc[index],
scale_tri=self.scale_tri[index],
is_lower=self.is_lower)
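# A minimal sketch (illustrative only; values are arbitrary): build the
# distribution from a lower-triangular factor `S` and check that the covariance
# equals `S @ S.T`, assuming the `covariance` method inherited from
# `MultivariateNormalFromBijector`.
if __name__ == '__main__':
  import numpy as np  # Imported locally so the sketch is self-contained.
  _s = np.array([[1.0, 0.0], [0.5, 2.0]], dtype=np.float32)
  _dist = MultivariateNormalTri(loc=np.zeros((2,), np.float32), scale_tri=_s)
  np.testing.assert_allclose(_dist.covariance(), _s @ _s.T, rtol=1e-6)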
| distrax-master | distrax/_src/distributions/mvn_tri.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `gumbel.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import gumbel
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
class GumbelTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(gumbel.Gumbel)
@parameterized.named_parameters(
('1d std gumbel', (0, 1)),
('2d std gumbel', (np.zeros(2), np.ones(2))),
('rank 2 std gumbel', (np.zeros((3, 2)), np.ones((3, 2)))),
('broadcasted loc', (0, np.ones(3))),
('broadcasted scale', (np.ones(3), 1)),
)
def test_event_shape(self, distr_params):
distr_params = (np.asarray(distr_params[0], dtype=np.float32), # For TFP.
np.asarray(distr_params[1], dtype=np.float32))
super()._test_event_shape(distr_params, dict())
@chex.all_variants
@parameterized.named_parameters(
('1d std gumbel, no shape', (0, 1), ()),
('1d std gumbel, int shape', (0, 1), 1),
('1d std gumbel, 1-tuple shape', (0, 1), (1,)),
('1d std gumbel, 2-tuple shape', (0, 1), (2, 2)),
('2d std gumbel, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std gumbel, int shape', ([0, 0], [1, 1]), 1),
('2d std gumbel, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std gumbel, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
      ('rank 2 std gumbel, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_shape(distr_params, dict(), sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(
loc=jnp.zeros((), dtype), scale=jnp.ones((), dtype))
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d std gumbel, no shape', (0, 1), ()),
('1d std gumbel, int shape', (0, 1), 1),
('1d std gumbel, 1-tuple shape', (0, 1), (1,)),
('1d std gumbel, 2-tuple shape', (0, 1), (2, 2)),
('2d std gumbel, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std gumbel, int shape', ([0, 0], [1, 1]), 1),
('2d std gumbel, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std gumbel, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
      ('rank 2 std gumbel, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_and_log_prob(
dist_args=distr_params,
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=3e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0.5, 0.1), np.array([1, 0.5])),
('1d dist, 2d value as list', (0.5, 0.1), [1, 0.5]),
('2d dist, 1d value', (0.5 + np.zeros(2), 0.3 * np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 0.8), 1),
('2d dist, 2d value', ([0.1, -0.5], 0.9 * np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_method_with_input(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
for method in ['log_prob', 'prob', 'cdf', 'log_cdf']:
with self.subTest(method):
super()._test_attribute(
attribute_string=method,
dist_args=distr_params,
dist_kwargs={},
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=3e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy', ([0., 1., -0.5], [0.5, 1., 1.5]), 'entropy'),
('entropy broadcasted loc', (0.5, [0.5, 1., 1.5]), 'entropy'),
('entropy broadcasted scale', ([0., 1., -0.5], 0.8), 'entropy'),
('mean', ([0., 1., -0.5], [0.5, 1., 1.5]), 'mean'),
('mean broadcasted loc', (0.5, [0.5, 1., 1.5]), 'mean'),
('mean broadcasted scale', ([0., 1., -0.5], 0.8), 'mean'),
('variance', ([0., 1., -0.5], [0.5, 1., 1.5]), 'variance'),
('variance broadcasted loc', (0.5, [0.5, 1., 1.5]), 'variance'),
('variance broadcasted scale', ([0., 1., -0.5], 0.8), 'variance'),
('stddev', ([0., 1., -0.5], [0.5, 1., 1.5]), 'stddev'),
('stddev broadcasted loc', (0.5, [0.5, 1., 1.5]), 'stddev'),
('stddev broadcasted scale', ([0., 1., -0.5], 0.8), 'stddev'),
('mode', ([0., 1., -0.5], [0.5, 1., 1.5]), 'mode'),
('mode broadcasted loc', (0.5, [0.5, 1., 1.5]), 'mode'),
('mode broadcasted scale', ([0., 1., -0.5], 0.8), 'mode'),
)
def test_method(self, distr_params, function_string):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_attribute(
attribute_string=function_string,
dist_args=distr_params,
assertion_fn=self.assertion_fn(rtol=3e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', ([0., 1., -0.5], [0.5, 1., 1.5])),
('broadcasted loc', (0.5, [0.5, 1., 1.5])),
('broadcasted scale', ([0., 1., -0.5], 0.8)),
)
def test_median(self, distr_params):
distr_params = (np.asarray(distr_params[0], dtype=np.float32), # For TFP.
np.asarray(distr_params[1], dtype=np.float32))
dist = self.distrax_cls(*distr_params)
self.assertion_fn(rtol=3e-2)(
self.variant(dist.median)(),
dist.loc - dist.scale * jnp.log(jnp.log(2.)))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax')
)
def test_with_two_distributions(self, function_string, mode_string):
rng = np.random.default_rng(2023)
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'loc': rng.normal(size=(4, 1, 2)).astype(np.float32),
'scale': np.array([[0.8, 0.2], [0.1, 1.2], [1.4, 3.1]],
dtype=np.float32),
},
dist2_kwargs={
'loc': rng.normal(size=(3, 2)).astype(np.float32),
'scale': 0.1 + np.array(rng.uniform(size=(4, 1, 2)),
dtype=np.float32),
},
assertion_fn=self.assertion_fn(rtol=6e-2))
def test_jittable(self):
super()._test_jittable(
(0.1, 1.2), assertion_fn=self.assertion_fn(rtol=3e-2))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
rng = np.random.default_rng(2023)
loc = jnp.array(rng.normal(size=(3, 4, 5)))
scale = jnp.array(0.1 + rng.uniform(size=(3, 4, 5)))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(rtol=3e-2)(dist[slice_].loc, loc[slice_])
self.assertion_fn(rtol=3e-2)(dist[slice_].scale, scale[slice_])
def test_slice_different_parameterization(self):
rng = np.random.default_rng(2023)
loc = jnp.array(rng.normal(size=(4,)))
scale = jnp.array(0.1 + rng.uniform(size=(3, 4)))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(rtol=3e-2)(dist[0].loc, loc) # Not slicing loc.
self.assertion_fn(rtol=3e-2)(dist[0].scale, scale[0])
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/gumbel_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Greedy distributions with respect to a set of preferences."""
from typing import Any, Union
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import distribution
import jax.numpy as jnp
Array = chex.Array
def _argmax_with_random_tie_breaking(preferences: Array) -> Array:
"""Compute probabilities greedily with respect to a set of preferences."""
optimal_actions = preferences == preferences.max(axis=-1, keepdims=True)
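  # Normalize so that tied maxima share the probability mass equally.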
return optimal_actions / optimal_actions.sum(axis=-1, keepdims=True)
class Greedy(categorical.Categorical):
"""A Categorical distribution that is greedy with respect to some preferences.
Given a set of unnormalized preferences, the probability mass is distributed
  equally among all indices `i` such that `preferences[i] = max(preferences)`;
  all other indices are assigned a probability of zero.
"""
def __init__(
self, preferences: Array, dtype: Union[jnp.dtype, type[Any]] = int
):
"""Initializes a Greedy distribution.
Args:
preferences: Unnormalized preferences.
dtype: The type of event samples.
"""
self._preferences = jnp.asarray(preferences)
probs = _argmax_with_random_tie_breaking(self._preferences)
super().__init__(probs=probs, dtype=dtype)
@property
def preferences(self) -> Array:
"""Unnormalized preferences."""
return self._preferences
def __getitem__(self, index) -> 'Greedy':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Greedy(
preferences=self.preferences[index], dtype=self.dtype)
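# A minimal sketch (illustrative only; preferences are arbitrary): tied maximal
# preferences share the probability mass equally and every other entry gets zero.
if __name__ == '__main__':
  _dist = Greedy(preferences=jnp.array([0., 4., -1., 4.]))
  print(_dist.probs)  # -> [0.  0.5 0.  0.5]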
| distrax-master | distrax/_src/distributions/greedy.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `kl_divergence` across different types of MultivariateNormal."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions.mvn_diag import MultivariateNormalDiag
from distrax._src.distributions.mvn_diag_plus_low_rank import MultivariateNormalDiagPlusLowRank
from distrax._src.distributions.mvn_full_covariance import MultivariateNormalFullCovariance
from distrax._src.distributions.mvn_tri import MultivariateNormalTri
import numpy as np
def _get_dist_params(dist, batch_shape, dim, rng):
"""Generates random parameters depending on the distribution type."""
if dist is MultivariateNormalDiag:
distrax_dist_params = {
'scale_diag': rng.normal(size=batch_shape + (dim,)),
}
tfp_dist_params = distrax_dist_params
elif dist is MultivariateNormalDiagPlusLowRank:
scale_diag = rng.normal(size=batch_shape + (dim,))
scale_u_matrix = 0.2 * rng.normal(size=batch_shape + (dim, 2))
scale_perturb_diag = rng.normal(size=batch_shape + (2,))
scale_v_matrix = scale_u_matrix * np.expand_dims(
scale_perturb_diag, axis=-2)
distrax_dist_params = {
'scale_diag': scale_diag,
'scale_u_matrix': scale_u_matrix,
'scale_v_matrix': scale_v_matrix,
}
tfp_dist_params = {
'scale_diag': scale_diag,
'scale_perturb_factor': scale_u_matrix,
'scale_perturb_diag': scale_perturb_diag,
}
elif dist is MultivariateNormalTri:
scale_tril = rng.normal(size=batch_shape + (dim, dim))
distrax_dist_params = {
'scale_tri': scale_tril,
'is_lower': True,
}
tfp_dist_params = {
'scale_tril': scale_tril,
}
elif dist is MultivariateNormalFullCovariance:
matrix = rng.normal(size=batch_shape + (dim, dim))
matrix_t = np.vectorize(np.transpose, signature='(k,k)->(k,k)')(matrix)
distrax_dist_params = {
'covariance_matrix': np.matmul(matrix, matrix_t),
}
tfp_dist_params = distrax_dist_params
loc = rng.normal(size=batch_shape + (dim,))
distrax_dist_params.update({'loc': loc})
tfp_dist_params.update({'loc': loc})
return distrax_dist_params, tfp_dist_params
class MultivariateNormalKLTest(parameterized.TestCase):
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('Diag vs DiagPlusLowRank',
MultivariateNormalDiag, MultivariateNormalDiagPlusLowRank),
('Diag vs FullCovariance',
MultivariateNormalDiag, MultivariateNormalFullCovariance),
('Diag vs Tri',
MultivariateNormalDiag, MultivariateNormalTri),
('DiagPlusLowRank vs FullCovariance',
MultivariateNormalDiagPlusLowRank, MultivariateNormalFullCovariance),
('DiagPlusLowRank vs Tri',
MultivariateNormalDiagPlusLowRank, MultivariateNormalTri),
('Tri vs FullCovariance',
MultivariateNormalTri, MultivariateNormalFullCovariance),
)
def test_two_distributions(self, dist1_type, dist2_type):
rng = np.random.default_rng(42)
distrax_dist1_params, tfp_dist1_params = _get_dist_params(
dist1_type, batch_shape=(8, 1), dim=5, rng=rng)
distrax_dist2_params, tfp_dist2_params = _get_dist_params(
dist2_type, batch_shape=(6,), dim=5, rng=rng)
dist1_distrax = dist1_type(**distrax_dist1_params)
dist1_tfp = dist1_type.equiv_tfp_cls(**tfp_dist1_params)
dist2_distrax = dist2_type(**distrax_dist2_params)
dist2_tfp = dist2_type.equiv_tfp_cls(**tfp_dist2_params)
for method in ['kl_divergence', 'cross_entropy']:
expected_result1 = getattr(dist1_tfp, method)(dist2_tfp)
expected_result2 = getattr(dist2_tfp, method)(dist1_tfp)
for mode in ['distrax_to_distrax', 'distrax_to_tfp', 'tfp_to_distrax']:
with self.subTest(method=method, mode=mode):
if mode == 'distrax_to_distrax':
result1 = self.variant(getattr(dist1_distrax, method))(
dist2_distrax)
result2 = self.variant(getattr(dist2_distrax, method))(
dist1_distrax)
elif mode == 'distrax_to_tfp':
result1 = self.variant(getattr(dist1_distrax, method))(dist2_tfp)
result2 = self.variant(getattr(dist2_distrax, method))(dist1_tfp)
elif mode == 'tfp_to_distrax':
result1 = self.variant(getattr(dist1_tfp, method))(dist2_distrax)
result2 = self.variant(getattr(dist2_tfp, method))(dist1_distrax)
np.testing.assert_allclose(result1, expected_result1, rtol=0.03)
np.testing.assert_allclose(result2, expected_result2, rtol=0.03)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/mvn_kl_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `epsilon_greedy.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import epsilon_greedy
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
class EpsilonGreedyTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(epsilon_greedy.EpsilonGreedy)
self.epsilon = 0.2
self.preferences = jnp.array([0., 4., -1., 4.])
def test_parameters_from_preferences(self):
dist = self.distrax_cls(preferences=self.preferences, epsilon=self.epsilon)
expected_probs = jnp.array([0.05, 0.45, 0.05, 0.45])
self.assertion_fn(rtol=2e-3)(dist.logits, jnp.log(expected_probs))
self.assertion_fn(rtol=2e-3)(dist.probs, expected_probs)
def test_num_categories(self):
dist = self.distrax_cls(preferences=self.preferences, epsilon=self.epsilon)
np.testing.assert_equal(dist.num_categories, len(self.preferences))
@chex.all_variants
@parameterized.named_parameters(
('int32', jnp.int32),
('int64', jnp.int64),
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(
preferences=self.preferences, epsilon=self.epsilon, dtype=dtype)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
def test_jittable(self):
super()._test_jittable(
dist_args=(np.array([0., 4., -1., 4.]), 0.1),
assertion_fn=functools.partial(np.testing.assert_allclose, rtol=1e-5))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
preferences = np.abs(np.random.randn(3, 4, 5))
dtype = jnp.float32
dist = self.distrax_cls(preferences, self.epsilon, dtype=dtype)
dist_sliced = dist[slice_]
self.assertIsInstance(dist_sliced, epsilon_greedy.EpsilonGreedy)
self.assertion_fn(rtol=2e-3)(dist_sliced.preferences, preferences[slice_])
self.assertion_fn(rtol=2e-3)(dist_sliced.epsilon, self.epsilon)
self.assertEqual(dist_sliced.dtype, dtype)
def test_slice_ellipsis(self):
preferences = np.abs(np.random.randn(3, 4, 5))
dtype = jnp.float32
dist = self.distrax_cls(preferences, self.epsilon, dtype=dtype)
dist_sliced = dist[..., -1]
self.assertIsInstance(dist_sliced, epsilon_greedy.EpsilonGreedy)
self.assertion_fn(rtol=2e-3)(dist_sliced.preferences, preferences[:, -1])
self.assertion_fn(rtol=2e-3)(dist_sliced.epsilon, self.epsilon)
self.assertEqual(dist_sliced.dtype, dtype)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/epsilon_greedy_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| distrax-master | distrax/_src/distributions/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `categorical_uniform.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import categorical_uniform
import jax
import jax.numpy as jnp
import numpy as np
_NAMED_PARAMETERS = (
dict(
testcase_name='scalar',
low=0.,
high=1.,
logits=np.zeros((7,)),
num_bins=7,
target_event_shape=(),
target_sample_shape=(),
target_batch_shape=(),
target_low=np.zeros(()),
target_high=np.ones(()),
target_logits=np.zeros((7,)),
target_entropy=np.float64(0.0),
target_mean=np.float32(0.5),
target_variance=np.float32(1/12),
),
dict(
testcase_name='one_dimensional',
low=np.zeros((2,)),
high=np.ones((2,)),
logits=np.zeros((2, 7)),
num_bins=7,
target_event_shape=(),
target_sample_shape=(2,),
target_batch_shape=(2,),
target_low=np.zeros((2,)),
target_high=np.ones((2,)),
target_logits=np.zeros((2, 7)),
target_entropy=np.full((2,), 0.0, dtype=np.float64),
target_mean=np.full((2,), 0.5),
target_variance=np.full((2,), 1/12),
),
dict(
testcase_name='two_dimensional',
low=np.zeros((2, 3)),
high=np.ones((2, 3)),
logits=np.zeros((2, 3, 7)),
num_bins=7,
target_event_shape=(),
target_sample_shape=(2, 3),
target_batch_shape=(2, 3),
target_low=np.zeros((2, 3)),
target_high=np.ones((2, 3)),
target_logits=np.zeros((2, 3, 7)),
target_entropy=np.full((2, 3), 0.0, dtype=np.float64),
target_mean=np.full((2, 3), 0.5),
target_variance=np.full((2, 3), 1/12),
),
dict(
testcase_name='broadcasted_low',
low=0.,
high=np.ones((2, 3)),
logits=np.zeros((2, 3, 7)),
num_bins=7,
target_event_shape=(),
target_sample_shape=(2, 3),
target_batch_shape=(2, 3),
target_low=np.zeros((2, 3)),
target_high=np.ones((2, 3)),
target_logits=np.zeros((2, 3, 7)),
target_entropy=np.full((2, 3), 0.0, dtype=np.float64),
target_mean=np.full((2, 3), 0.5),
target_variance=np.full((2, 3), 1/12),
),
dict(
testcase_name='broadcasted_high',
low=np.zeros((2, 3)),
high=1.,
logits=np.zeros((2, 3, 7)),
num_bins=7,
target_event_shape=(),
target_sample_shape=(2, 3),
target_batch_shape=(2, 3),
target_low=np.zeros((2, 3)),
target_high=np.ones((2, 3)),
target_logits=np.zeros((2, 3, 7)),
target_entropy=np.full((2, 3), 0.0, dtype=np.float64),
target_mean=np.full((2, 3), 0.5),
target_variance=np.full((2, 3), 1/12),
),
dict(
testcase_name='broadcasted_logits',
low=np.zeros((2, 3)),
high=np.ones((2, 3)),
logits=np.zeros((7,)),
num_bins=7,
target_event_shape=(),
target_sample_shape=(2, 3),
target_batch_shape=(2, 3),
target_low=np.zeros((2, 3)),
target_high=np.ones((2, 3)),
target_logits=np.zeros((2, 3, 7)),
target_entropy=np.full((2, 3), 0.0, dtype=np.float64),
target_mean=np.full((2, 3), 0.5),
target_variance=np.full((2, 3), 1/12),
),
)
class CategoricalUniformTest(parameterized.TestCase):
def test_raises_on_wrong_logits(self):
with self.assertRaises(ValueError):
categorical_uniform.CategoricalUniform(
low=0.0, high=1.0, logits=jnp.array(0.0)
)
@parameterized.named_parameters(*_NAMED_PARAMETERS)
def test_batch_shape(self, *, low, high, logits, target_batch_shape, **_):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
self.assertEqual(distribution.batch_shape, target_batch_shape)
@parameterized.named_parameters(*_NAMED_PARAMETERS)
def test_event_shape(self, *, low, high, logits, target_event_shape, **_):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
self.assertEqual(distribution.event_shape, target_event_shape)
@chex.all_variants
@parameterized.named_parameters(*_NAMED_PARAMETERS)
def test_sample_shape(self, *, low, high, logits, target_sample_shape, **_):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
sample = self.variant(distribution.sample)(seed=jax.random.PRNGKey(42))
self.assertEqual(sample.shape, target_sample_shape)
@chex.all_variants
@parameterized.named_parameters(*_NAMED_PARAMETERS)
def test_sample_gradients(self, *, low, high, logits, **_):
def summed_samples_fn(params):
distribution = categorical_uniform.CategoricalUniform(
low=params[0], high=params[1], logits=params[2])
sample = distribution.sample(seed=jax.random.PRNGKey(42))
return sample.sum()
grad_fn = self.variant(jax.grad(summed_samples_fn))
grad_low, grad_high, grad_logits = grad_fn(
(jnp.float32(low), jnp.float32(high), jnp.float32(logits)))
self.assertTrue(np.all(grad_low)) # Assert gradient is non-zero.
self.assertTrue(np.all(grad_high)) # Assert gradient is non-zero.
self.assertTrue(np.all(grad_logits)) # Assert gradient is non-zero.
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(*_NAMED_PARAMETERS)
def test_entropy(self, *, low, high, logits, target_entropy, **_):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
chex.assert_trees_all_close(
self.variant(distribution.entropy)(),
target_entropy,
atol=1e-4,
rtol=1e-4,
)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(*_NAMED_PARAMETERS)
def test_mean(self, *, low, high, logits, target_mean, **_):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
chex.assert_trees_all_close(
self.variant(distribution.mean)(), target_mean, atol=1e-4, rtol=1e-4)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(*_NAMED_PARAMETERS)
def test_variance(self, *, low, high, logits, target_variance, **_):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
chex.assert_trees_all_close(
self.variant(distribution.variance)(),
target_variance,
atol=1e-4,
rtol=1e-4,
)
@chex.all_variants
@parameterized.named_parameters(*_NAMED_PARAMETERS)
def test_log_prob(self, *, low, high, logits, target_sample_shape, **_):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
sample = jnp.full(target_sample_shape, 0.2, jnp.float32)
log_prob = self.variant(distribution.log_prob)(sample)
target_log_prob = jnp.zeros(target_sample_shape, jnp.float32)
chex.assert_trees_all_close(log_prob, target_log_prob, atol=1e-4, rtol=1e-4)
@parameterized.named_parameters(*_NAMED_PARAMETERS)
def test_attributes(
self, *,
low, high, logits, target_low, target_high, target_logits, **_,
):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
with self.subTest('low'):
chex.assert_trees_all_equal(distribution.low, target_low)
with self.subTest('high'):
chex.assert_trees_all_equal(distribution.high, target_high)
with self.subTest('logits'):
chex.assert_trees_all_equal(distribution.logits, target_logits)
@parameterized.named_parameters(
# Remove the scalar parameterization because slice would be out of range.
named_parameters for named_parameters in _NAMED_PARAMETERS
if named_parameters['testcase_name'] != 'scalar'
)
def test_slice(self, low, high, logits, **_):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
for name, key in (
('single_element', 1),
('range', slice(-1)),
('ellipsis', (Ellipsis, -1)),
):
with self.subTest(name):
chex.assert_trees_all_close(
distribution[key].low,
distribution.low[key],
atol=1e-4,
rtol=1e-4,
)
chex.assert_trees_all_close(
distribution[key].high,
distribution.high[key],
atol=1e-4,
rtol=1e-4,
)
dist_logits = distribution.logits
chex.assert_trees_all_close(
distribution[key].logits,
dist_logits[key] if name != 'ellipsis' else dist_logits[..., -1, :],
atol=1e-4,
rtol=1e-4,
)
@chex.all_variants
@parameterized.named_parameters(
named_parameters for named_parameters in _NAMED_PARAMETERS
if named_parameters['testcase_name'] == 'scalar'
)
def test_log_prob_outside_of_domain(
self, *, low, high, logits, target_sample_shape, **_):
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
with self.subTest('lower'):
sample = jnp.full(target_sample_shape, -1, jnp.float32)
log_prob = self.variant(distribution.log_prob)(sample)
self.assertEqual(log_prob, -np.inf)
with self.subTest('upper'):
sample = jnp.full(target_sample_shape, +2, jnp.float32)
log_prob = self.variant(distribution.log_prob)(sample)
self.assertEqual(log_prob, -np.inf)
@parameterized.named_parameters(
named_parameters for named_parameters in _NAMED_PARAMETERS
if named_parameters['testcase_name'] == 'two_dimensional'
)
def test_vmap_inputs(self, *, low, high, logits, target_sample_shape, **_):
def log_prob_sum(distribution, sample):
return distribution.log_prob(sample).sum()
distribution = categorical_uniform.CategoricalUniform(
low=low, high=high, logits=logits)
sample = jnp.full(target_sample_shape, 0.2, jnp.float32)
with self.subTest('no vmap'):
actual = log_prob_sum(distribution, sample)
expected = distribution.log_prob(sample).sum()
chex.assert_trees_all_close(actual, expected, atol=1e-4, rtol=1e-4)
with self.subTest('axis=0'):
actual = jax.vmap(log_prob_sum, in_axes=0)(distribution, sample)
expected = distribution.log_prob(sample).sum(axis=1)
chex.assert_trees_all_close(actual, expected, atol=1e-4, rtol=1e-4)
with self.subTest('axis=1'):
actual = jax.vmap(log_prob_sum, in_axes=1)(distribution, sample)
expected = distribution.log_prob(sample).sum(axis=0)
chex.assert_trees_all_close(actual, expected, atol=1e-4, rtol=1e-4)
@parameterized.named_parameters(
named_parameters for named_parameters in _NAMED_PARAMETERS
if named_parameters['testcase_name'] == 'two_dimensional'
)
def test_vmap_outputs(self, *, low, high, logits, target_sample_shape, **_):
def summed_distribution(low, high, logits):
return categorical_uniform.CategoricalUniform(
low=low.sum(keepdims=True),
high=high.sum(keepdims=True),
logits=logits.sum(keepdims=True),
)
actual = jax.vmap(summed_distribution)(low, high, logits)
expected = categorical_uniform.CategoricalUniform(
low=low.sum(axis=1, keepdims=True),
high=high.sum(axis=1, keepdims=True),
logits=logits.sum(axis=1, keepdims=True),
)
np.testing.assert_equal(actual.batch_shape, expected.batch_shape)
np.testing.assert_equal(actual.event_shape, expected.event_shape)
sample = jnp.full(target_sample_shape, 0.2, jnp.float32)
chex.assert_trees_all_close(
actual.log_prob(sample),
expected.log_prob(sample),
atol=1e-4,
rtol=1e-4,
)
def test_jitable(self):
@jax.jit
def jitted_function(event, dist):
return dist.log_prob(event)
dist = categorical_uniform.CategoricalUniform(
low=0., high=1., logits=np.ones((7,)))
event = dist.sample(seed=jax.random.PRNGKey(4242))
log_prob = dist.log_prob(event)
jitted_log_prob = jitted_function(event, dist)
chex.assert_trees_all_close(jitted_log_prob, log_prob, atol=1e-4, rtol=1e-4)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/categorical_uniform_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `clipped.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import clipped
from distrax._src.distributions import logistic
from distrax._src.distributions import normal
import jax
import jax.numpy as jnp
import numpy as np
MINIMUM = -1.0
MAXIMUM = 1.0
LOC = MINIMUM
SCALE = 0.1
SIZE = 3
class ClippedTest(parameterized.TestCase):
@parameterized.parameters([
[clipped.ClippedLogistic, logistic.Logistic],
[clipped.ClippedNormal, normal.Normal],
])
def test_clipped_logprob(self, factory, unclipped_factory):
distribution = factory(
loc=LOC, scale=SCALE, minimum=MINIMUM, maximum=MAXIMUM)
unclipped = unclipped_factory(loc=LOC, scale=SCALE)
np.testing.assert_allclose(
unclipped.log_prob(0.0),
distribution.log_prob(0.0))
np.testing.assert_allclose(
unclipped.log_prob(0.8),
distribution.log_prob(0.8))
# Testing outside of the boundary.
self.assertEqual(-np.inf, distribution.log_prob(MINIMUM - 0.1))
self.assertEqual(-np.inf, distribution.log_prob(MAXIMUM + 0.1))
@parameterized.parameters([
[clipped.ClippedLogistic, logistic.Logistic],
[clipped.ClippedNormal, normal.Normal],
])
def test_batched_clipped_logprob(self, factory, unclipped_factory):
distribution = factory(
loc=jnp.array([LOC]*SIZE),
scale=jnp.array([SCALE]*SIZE),
minimum=MINIMUM,
maximum=MAXIMUM)
unclipped = unclipped_factory(loc=LOC, scale=SCALE)
np.testing.assert_allclose(
unclipped.log_prob(jnp.array([0.0]*SIZE)),
distribution.log_prob(jnp.array([0.0]*SIZE)))
np.testing.assert_allclose(
unclipped.log_prob(jnp.array([0.8]*SIZE)),
distribution.log_prob(jnp.array([0.8]*SIZE)))
# Testing outside of the boundary.
np.testing.assert_allclose(
-np.inf, distribution.log_prob(jnp.array([MINIMUM - 0.1]*SIZE)))
np.testing.assert_allclose(
-np.inf, distribution.log_prob(jnp.array([MAXIMUM + 0.1]*SIZE)))
@parameterized.parameters([
[clipped.ClippedLogistic, logistic.Logistic],
[clipped.ClippedNormal, normal.Normal],
])
def test_clipped_sampled_and_logprob(self, factory, unclipped_factory):
distribution = factory(
loc=LOC, scale=SCALE, minimum=MINIMUM, maximum=MAXIMUM)
unclipped = unclipped_factory(loc=LOC, scale=SCALE)
for rng in jax.random.split(jax.random.PRNGKey(42), 5):
sample, log_prob = distribution.sample_and_log_prob(seed=rng)
unclipped_sample, unclipped_log_prob = unclipped.sample_and_log_prob(
seed=rng)
if float(unclipped_sample) > MAXIMUM:
np.testing.assert_allclose(sample, MAXIMUM, atol=1e-5)
elif float(unclipped_sample) < MINIMUM:
np.testing.assert_allclose(sample, MINIMUM, atol=1e-5)
else:
np.testing.assert_allclose(sample, unclipped_sample, atol=1e-5)
np.testing.assert_allclose(log_prob, unclipped_log_prob, atol=1e-5)
@parameterized.parameters([
[clipped.ClippedLogistic, logistic.Logistic],
[clipped.ClippedNormal, normal.Normal],
])
def test_clipped_sample(self, factory, unclipped_factory):
distribution = factory(
loc=LOC, scale=SCALE, minimum=MINIMUM, maximum=MAXIMUM)
unclipped = unclipped_factory(loc=LOC, scale=SCALE)
for rng in jax.random.split(jax.random.PRNGKey(42), 5):
sample = distribution.sample(seed=rng)
unclipped_sample = unclipped.sample(seed=rng)
if float(unclipped_sample) > MAXIMUM:
np.testing.assert_allclose(sample, MAXIMUM, atol=1e-5)
elif float(unclipped_sample) < MINIMUM:
np.testing.assert_allclose(sample, MINIMUM, atol=1e-5)
else:
np.testing.assert_allclose(sample, unclipped_sample, atol=1e-5)
@parameterized.parameters([
[clipped.ClippedLogistic],
[clipped.ClippedNormal],
])
def test_extremes(self, factory):
minimum = -1.0
maximum = 1.0
scale = 0.01
# Using extreme loc.
distribution = factory(
loc=minimum, scale=scale, minimum=minimum, maximum=maximum)
self.assertTrue(np.isfinite(distribution.log_prob(minimum)))
self.assertTrue(np.isfinite(distribution.log_prob(maximum)))
distribution = factory(
loc=maximum, scale=scale, minimum=minimum, maximum=maximum)
self.assertTrue(np.isfinite(distribution.log_prob(minimum)))
self.assertTrue(np.isfinite(distribution.log_prob(maximum)))
def test_jitable(self):
minimum = -1.0
maximum = 1.0
loc = minimum
scale = 0.1
@jax.jit
def jitted_function(event, dist):
return dist.log_prob(event)
dist = clipped.ClippedLogistic(
loc=loc, scale=scale, minimum=minimum, maximum=maximum)
event = dist.sample(seed=jax.random.PRNGKey(4242))
log_prob = dist.log_prob(event)
jitted_log_prob = jitted_function(event, dist)
chex.assert_trees_all_close(
jitted_log_prob, log_prob, atol=1e-4, rtol=1e-4)
def test_properties(self):
dist = clipped.ClippedLogistic(
loc=LOC, scale=SCALE, minimum=MINIMUM, maximum=MAXIMUM)
np.testing.assert_allclose(dist.minimum, MINIMUM, atol=1e-5)
np.testing.assert_allclose(dist.maximum, MAXIMUM, atol=1e-5)
dist = clipped.ClippedLogistic(
loc=jnp.array([LOC]*SIZE),
scale=jnp.array([SCALE]*SIZE),
minimum=MINIMUM,
maximum=MAXIMUM)
np.testing.assert_allclose(dist.minimum, MINIMUM, atol=1e-5)
np.testing.assert_allclose(dist.maximum, MAXIMUM, atol=1e-5)
def test_min_max_broadcasting(self):
dist = clipped.ClippedLogistic(
loc=jnp.array([LOC]*SIZE),
scale=jnp.array([SCALE]*SIZE),
minimum=MINIMUM,
maximum=MAXIMUM)
self.assertEqual(dist.minimum.shape, (SIZE,))
    self.assertEqual(dist.maximum.shape, (SIZE,))
def test_batch_shape(self):
dist = clipped.ClippedLogistic(
loc=jnp.array([LOC]*SIZE),
scale=jnp.array([SCALE]*SIZE),
minimum=MINIMUM,
maximum=MAXIMUM)
self.assertEqual(dist.batch_shape, (SIZE,))
self.assertEqual(dist.batch_shape, (SIZE,))
def test_event_shape(self):
dist = clipped.ClippedLogistic(
loc=jnp.array([LOC]*SIZE),
scale=jnp.array([SCALE]*SIZE),
minimum=jnp.array([MINIMUM]*SIZE),
maximum=jnp.array([MAXIMUM]*SIZE))
self.assertEqual(dist.event_shape, ())
self.assertEqual(dist.event_shape, ())
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/clipped_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Independent distribution."""
from typing import Callable, Optional, Tuple, Union
import chex
from distrax._src.distributions import distribution as distrax_distribution
from distrax._src.utils import conversion
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
PRNGKey = chex.PRNGKey
DistributionLike = distrax_distribution.DistributionLike
EventT = distrax_distribution.EventT
class Independent(distrax_distribution.Distribution):
"""Independent distribution obtained from child distributions."""
equiv_tfp_cls = tfd.Independent
def __init__(self,
distribution: DistributionLike,
reinterpreted_batch_ndims: Optional[int] = None):
"""Initializes an Independent distribution.
Args:
distribution: Base distribution instance.
reinterpreted_batch_ndims: Number of event dimensions.
"""
super().__init__()
distribution = conversion.as_distribution(distribution)
self._distribution = distribution
# Check if event shape is a tuple of integers (i.e. not nested).
event_shape = distribution.event_shape
if not (isinstance(event_shape, tuple) and
all(isinstance(i, int) for i in event_shape)):
raise ValueError(
f"'Independent' currently only supports distributions with Array "
f"events (i.e. not nested). Received '{distribution.name}' with "
f"event shape '{distribution.event_shape}'.")
dist_batch_shape = distribution.batch_shape
if reinterpreted_batch_ndims is not None:
dist_batch_ndims = len(dist_batch_shape)
if reinterpreted_batch_ndims > dist_batch_ndims:
raise ValueError(
f'`reinterpreted_batch_ndims` is {reinterpreted_batch_ndims}, but'
f' distribution `{distribution.name}` has only {dist_batch_ndims}'
f' batch dimensions.')
elif reinterpreted_batch_ndims < 0:
raise ValueError(f'`reinterpreted_batch_ndims` can\'t be negative; got'
f' {reinterpreted_batch_ndims}.')
self._reinterpreted_batch_ndims = reinterpreted_batch_ndims
else:
self._reinterpreted_batch_ndims = max(len(dist_batch_shape) - 1, 0)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
dist_batch_shape = self._distribution.batch_shape
event_ndims = len(dist_batch_shape) - self._reinterpreted_batch_ndims
return dist_batch_shape[event_ndims:] + self._distribution.event_shape
@property
def distribution(self):
return self._distribution
@property
def reinterpreted_batch_ndims(self) -> int:
return self._reinterpreted_batch_ndims
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
dist_batch_shape = self._distribution.batch_shape
d = len(dist_batch_shape) - self.reinterpreted_batch_ndims
return dist_batch_shape[:d]
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
return self._distribution.sample(seed=key, sample_shape=n)
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples, log_prob = self._distribution.sample_and_log_prob(
seed=key, sample_shape=n)
log_prob = self._reduce(jnp.sum, log_prob)
return samples, log_prob
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return self._reduce(jnp.sum, self._distribution.log_prob(value))
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
return self._reduce(jnp.sum, self._distribution.entropy())
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return self._reduce(jnp.sum, self._distribution.log_cdf(value))
def mean(self) -> Array:
"""Calculates the mean."""
return self._distribution.mean()
def median(self) -> Array:
"""Calculates the median."""
return self._distribution.median()
def variance(self) -> Array:
"""Calculates the variance."""
return self._distribution.variance()
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self._distribution.stddev()
def mode(self) -> Array:
"""Calculates the mode."""
return self._distribution.mode()
def _reduce(self, fn: Callable[..., Array], value: Array) -> Array:
return fn(value,
axis=[-i - 1 for i in range(0, self.reinterpreted_batch_ndims)])
def __getitem__(self, index) -> 'Independent':
"""See `Distribution.__getitem__`."""
index = distrax_distribution.to_batch_shape_index(self.batch_shape, index)
return Independent(
distribution=self.distribution[index],
reinterpreted_batch_ndims=self.reinterpreted_batch_ndims)
def _kl_divergence_independent_independent(
dist1: Union[Independent, tfd.Independent],
dist2: Union[Independent, tfd.Independent],
*args, **kwargs) -> Array:
"""Batched KL divergence `KL(dist1 || dist2)` for Independent distributions.
Args:
dist1: instance of an Independent distribution.
dist2: instance of an Independent distribution.
*args: Additional args.
**kwargs: Additional kwargs.
Returns:
Batchwise `KL(dist1 || dist2)`
"""
p = dist1.distribution
q = dist2.distribution
if dist1.event_shape == dist2.event_shape:
if p.event_shape == q.event_shape:
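      # Sum the per-component KL over the batch dimensions that `Independent`
      # reinterprets as event dimensions.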
num_reduce_dims = len(dist1.event_shape) - len(p.event_shape)
reduce_dims = [-i - 1 for i in range(0, num_reduce_dims)]
kl_divergence = jnp.sum(p.kl_divergence(q, *args, **kwargs),
axis=reduce_dims)
else:
raise NotImplementedError(
f'KL between Independents whose inner distributions have different '
f'event shapes is not supported: obtained {p.event_shape} and '
f'{q.event_shape}.')
else:
raise ValueError(f'Event shapes {dist1.event_shape} and {dist2.event_shape}'
f' do not match.')
return kl_divergence
# Register the KL functions with TFP.
tfd.RegisterKL(Independent, Independent)(_kl_divergence_independent_independent)
tfd.RegisterKL(Independent, Independent.equiv_tfp_cls)(
_kl_divergence_independent_independent)
tfd.RegisterKL(Independent.equiv_tfp_cls, Independent)(
_kl_divergence_independent_independent)
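# A minimal sketch (illustrative only; shapes and values are arbitrary):
# `Independent` turns the trailing batch dimension of a factorized Normal into
# an event dimension, so `log_prob` sums over it.
if __name__ == '__main__':
  import jax  # Imported locally so the sketch is self-contained.
  from distrax._src.distributions import normal
  _base = normal.Normal(loc=jnp.zeros((3, 4)), scale=jnp.ones((3, 4)))
  _joint = Independent(_base, reinterpreted_batch_ndims=1)
  _x = jax.random.normal(jax.random.PRNGKey(0), (3, 4))
  assert _joint.log_prob(_x).shape == (3,)
  assert jnp.allclose(_joint.log_prob(_x), _base.log_prob(_x).sum(axis=-1))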
| distrax-master | distrax/_src/distributions/independent.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `transformed.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import block
from distrax._src.bijectors import lambda_bijector
from distrax._src.bijectors import masked_coupling
from distrax._src.bijectors import scalar_affine
from distrax._src.bijectors import sigmoid
from distrax._src.distributions import normal
from distrax._src.distributions import transformed
from distrax._src.utils import conversion
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
def _with_additional_parameters(params, all_named_parameters):
"""Convenience function for appending a cartesian product of parameters."""
for name, param in params:
for named_params in all_named_parameters:
yield (f'{named_params[0]}; {name}',) + named_params[1:] + (param,)
def _with_base_dists(*all_named_parameters):
"""Partial of _with_additional_parameters to specify distrax and tfp base."""
base_dists = (
('tfp_base', tfd.Normal),
('distrax_base', normal.Normal),
)
return _with_additional_parameters(base_dists, all_named_parameters)
class TransformedTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = jax.random.PRNGKey(1234)
@parameterized.named_parameters(_with_base_dists(
('1d std normal', 0, 1),
('2d std normal', np.zeros(2), np.ones(2)),
('broadcasted loc', 0, np.ones(3)),
('broadcasted scale', np.ones(3), 1),
))
def test_event_shape(self, mu, sigma, base_dist):
base = base_dist(mu, sigma)
bijector = tfb.Scale(2)
dist = transformed.Transformed(base, bijector)
tfp_dist = tfd.TransformedDistribution(conversion.to_tfp(base), bijector)
assert dist.event_shape == tfp_dist.event_shape
@parameterized.named_parameters(
('tfp_normal, tfp_scale',
lambda: tfd.Normal(0, 1), lambda: tfb.Scale(2)),
('tfp_normal, tfp_shift',
lambda: tfd.Normal(0, 1), lambda: tfb.Shift(3.0)),
('tfp_normal, tfp_tanh',
lambda: tfd.Normal(0, 1), tfb.Tanh),
)
def test_dtype_is_consistent_with_tfp(self, dist_fn, bijector_fn):
base = dist_fn()
bijector = bijector_fn()
dist = transformed.Transformed(base, bijector)
tfp_dist = tfd.TransformedDistribution(conversion.to_tfp(base), bijector)
assert dist.dtype == tfp_dist.dtype
@chex.all_variants
@parameterized.named_parameters(
('tfp_normal, dx_lambda_scale', lambda: tfd.Normal(0, 1),
lambda: lambda x: x * 2, jnp.float32),
('tfp_normal, dx_lambda_shift', lambda: tfd.Normal(0, 1),
lambda: lambda x: x + 3.0, jnp.float32),
('tfp_normal, dx_lambda_tanh', lambda: tfd.Normal(0, 1),
lambda: jnp.tanh, jnp.float32),
)
def test_dtype_is_as_expected(self, dist_fn, bijector_fn, expected_dtype):
base = dist_fn()
bijector = bijector_fn()
dist = transformed.Transformed(base, bijector)
sample = self.variant(dist.sample)(seed=self.seed)
assert dist.dtype == sample.dtype
assert dist.dtype == expected_dtype
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d std normal, no shape', 0, 1, ()),
('1d std normal, int shape', 0, 1, 1),
('1d std normal, 1-tuple shape', 0, 1, (1,)),
('1d std normal, 2-tuple shape', 0, 1, (2, 2)),
('2d std normal, no shape', np.zeros(2), np.ones(2), ()),
('2d std normal, int shape', [0, 0], [1, 1], 1),
('2d std normal, 1-tuple shape', np.zeros(2), np.ones(2), (1,)),
('2d std normal, 2-tuple shape', [0, 0], [1, 1], (2, 2)),
('rank 2 std normal, 2-tuple shape', np.zeros(
(3, 2)), np.ones((3, 2)), (2, 2)),
('broadcasted loc', 0, np.ones(3), (2, 2)),
('broadcasted scale', np.ones(3), 1, ()),
))
def test_sample_shape(self, mu, sigma, sample_shape, base_dist):
base = base_dist(mu, sigma)
bijector = tfb.Scale(2)
dist = transformed.Transformed(base, bijector)
def sample_fn(seed, sample_shape):
return dist.sample(seed=seed, sample_shape=sample_shape)
samples = self.variant(sample_fn, ignore_argnums=(1,), static_argnums=1)(
self.seed, sample_shape)
tfp_dist = tfd.TransformedDistribution(conversion.to_tfp(base), bijector)
tfp_samples = tfp_dist.sample(sample_shape=sample_shape, seed=self.seed)
chex.assert_equal_shape([samples, tfp_samples])
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d dist, 1d value', 0, 1, 1.),
('1d dist, 2d value', 0., 1., np.array([1., 2.])),
('2d dist, 1d value', np.zeros(2), np.ones(2), 1.),
('2d broadcasted dist, 1d value', np.zeros(2), 1, 1.),
('2d dist, 2d value', np.zeros(2), np.ones(2), np.array([1., 2.])),
('1d dist, 1d value, edge case', 0, 1, 200.),
))
def test_log_prob(self, mu, sigma, value, base_dist):
base = base_dist(mu, sigma)
bijector = tfb.Scale(2)
dist = transformed.Transformed(base, bijector)
actual = self.variant(dist.log_prob)(value)
tfp_dist = tfd.TransformedDistribution(conversion.to_tfp(base), bijector)
expected = tfp_dist.log_prob(value)
np.testing.assert_array_equal(actual, expected)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d dist, 1d value', 0, 1, 1.),
('1d dist, 2d value', 0., 1., np.array([1., 2.])),
('2d dist, 1d value', np.zeros(2), np.ones(2), 1.),
('2d broadcasted dist, 1d value', np.zeros(2), 1, 1.),
('2d dist, 2d value', np.zeros(2), np.ones(2), np.array([1., 2.])),
('1d dist, 1d value, edge case', 0, 1, 200.),
))
def test_prob(self, mu, sigma, value, base_dist):
base = base_dist(mu, sigma)
bijector = tfb.Scale(2)
dist = transformed.Transformed(base, bijector)
actual = self.variant(dist.prob)(value)
tfp_dist = tfd.TransformedDistribution(conversion.to_tfp(base), bijector)
expected = tfp_dist.prob(value)
np.testing.assert_array_equal(actual, expected)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d std normal, no shape', 0, 1, ()),
('1d std normal, int shape', 0, 1, 1),
('1d std normal, 1-tuple shape', 0, 1, (1,)),
('1d std normal, 2-tuple shape', 0, 1, (2, 2)),
('2d std normal, no shape', np.zeros(2), np.ones(2), ()),
('2d std normal, int shape', [0, 0], [1, 1], 1),
('2d std normal, 1-tuple shape', np.zeros(2), np.ones(2), (1,)),
('2d std normal, 2-tuple shape', [0, 0], [1, 1], (2, 2)),
('rank 2 std normal, 2-tuple shape', np.zeros(
(3, 2)), np.ones((3, 2)), (2, 2)),
('broadcasted loc', 0, np.ones(3), (2, 2)),
('broadcasted scale', np.ones(3), 1, ()),
))
def test_sample_and_log_prob(self, mu, sigma, sample_shape, base_dist):
base = base_dist(mu, sigma)
bijector = tfb.Tanh()
dist = transformed.Transformed(base, bijector)
def sample_and_log_prob_fn(seed, sample_shape):
return dist.sample_and_log_prob(seed=seed, sample_shape=sample_shape)
samples, log_prob = self.variant(
sample_and_log_prob_fn, ignore_argnums=(1,), static_argnums=(1,))(
self.seed, sample_shape)
expected_samples = bijector.forward(
base.sample(seed=self.seed, sample_shape=sample_shape))
tfp_dist = tfd.TransformedDistribution(conversion.to_tfp(base), bijector)
tfp_samples = tfp_dist.sample(seed=self.seed, sample_shape=sample_shape)
tfp_log_prob = tfp_dist.log_prob(samples)
chex.assert_equal_shape([samples, tfp_samples])
np.testing.assert_allclose(log_prob, tfp_log_prob, rtol=1e-2)
np.testing.assert_allclose(samples, expected_samples, rtol=1e-2)
@parameterized.named_parameters(
('1d-batched bijector, unbatched sample', (2,), ()),
('1d-batched bijector, 1d-batched sample', (2,), (4,)),
('1d-batched bijector, 2d-batched sample', (2,), (4, 5)),
('2d-batched bijector, unbatched sample', (5, 2), ()),
('2d-batched bijector, 1d-batched sample', (5, 2), (4,)),
('2d-batched bijector, 2d-batched sample', (5, 2), (4, 5)),
('3d-batched bijector, unbatched sample', (7, 5, 2), ()),
('3d-batched bijector, 1d-batched sample', (7, 5, 2), (4,)),
('3d-batched bijector, 2d-batched sample', (7, 5, 2), (4, 5)),
)
def test_batched_bijector_shapes(self, batch_shape, sample_shape):
base = tfd.MultivariateNormalDiag(jnp.zeros(3), jnp.ones(3))
bijector = block.Block(tfb.Scale(jnp.ones(batch_shape + (3,))), 1)
dist = transformed.Transformed(base, bijector)
with self.subTest('batch_shape'):
chex.assert_equal(dist.batch_shape, batch_shape)
with self.subTest('sample.shape'):
sample = dist.sample(seed=self.seed, sample_shape=sample_shape)
chex.assert_equal(sample.shape, sample_shape + batch_shape + (3,))
with self.subTest('sample_and_log_prob sample.shape'):
sample, log_prob = dist.sample_and_log_prob(
seed=self.seed, sample_shape=sample_shape)
chex.assert_equal(sample.shape, sample_shape + batch_shape + (3,))
with self.subTest('sample_and_log_prob log_prob.shape'):
sample, log_prob = dist.sample_and_log_prob(
seed=self.seed, sample_shape=sample_shape)
chex.assert_equal(log_prob.shape, sample_shape + batch_shape)
with self.subTest('sample_and_log_prob log_prob value'):
sample, log_prob = dist.sample_and_log_prob(
seed=self.seed, sample_shape=sample_shape)
np.testing.assert_allclose(log_prob, dist.log_prob(sample))
@chex.all_variants
@parameterized.named_parameters(
('Scale-scalar-unbatched', tfb.Scale, 1, (), 3),
('Scale-scalar-batched', tfb.Scale, 1, (), (2, 3)),
('Scale-vector-unbatched', tfb.Scale, 1, 3, 3),
('Scale-vector-batched', tfb.Scale, 1, 3, (2, 3)),
('Scale-batched-unbatched', tfb.Scale, 1, (2, 3), 3),
('Scale-batched-batched', tfb.Scale, 1, (2, 3), (2, 3)),
('Matvec-vector-unbatched', tfb.ScaleMatvecDiag, 0, 3, 3),
('Matvec-vector-batched', tfb.ScaleMatvecDiag, 0, 3, (2, 3)),
('Matvec-batched-unbatched', tfb.ScaleMatvecDiag, 0, (2, 3), 3),
('Matvec-batched-batched', tfb.ScaleMatvecDiag, 0, (2, 3), (2, 3)),
)
def test_batched_bijector_against_tfp(
self, bijector_fn, block_ndims, bijector_shape, params_shape):
base = tfd.MultivariateNormalDiag(
jnp.zeros(params_shape), jnp.ones(params_shape))
tfp_bijector = bijector_fn(jnp.ones(bijector_shape))
dx_bijector = block.Block(tfp_bijector, block_ndims)
dx_dist = transformed.Transformed(base, dx_bijector)
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
with self.subTest('event_shape property matches TFP'):
np.testing.assert_equal(dx_dist.event_shape, tfp_dist.event_shape)
with self.subTest('sample shape matches TFP'):
dx_sample = self.variant(dx_dist.sample)(seed=self.seed)
tfp_sample = self.variant(tfp_dist.sample)(seed=self.seed)
chex.assert_equal_shape([dx_sample, tfp_sample])
with self.subTest('log_prob(dx_sample) matches TFP'):
dx_logp_dx = self.variant(dx_dist.log_prob)(dx_sample)
tfp_logp_dx = self.variant(tfp_dist.log_prob)(dx_sample)
np.testing.assert_allclose(dx_logp_dx, tfp_logp_dx, rtol=1e-2)
with self.subTest('log_prob(tfp_sample) matches TFP'):
dx_logp_tfp = self.variant(dx_dist.log_prob)(tfp_sample)
tfp_logp_tfp = self.variant(tfp_dist.log_prob)(tfp_sample)
np.testing.assert_allclose(dx_logp_tfp, tfp_logp_tfp, rtol=1e-2)
with self.subTest('sample/lp shape is self-consistent'):
second_sample, log_prob = self.variant(dx_dist.sample_and_log_prob)(
seed=self.seed)
chex.assert_equal_shape([dx_sample, second_sample])
chex.assert_equal_shape([dx_logp_dx, log_prob])
# These should all fail because the bijector's event_ndims is incompatible
# with the base distribution's event_shape.
@parameterized.named_parameters(
('scalar', 0),
('matrix', 2),
('3-tensor', 3),
)
def test_raises_on_incorrect_shape(self, block_dims):
base = tfd.MultivariateNormalDiag(jnp.zeros((2, 3)), jnp.ones((2, 3)))
scalar_bijector = tfb.Scale(jnp.ones((1, 2, 3)))
block_bijector = block.Block(scalar_bijector, block_dims)
with self.assertRaises(ValueError):
transformed.Transformed(base, block_bijector)
@chex.all_variants
def test_bijector_that_assumes_batch_dimensions(self):
# Create a Haiku conditioner that assumes a single batch dimension.
def forward(x):
network = hk.Sequential([hk.Flatten(preserve_dims=1), hk.Linear(3)])
return network(x)
init, apply = hk.transform(forward)
params = init(self.seed, jnp.ones((2, 3)))
conditioner = functools.partial(apply, params, self.seed)
bijector = masked_coupling.MaskedCoupling(
jnp.ones(3) > 0, conditioner, tfb.Scale)
base = tfd.MultivariateNormalDiag(jnp.zeros((2, 3)), jnp.ones((2, 3)))
dist = transformed.Transformed(base, bijector)
# Exercise the trace-based functions
assert dist.batch_shape == (2,)
assert dist.event_shape == (3,)
assert dist.dtype == jnp.float32
sample = self.variant(dist.sample)(seed=self.seed)
assert sample.dtype == dist.dtype
self.variant(dist.log_prob)(sample)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(_with_base_dists(
('entropy', 'entropy', 0., 1.),
('mean', 'mean', 0, 1),
('mean from list params', 'mean', [-1, 1], [1, 2]),
('mode', 'mode', 0, 1),
))
def test_method(self, function_string, mu, sigma, base_dist):
base = base_dist(mu, sigma)
bijector = tfb.Scale(2)
dist = transformed.Transformed(base, bijector)
tfp_dist = tfd.TransformedDistribution(conversion.to_tfp(base), bijector)
np.testing.assert_allclose(
self.variant(getattr(dist, function_string))(),
getattr(tfp_dist, function_string)())
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('int16', np.array([0, 0], dtype=np.int16)),
('int32', np.array([0, 0], dtype=np.int32)),
('int64', np.array([0, 0], dtype=np.int64)),
))
def test_integer_inputs(self, inputs, base_dist):
base = base_dist(jnp.zeros_like(inputs, dtype=jnp.float32),
jnp.ones_like(inputs, dtype=jnp.float32))
bijector = scalar_affine.ScalarAffine(shift=0.0)
dist = transformed.Transformed(base, bijector)
log_prob = self.variant(dist.log_prob)(inputs)
standard_normal_log_prob_of_zero = -0.9189385
expected_log_prob = jnp.full_like(
inputs, standard_normal_log_prob_of_zero, dtype=jnp.float32)
np.testing.assert_array_equal(log_prob, expected_log_prob)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'tfp_to_distrax'))
def test_kl_divergence(self, mode_string):
base_dist1 = tfd.Normal([0.1, 0.5, 0.9], [0.1, 1.1, 2.5])
base_dist2 = tfd.Normal(-0.1, 1.5)
bij_tfp1 = tfb.Identity()
bij_tfp2 = tfb.Identity()
bij_distrax1 = bij_tfp1
bij_distrax2 = lambda_bijector.Lambda(lambda x: x)
tfp_dist1 = tfd.TransformedDistribution(base_dist1, bij_tfp1)
tfp_dist2 = tfd.TransformedDistribution(base_dist2, bij_tfp2)
distrax_dist1 = transformed.Transformed(base_dist1, bij_distrax1)
distrax_dist2 = transformed.Transformed(base_dist2, bij_distrax2)
expected_result_fwd = base_dist1.kl_divergence(base_dist2)
expected_result_inv = base_dist2.kl_divergence(base_dist1)
distrax_fn1 = self.variant(distrax_dist1.kl_divergence)
distrax_fn2 = self.variant(distrax_dist2.kl_divergence)
if mode_string == 'distrax_to_distrax':
result_fwd = distrax_fn1(distrax_dist2)
result_inv = distrax_fn2(distrax_dist1)
elif mode_string == 'distrax_to_tfp':
result_fwd = distrax_fn1(tfp_dist2)
result_inv = distrax_fn2(tfp_dist1)
elif mode_string == 'tfp_to_distrax':
result_fwd = tfp_dist1.kl_divergence(distrax_dist2)
result_inv = tfp_dist2.kl_divergence(distrax_dist1)
np.testing.assert_allclose(result_fwd, expected_result_fwd, rtol=1e-2)
np.testing.assert_allclose(result_inv, expected_result_inv, rtol=1e-2)
@chex.all_variants(with_pmap=False)
def test_kl_divergence_on_same_instance_of_distrax_bijector(self):
base_dist1 = tfd.Normal([0.1, 0.5, 0.9], [0.1, 1.1, 2.5])
base_dist2 = tfd.Normal(-0.1, 1.5)
bij_distrax = sigmoid.Sigmoid()
distrax_dist1 = transformed.Transformed(base_dist1, bij_distrax)
distrax_dist2 = transformed.Transformed(base_dist2, bij_distrax)
expected_result_fwd = base_dist1.kl_divergence(base_dist2)
expected_result_inv = base_dist2.kl_divergence(base_dist1)
result_fwd = self.variant(distrax_dist1.kl_divergence)(distrax_dist2)
result_inv = self.variant(distrax_dist2.kl_divergence)(distrax_dist1)
np.testing.assert_allclose(result_fwd, expected_result_fwd, rtol=1e-2)
np.testing.assert_allclose(result_inv, expected_result_inv, rtol=1e-2)
def test_kl_divergence_raises_on_event_shape(self):
base_dist1 = tfd.MultivariateNormalDiag([0.1, 0.5, 0.9], [0.1, 1.1, 2.5])
base_dist2 = tfd.Normal(-0.1, 1.5)
bij1 = block.Block(lambda_bijector.Lambda(lambda x: x), ndims=1)
bij2 = lambda_bijector.Lambda(lambda x: x)
distrax_dist1 = transformed.Transformed(base_dist1, bij1)
distrax_dist2 = transformed.Transformed(base_dist2, bij2)
with self.assertRaises(ValueError):
distrax_dist1.kl_divergence(distrax_dist2)
def test_kl_divergence_raises_on_different_bijectors(self):
base_dist1 = tfd.Normal([0.1, 0.5, 0.9], [0.1, 1.1, 2.5])
base_dist2 = tfd.Normal(-0.1, 1.5)
bij1 = lambda_bijector.Lambda(lambda x: x)
bij2 = sigmoid.Sigmoid()
distrax_dist1 = transformed.Transformed(base_dist1, bij1)
distrax_dist2 = transformed.Transformed(base_dist2, bij2)
with self.assertRaises(NotImplementedError):
distrax_dist1.kl_divergence(distrax_dist2)
def test_jittable(self):
@jax.jit
def f(x, d):
return d.log_prob(x)
base = normal.Normal(0, 1)
bijector = scalar_affine.ScalarAffine(0, 1)
dist = transformed.Transformed(base, bijector)
x = np.zeros(())
f(x, dist)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/transformed_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `uniform.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import uniform
from distrax._src.utils import equivalence
import jax
import jax.numpy as jnp
import numpy as np
class UniformTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(uniform.Uniform)
@parameterized.named_parameters(
('1d', (0., 1.)),
('2d', (np.zeros(2), np.ones(2))),
('rank 2', (np.zeros((3, 2)), np.ones((3, 2)))),
('broadcasted low', (0., np.ones(3))),
('broadcasted high', (np.ones(3), 1.)),
)
def test_event_shape(self, distr_params):
super()._test_event_shape(distr_params, dict())
@chex.all_variants
@parameterized.named_parameters(
('1d, no shape', (0., 1.), ()),
('1d, int shape', (0., 1.), 1),
('1d, 1-tuple shape', (0., 1.), (1,)),
('1d, 2-tuple shape', (0., 1.), (2, 2)),
('2d, no shape', (np.zeros(2), np.ones(2)), ()),
('2d, int shape', (np.zeros(2), np.ones(2)), 1),
('2d, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d, 2-tuple shape', (np.zeros(2), np.ones(2)), (2, 2)),
('rank 2, 2-tuple shape', (np.zeros((3, 2)), np.ones((3, 2))), (2, 2)),
('broadcasted low', (0., np.ones(3)), (2, 2)),
('broadcasted high', (np.ones(3), 1.), ()),
)
def test_sample_shape(self, distr_params, sample_shape):
super()._test_sample_shape(distr_params, dict(), sample_shape)
@chex.all_variants
@jax.numpy_rank_promotion('raise')
@parameterized.named_parameters(
('1d, no shape', (0., 1.), ()),
('1d, int shape', (0., 1.), 1),
('1d, 1-tuple shape', (0., 1.), (1,)),
('1d, 2-tuple shape', (0., 1.), (2, 2)),
('2d, no shape', (np.zeros(2), np.ones(2)), ()),
('2d, int shape', (np.zeros(2), np.ones(2)), 1),
('2d, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d, 2-tuple shape', (np.zeros(2), np.ones(2)), (2, 2)),
('rank 2, 2-tuple shape', (np.zeros((3, 2)), np.ones((3, 2))), (2, 2)),
('broadcasted low', (0., np.ones(3)), (2, 2)),
('broadcasted high', (np.ones(3), 1.), ()),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
super()._test_sample_and_log_prob(
dist_args=distr_params,
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants
@parameterized.named_parameters(
('log_prob', 'log_prob'),
('prob', 'prob'),
('cdf', 'cdf'),
('survival_function', 'survival_function'),
('log_survival_function', 'log_survival_function')
)
def test_method_with_inputs(self, function_string):
inputs = 10. * np.random.normal(size=(100,))
super()._test_attribute(
function_string, dist_args=(-1, 1), call_args=(inputs,))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy', (0., 1.), 'entropy'),
('mean', (0, 1), 'mean'),
('variance', (0, 1), 'variance'),
('variance from 1d params', (np.ones(2), np.ones(2)), 'mean'),
('stddev', (0, 1), 'stddev'),
('stddev from rank 2 params', (np.ones((2, 3)), np.ones(
(2, 3))), 'stddev'),
)
def test_method(self, distr_params, function_string):
super()._test_attribute(function_string, distr_params)
@parameterized.named_parameters(
('low', 'low'),
('high', 'high'),
)
def test_attribute(self, attribute_string):
super()._test_attribute(attribute_string)
@chex.all_variants(with_pmap=False)
def test_median(self):
np.testing.assert_allclose(
self.variant(self.distrax_cls(-1, 1).median)(), 0)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'low': -0.5 + np.random.rand(4, 1, 2),
'high': np.array([[1.8, 1.5], [1.1, 1.2], [1.4, 1.1]]),
},
dist2_kwargs={
'low': -1.0 + np.random.rand(3, 2),
'high': 1.5 + np.random.rand(4, 1, 2),
},
assertion_fn=self.assertion_fn(rtol=1e-3))
def test_jittable(self):
super()._test_jittable((0.0, 1.0))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
low = jnp.zeros((3, 4, 5))
high = jnp.ones((3, 4, 5))
dist = self.distrax_cls(low=low, high=high)
self.assertion_fn(rtol=1e-3)(dist[slice_].low, low[slice_])
self.assertion_fn(rtol=1e-3)(dist[slice_].high, high[slice_])
def test_slice_different_parameterization(self):
low = jnp.zeros((3, 4, 5))
high = 1.
dist = self.distrax_cls(low=low, high=high)
self.assertion_fn(rtol=1e-3)(dist[..., -1].low, low[..., -1])
self.assertEqual(dist[..., -1].high.shape, (3, 4))
self.assertion_fn(rtol=1e-3)(dist[..., -1].high, high) # Not slicing high.
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/uniform_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clipped distributions."""
from typing import Tuple
import chex
from distrax._src.distributions import distribution as base_distribution
from distrax._src.distributions import logistic
from distrax._src.distributions import normal
from distrax._src.utils import conversion
import jax.numpy as jnp
Array = chex.Array
PRNGKey = chex.PRNGKey
Numeric = chex.Numeric
DistributionLike = base_distribution.DistributionLike
EventT = base_distribution.EventT
class Clipped(base_distribution.Distribution):
"""A clipped distribution."""
def __init__(
self,
distribution: DistributionLike,
minimum: Numeric,
maximum: Numeric):
"""Wraps a distribution clipping samples out of `[minimum, maximum]`.
The samples outside of `[minimum, maximum]` are clipped to the boundary.
The log probability of samples outside of this range is `-inf`.
Args:
distribution: a Distrax / TFP distribution to be wrapped.
      minimum: can be a `scalar` or `vector`; if a vector, it must have at most
        as many dimensions as `distribution.batch_shape` and must be
        broadcastable to it.
      maximum: can be a `scalar` or `vector`; if a vector, it must have at most
        as many dimensions as `distribution.batch_shape` and must be
        broadcastable to it.
"""
super().__init__()
if distribution.event_shape:
raise ValueError('The wrapped distribution must have event shape ().')
if (jnp.array(minimum).ndim > len(distribution.batch_shape) or
jnp.array(maximum).ndim > len(distribution.batch_shape)):
raise ValueError(
          'The minimum and maximum clipping boundaries must be scalars or '
          'vectors with at most as many dimensions as the batch_shape of the '
          'distribution: i.e. we can broadcast min/max to batch_shape but not '
          'vice versa.')
self._distribution = conversion.as_distribution(distribution)
self._minimum = jnp.broadcast_to(minimum, self._distribution.batch_shape)
self._maximum = jnp.broadcast_to(maximum, self._distribution.batch_shape)
self._log_prob_minimum = self._distribution.log_cdf(minimum)
self._log_prob_maximum = self._distribution.log_survival_function(maximum)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
raw_sample = self._distribution.sample(seed=key, sample_shape=[n])
return jnp.clip(raw_sample, self._minimum, self._maximum)
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples = self._sample_n(key, n)
return samples, self.log_prob(samples)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
# The log_prob can be used to compute expectations by explicitly integrating
# over the discrete and continuous elements.
# Info about mixed distributions:
# http://www.randomservices.org/random/dist/Mixed.html
log_prob = jnp.where(
jnp.equal(value, self._minimum),
self._log_prob_minimum,
jnp.where(jnp.equal(value, self._maximum),
self._log_prob_maximum,
self._distribution.log_prob(value)))
# Giving -inf log_prob outside the boundaries.
return jnp.where(
jnp.logical_or(value < self._minimum, value > self._maximum),
-jnp.inf,
log_prob)
@property
def minimum(self) -> Array:
return self._minimum
@property
def maximum(self) -> Array:
return self._maximum
@property
def distribution(self) -> DistributionLike:
return self._distribution
@property
def event_shape(self) -> Tuple[int, ...]:
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
return self._distribution.batch_shape
def __getitem__(self, index) -> 'Clipped':
"""See `Distribution.__getitem__`."""
index = base_distribution.to_batch_shape_index(self.batch_shape, index)
return Clipped(
distribution=self.distribution[index],
minimum=self.minimum[index],
maximum=self.maximum[index])
class ClippedNormal(Clipped):
"""A clipped normal distribution."""
def __init__(
self, loc: Numeric, scale: Numeric, minimum: Numeric, maximum: Numeric):
distribution = normal.Normal(loc=loc, scale=scale)
super().__init__(distribution, minimum=minimum, maximum=maximum)
class ClippedLogistic(Clipped):
"""A clipped logistic distribution."""
def __init__(
self, loc: Numeric, scale: Numeric, minimum: Numeric, maximum: Numeric):
distribution = logistic.Logistic(loc=loc, scale=scale)
super().__init__(distribution, minimum=minimum, maximum=maximum)
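# A minimal usage sketch (editorial addition, not part of the library): it
# illustrates the boundary behaviour documented in `Clipped.log_prob`. Samples
# are clipped to `[minimum, maximum]`, the boundaries carry the mass of the
# clipped tails, and values outside the interval have `-inf` log-probability.
if __name__ == '__main__':
  import jax
  example_dist = ClippedNormal(loc=0., scale=1., minimum=-1., maximum=1.)
  example_samples = example_dist.sample(
      seed=jax.random.PRNGKey(0), sample_shape=5)
  print(example_samples)             # All samples lie in [-1, 1].
  print(example_dist.log_prob(-1.))  # log P(X <= -1) under the base normal.
  print(example_dist.log_prob(2.))   # -inf: outside [minimum, maximum].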
| distrax-master | distrax/_src/distributions/clipped.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LogStddevNormal distribution."""
import math
from typing import Optional
import chex
from distrax._src.distributions import distribution
from distrax._src.distributions import normal
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
class LogStddevNormal(normal.Normal):
"""Normal distribution with `log_scale` parameter.
The `LogStddevNormal` has three parameters: `loc`, `log_scale`, and
(optionally) `max_scale`. The distribution is a univariate normal
distribution with mean equal to `loc` and scale parameter (i.e., stddev) equal
to `exp(log_scale)` if `max_scale` is None. If `max_scale` is not None, a soft
thresholding is applied to obtain the scale parameter of the normal, so that
its log is given by `log(max_scale) - softplus(log(max_scale) - log_scale)`.
"""
def __init__(self,
loc: Numeric,
log_scale: Numeric,
max_scale: Optional[float] = None):
"""Initializes a LogStddevNormal distribution.
Args:
loc: Mean of the distribution.
log_scale: Log of the distribution's scale (before the soft thresholding
applied when `max_scale` is not None).
max_scale: Maximum value of the scale that this distribution will saturate
at. This parameter can be useful for numerical stability. It is not a
hard maximum; rather, we compute `log(scale)` as per the formula:
`log(max_scale) - softplus(log(max_scale) - log_scale)`.
"""
self._max_scale = max_scale
if max_scale is not None:
max_log_scale = math.log(max_scale)
self._log_scale = max_log_scale - jax.nn.softplus(
max_log_scale - conversion.as_float_array(log_scale))
else:
self._log_scale = conversion.as_float_array(log_scale)
scale = jnp.exp(self._log_scale)
super().__init__(loc, scale)
@property
def log_scale(self) -> Array:
"""The log standard deviation (after thresholding, if applicable)."""
return jnp.broadcast_to(self._log_scale, self.batch_shape)
def __getitem__(self, index) -> 'LogStddevNormal':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return LogStddevNormal(
loc=self.loc[index],
log_scale=self.log_scale[index],
max_scale=self._max_scale)
def _kl_logstddevnormal_logstddevnormal(
dist1: LogStddevNormal, dist2: LogStddevNormal,
*unused_args, **unused_kwargs) -> Array:
"""Calculates the batched KL divergence between two LogStddevNormal's.
Args:
dist1: A LogStddevNormal distribution.
dist2: A LogStddevNormal distribution.
Returns:
Batchwise KL(dist1 || dist2).
"""
# KL[N(u_a, s_a^2) || N(u_b, s_b^2)] between two Gaussians:
# (s_a^2 + (u_a - u_b)^2)/(2*s_b^2) + log(s_b) - log(s_a) - 1/2.
variance1 = jnp.square(dist1.scale)
variance2 = jnp.square(dist2.scale)
return ((variance1 + jnp.square(dist1.loc - dist2.loc)) / (2.0 * variance2) +
dist2.log_scale - dist1.log_scale - 0.5)
# Register the KL function.
tfd.RegisterKL(LogStddevNormal, LogStddevNormal)(
_kl_logstddevnormal_logstddevnormal)
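# A minimal usage sketch (editorial addition, not part of the library): it
# shows the soft thresholding of the scale described in the class docstring.
# With `max_scale` set, the effective scale saturates smoothly below
# `max_scale` instead of growing as `exp(log_scale)`.
if __name__ == '__main__':
  example_dist = LogStddevNormal(loc=0., log_scale=2., max_scale=3.)
  # exp(2.) ~= 7.39, but the scale is soft-clipped to stay below max_scale = 3:
  # scale = exp(log(3) - softplus(log(3) - 2)) ~= 2.13.
  print(example_dist.scale)
  print(example_dist.log_scale)  # Log of the thresholded scale above.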
| distrax-master | distrax/_src/distributions/log_stddev_normal.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multinomial distribution."""
import functools
import operator
from typing import Any, Tuple, Optional, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
from distrax._src.utils import math
import jax
from jax import lax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Multinomial(distribution.Distribution):
"""Multinomial distribution with parameter `probs`."""
equiv_tfp_cls = tfd.Multinomial
def __init__(self,
total_count: Numeric,
logits: Optional[Array] = None,
probs: Optional[Array] = None,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes a Multinomial distribution.
Args:
total_count: The number of trials per sample.
logits: Logit transform of the probability of each category. Only one
of `logits` or `probs` can be specified.
probs: Probability of each category. Only one of `logits` or `probs` can
be specified.
dtype: The type of event samples.
"""
super().__init__()
logits = None if logits is None else conversion.as_float_array(logits)
probs = None if probs is None else conversion.as_float_array(probs)
if (logits is None) == (probs is None):
raise ValueError(
f'One and exactly one of `logits` and `probs` should be `None`, '
f'but `logits` is {logits} and `probs` is {probs}.')
if logits is not None and (not logits.shape or logits.shape[-1] < 2):
raise ValueError(
f'The last dimension of `logits` must be greater than 1, but '
f'`logits.shape = {logits.shape}`.')
if probs is not None and (not probs.shape or probs.shape[-1] < 2):
raise ValueError(
f'The last dimension of `probs` must be greater than 1, but '
f'`probs.shape = {probs.shape}`.')
if not (jnp.issubdtype(dtype, jnp.integer) or
jnp.issubdtype(dtype, jnp.floating)):
raise ValueError(
f'The dtype of `{self.name}` must be integer or floating-point, '
f'instead got `{dtype}`.')
self._total_count = jnp.asarray(total_count, dtype=dtype)
self._probs = None if probs is None else math.normalize(probs=probs)
self._logits = None if logits is None else math.normalize(logits=logits)
self._dtype = dtype
if self._probs is not None:
probs_batch_shape = self._probs.shape[:-1]
else:
assert self._logits is not None
probs_batch_shape = self._logits.shape[:-1]
self._batch_shape = lax.broadcast_shapes(
probs_batch_shape, self._total_count.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
if self._logits is not None:
return self._logits.shape[-1:]
else:
return self._probs.shape[-1:]
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
@property
def total_count(self) -> Array:
"""The number of trials per sample."""
return jnp.broadcast_to(self._total_count, self.batch_shape)
@property
def num_trials(self) -> Array:
"""The number of trials for each event."""
return self.total_count
@property
def logits(self) -> Array:
"""The logits for each event."""
if self._logits is not None:
return jnp.broadcast_to(self._logits, self.batch_shape + self.event_shape)
return jnp.broadcast_to(jnp.log(self._probs),
self.batch_shape + self.event_shape)
@property
def probs(self) -> Array:
"""The probabilities for each event."""
if self._probs is not None:
return jnp.broadcast_to(self._probs, self.batch_shape + self.event_shape)
return jnp.broadcast_to(jax.nn.softmax(self._logits, axis=-1),
self.batch_shape + self.event_shape)
@property
def log_of_probs(self) -> Array:
"""The log probabilities for each event."""
if self._logits is not None:
# jax.nn.log_softmax was already applied in init to logits.
return jnp.broadcast_to(self._logits,
self.batch_shape + self.event_shape)
return jnp.broadcast_to(jnp.log(self._probs),
self.batch_shape + self.event_shape)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
total_permutations = lax.lgamma(self._total_count + 1.)
counts_factorial = lax.lgamma(value + 1.)
redundant_permutations = jnp.sum(counts_factorial, axis=-1)
log_combinations = total_permutations - redundant_permutations
return log_combinations + jnp.sum(
math.multiply_no_nan(self.log_of_probs, value), axis=-1)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
num_keys = functools.reduce(operator.mul, self.batch_shape, 1)
keys = jax.random.split(key, num=num_keys)
total_count = jnp.reshape(self.total_count, (-1,))
logits = jnp.reshape(self.logits, (-1,) + self.event_shape)
sample_fn = jax.vmap(
self._sample_n_scalar, in_axes=(0, 0, None, 0, None), out_axes=1)
samples = sample_fn(keys, total_count, n, logits, self._dtype) # [n, B, K]
return samples.reshape((n,) + self.batch_shape + self.event_shape)
@staticmethod
def _sample_n_scalar(
key: PRNGKey, total_count: Union[int, Array], n: int, logits: Array,
dtype: jnp.dtype) -> Array:
"""Sample method for a Multinomial with integer `total_count`."""
def cond_func(args):
i, _, _ = args
return jnp.less(i, total_count)
def body_func(args):
i, key_i, sample_aggregator = args
key_i, current_key = jax.random.split(key_i)
sample_i = jax.random.categorical(current_key, logits=logits, shape=(n,))
one_hot_i = jax.nn.one_hot(sample_i, logits.shape[0]).astype(dtype)
return i + 1, key_i, sample_aggregator + one_hot_i
init_aggregator = jnp.zeros((n, logits.shape[0]), dtype=dtype)
return lax.while_loop(cond_func, body_func, (0, key, init_aggregator))[2]
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
# The method `_entropy_scalar` does not work when `self.total_count` is an
# array (instead of a scalar) or when we jit the function, so we default to
# computing the entropy using an alternative method that uses a lax while
# loop and does not create intermediate arrays whose shape depends on
# `self.total_count`.
entropy_fn = jnp.vectorize(
self._entropy_scalar_with_lax, signature='(),(k),(k)->()')
return entropy_fn(self.total_count, self.probs, self.log_of_probs)
@staticmethod
def _entropy_scalar(
total_count: int, probs: Array, log_of_probs: Array
) -> Union[jnp.float32, jnp.float64]:
"""Calculates the entropy for a Multinomial with integer `total_count`."""
# Constant factors in the entropy.
xi = jnp.arange(total_count + 1, dtype=probs.dtype)
log_xi_factorial = lax.lgamma(xi + 1)
log_n_minus_xi_factorial = jnp.flip(log_xi_factorial, axis=-1)
log_n_factorial = log_xi_factorial[..., -1]
log_comb_n_xi = (
log_n_factorial[..., None] - log_xi_factorial
- log_n_minus_xi_factorial)
comb_n_xi = jnp.round(jnp.exp(log_comb_n_xi))
chex.assert_shape(comb_n_xi, (total_count + 1,))
likelihood1 = math.power_no_nan(probs[..., None], xi)
likelihood2 = math.power_no_nan(1. - probs[..., None], total_count - xi)
chex.assert_shape(likelihood1, (probs.shape[-1], total_count + 1,))
chex.assert_shape(likelihood2, (probs.shape[-1], total_count + 1,))
likelihood = jnp.sum(likelihood1 * likelihood2, axis=-2)
chex.assert_shape(likelihood, (total_count + 1,))
comb_term = jnp.sum(comb_n_xi * log_xi_factorial * likelihood, axis=-1)
chex.assert_shape(comb_term, ())
# Probs factors in the entropy.
n_probs_factor = jnp.sum(
total_count * math.multiply_no_nan(log_of_probs, probs), axis=-1)
return - log_n_factorial - n_probs_factor + comb_term
@staticmethod
def _entropy_scalar_with_lax(
total_count: int, probs: Array, log_of_probs: Array
) -> Union[jnp.float32, jnp.float64]:
"""Like `_entropy_scalar`, but uses a lax while loop."""
dtype = probs.dtype
log_n_factorial = lax.lgamma(jnp.asarray(total_count + 1, dtype=dtype))
def cond_func(args):
xi, _ = args
return jnp.less_equal(xi, total_count)
def body_func(args):
xi, accumulated_sum = args
xi_float = jnp.asarray(xi, dtype=dtype)
log_xi_factorial = lax.lgamma(xi_float + 1.)
log_comb_n_xi = (log_n_factorial - log_xi_factorial
- lax.lgamma(total_count - xi_float + 1.))
comb_n_xi = jnp.round(jnp.exp(log_comb_n_xi))
likelihood1 = math.power_no_nan(probs, xi)
likelihood2 = math.power_no_nan(1. - probs, total_count - xi)
likelihood = likelihood1 * likelihood2
comb_term = comb_n_xi * log_xi_factorial * likelihood # [K]
chex.assert_shape(comb_term, (probs.shape[-1],))
return xi + 1, accumulated_sum + comb_term
comb_term = jnp.sum(
lax.while_loop(cond_func, body_func, (0, jnp.zeros_like(probs)))[1],
axis=-1)
n_probs_factor = jnp.sum(
total_count * math.multiply_no_nan(log_of_probs, probs), axis=-1)
return - log_n_factorial - n_probs_factor + comb_term
def mean(self) -> Array:
"""Calculates the mean."""
return self._total_count[..., None] * self.probs
def variance(self) -> Array:
"""Calculates the variance."""
probs = self.probs
return self._total_count[..., None] * probs * (1. - probs)
def covariance(self) -> Array:
"""Calculates the covariance."""
probs = self.probs
cov_matrix = -self._total_count[..., None, None] * (
probs[..., None, :] * probs[..., :, None])
chex.assert_shape(cov_matrix, probs.shape + self.event_shape)
    # Add the missing diagonal term to the covariance matrix.
cov_matrix += jnp.vectorize(
jnp.diag, signature='(k)->(k,k)')(
self._total_count[..., None] * probs)
return cov_matrix
def __getitem__(self, index) -> 'Multinomial':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
total_count = self.total_count[index]
if self._logits is not None:
return Multinomial(
total_count=total_count, logits=self.logits[index], dtype=self._dtype)
return Multinomial(
total_count=total_count, probs=self.probs[index], dtype=self._dtype)
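# A minimal usage sketch (editorial addition, not part of the library): a
# Multinomial over three categories with ten trials. Samples are count vectors
# summing to `total_count`, and `log_prob` combines the log-combinatorial term
# with the weighted log-probabilities, as implemented above.
if __name__ == '__main__':
  example_dist = Multinomial(total_count=10, probs=jnp.array([0.2, 0.3, 0.5]))
  example_counts = example_dist.sample(seed=jax.random.PRNGKey(0))
  print(example_counts)                         # e.g. [2 3 5]; sums to 10.
  print(example_dist.log_prob(example_counts))  # Log-probability of the draw.
  print(example_dist.mean())                    # [2. 3. 5.] = total_count * probs.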
| distrax-master | distrax/_src/distributions/multinomial.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `laplace.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import laplace
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
class LaplaceTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(laplace.Laplace)
@parameterized.named_parameters(
('1d std laplace', (0, 1)),
('2d std laplace', (np.zeros(2), np.ones(2))),
      ('rank 2 std laplace', (np.zeros((3, 2)), np.ones((3, 2)))),
('broadcasted loc', (0, np.ones(3))),
('broadcasted scale', (np.ones(3), 1)),
)
def test_event_shape(self, distr_params):
super()._test_event_shape(distr_params, dict())
@chex.all_variants
@parameterized.named_parameters(
('1d std laplace, no shape', (0, 1), ()),
('1d std laplace, int shape', (0, 1), 1),
('1d std laplace, 1-tuple shape', (0, 1), (1,)),
('1d std laplace, 2-tuple shape', (0, 1), (2, 2)),
('2d std laplace, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std laplace, int shape', ([0, 0], [1, 1]), 1),
('2d std laplace, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std laplace, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
      ('rank 2 std laplace, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_shape(distr_params, dict(), sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(
loc=jnp.zeros((), dtype), scale=jnp.ones((), dtype))
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d std laplace, no shape', (0, 1), ()),
('1d std laplace, int shape', (0, 1), 1),
('1d std laplace, 1-tuple shape', (0, 1), (1,)),
('1d std laplace, 2-tuple shape', (0, 1), (2, 2)),
('2d std laplace, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std laplace, int shape', ([0, 0], [1, 1]), 1),
('2d std laplace, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std laplace, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
      ('rank 2 std laplace, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_and_log_prob(
dist_args=distr_params,
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0.5, 0.1), np.array([1, 2])),
('1d dist, 2d value as list', (0.5, 0.1), [1, 2]),
('2d dist, 1d value', (0.5 + np.zeros(2), 0.3 * np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 0.8), 1),
('2d dist, 2d value', ([0.1, -0.5], 0.9 * np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_log_prob(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
super()._test_attribute(
attribute_string='log_prob',
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0.5, 0.1), np.array([1, 2])),
('1d dist, 2d value as list', (0.5, 0.1), [1, 2]),
('2d dist, 1d value', (0.5 + np.zeros(2), 0.3 * np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 0.8), 1),
('2d dist, 2d value', ([0.1, -0.5], 0.9 * np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_prob(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
super()._test_attribute(
attribute_string='prob',
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0.5, 0.1), np.array([1, 2])),
('1d dist, 2d value as list', (0.5, 0.1), [1, 2]),
('2d dist, 1d value', (0.5 + np.zeros(2), 0.3 * np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 0.8), 1),
('2d dist, 2d value', ([0.1, -0.5], 0.9 * np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_cdf(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
super()._test_attribute(
attribute_string='cdf',
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0.5, 0.1), np.array([1, 2])),
('1d dist, 2d value as list', (0.5, 0.1), [1, 2]),
('2d dist, 1d value', (0.5 + np.zeros(2), 0.3 * np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 0.8), 1),
('2d dist, 2d value', ([0.1, -0.5], 0.9 * np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_log_cdf(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
super()._test_attribute(
attribute_string='log_cdf',
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0.5, 0.1), np.array([1, 2])),
('1d dist, 2d value as list', (0.5, 0.1), [1, 2]),
('2d dist, 1d value', (0.5 + np.zeros(2), 0.3 * np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 0.8), 1),
('2d dist, 2d value', ([0.1, -0.5], 0.9 * np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), -200),
)
def test_log_survival_function(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
super()._test_attribute(
attribute_string='log_survival_function',
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy', ([0., 1., -0.5], [0.5, 1., 1.5]), 'entropy'),
('entropy broadcasted loc', (0.5, [0.5, 1., 1.5]), 'entropy'),
('entropy broadcasted scale', ([0., 1., -0.5], 0.8), 'entropy'),
('mean', ([0., 1., -0.5], [0.5, 1., 1.5]), 'mean'),
('mean broadcasted loc', (0.5, [0.5, 1., 1.5]), 'mean'),
('mean broadcasted scale', ([0., 1., -0.5], 0.8), 'mean'),
('variance', ([0., 1., -0.5], [0.5, 1., 1.5]), 'variance'),
('variance broadcasted loc', (0.5, [0.5, 1., 1.5]), 'variance'),
('variance broadcasted scale', ([0., 1., -0.5], 0.8), 'variance'),
('stddev', ([0., 1., -0.5], [0.5, 1., 1.5]), 'stddev'),
('stddev broadcasted loc', (0.5, [0.5, 1., 1.5]), 'stddev'),
('stddev broadcasted scale', ([0., 1., -0.5], 0.8), 'stddev'),
('mode', ([0., 1., -0.5], [0.5, 1., 1.5]), 'mode'),
('mode broadcasted loc', (0.5, [0.5, 1., 1.5]), 'mode'),
('mode broadcasted scale', ([0., 1., -0.5], 0.8), 'mode'),
)
def test_method(self, distr_params, function_string):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_attribute(
attribute_string=function_string,
dist_args=distr_params,
assertion_fn=self.assertion_fn(rtol=2e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', ([0., 1., -0.5], [0.5, 1., 1.5])),
('broadcasted loc', (0.5, [0.5, 1., 1.5])),
('broadcasted scale', ([0., 1., -0.5], 0.8)),
)
def test_median(self, distr_params):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
dist = self.distrax_cls(*distr_params)
self.assertion_fn(rtol=2e-2)(self.variant(dist.median)(), dist.mean())
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax')
)
def test_with_two_distributions(self, function_string, mode_string):
rng = np.random.default_rng(42)
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'loc': rng.normal(size=(4, 1, 2)),
'scale': np.array([[0.8, 0.2], [0.1, 1.2], [1.4, 3.1]]),
},
dist2_kwargs={
'loc': rng.normal(size=(3, 2)),
'scale': 0.1 + rng.uniform(size=(4, 1, 2)),
},
assertion_fn=self.assertion_fn(rtol=2e-2))
  def test_jittable(self):
super()._test_jittable((0., 1.))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
rng = np.random.default_rng(42)
loc = jnp.array(rng.normal(size=(3, 4, 5)))
scale = jnp.array(rng.uniform(size=(3, 4, 5)))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(rtol=2e-2)(dist[slice_].mean(), loc[slice_])
def test_slice_different_parameterization(self):
rng = np.random.default_rng(42)
loc = jnp.array(rng.normal(size=(4,)))
scale = jnp.array(rng.uniform(size=(3, 4)))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(rtol=2e-2)(dist[0].loc, loc) # Not slicing loc.
self.assertion_fn(rtol=2e-2)(dist[0].scale, scale[0])
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/laplace_test.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Von Mises distribution."""
import functools
import math
from typing import cast, Sequence, Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.distributions import normal
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
IntLike = Union[int, np.int16, np.int32, np.int64]
EventT = distribution.EventT
class VonMises(distribution.Distribution):
"""The von Mises distribution over angles.
The von Mises distribution is a distribution over angles. It is the maximum
entropy distribution on the space of angles, given a circular mean and a
circular variance.
  In this implementation, the distribution is defined over the range [-pi, pi):
  all samples lie in this interval, and the CDF is constant outside of it. Note
  that `prob` and `log_prob` also accept values outside of [-pi, pi); since the
  density is periodic, they return the same value as for the corresponding
  angle inside the interval.
When `concentration=0`, this distribution becomes the uniform distribution
over the interval [-pi, pi). When the concentration goes to infinity, this
distribution approximates a Normal distribution.
#### Details
The probability density function (pdf) of this distribution is,
```none
pdf(x; loc, concentration) = exp(concentration * cos(x - loc))
/ (2 * pi * I_0 (concentration))
```
where:
* `I_0` is the zeroth order modified Bessel function;
  * `loc` is the circular mean of the distribution, a scalar in radians.
  It can take arbitrary values, including values outside of [-pi, pi).
* `concentration >= 0` is the concentration parameter. It is the
analogue to 1/sigma of the Normal distribution.
#### Examples
Examples of initialization of this distribution.
```python
# Create a batch of two von Mises distributions.
dist = distrax.VonMises(loc=[1.0, 2.0], concentration=[3.0, 4.0])
dist.sample(sample_shape=(3,), seed=0) # Sample of shape [3, 2]
```
Arguments are broadcast when possible.
```python
dist = distrax.VonMises(loc=1.0, concentration=[3.0, 4.0])
# Evaluating the pdf of both distributions on the point 3.0 returns a length 2
# tensor.
dist.prob(3.0)
```
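  The CDF is defined on the interval [-pi, pi) and saturates at 0 and 1 outside
  of it, unlike `tfp.distributions.VonMises` or `scipy.stats.vonmises`. A small
  sketch (assuming `jnp` is `jax.numpy`):
  ```python
  dist = distrax.VonMises(loc=0.0, concentration=1.0)
  dist.cdf(-jnp.pi)  # 0.
  dist.cdf(0.0)      # 0.5, by symmetry around loc=0.
  dist.cdf(jnp.pi)   # 1.
  dist.cdf(10.0)     # Still 1.; values above pi saturate.
  ```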
"""
equiv_tfp_cls = tfd.VonMises
def __init__(self, loc: Numeric, concentration: Numeric):
super().__init__()
self._loc = conversion.as_float_array(loc)
self._concentration = conversion.as_float_array(concentration)
self._batch_shape = jax.lax.broadcast_shapes(
self._loc.shape, self._concentration.shape
)
@property
def loc(self) -> Array:
"""The circular mean of the distribution."""
return jnp.broadcast_to(self._loc, self.batch_shape)
@property
def concentration(self) -> Array:
"""The concentration of the distribution."""
return jnp.broadcast_to(self._concentration, self.batch_shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
def mean(self) -> Array:
"""The circular mean of the distribution."""
return self.loc
def variance(self) -> Array:
"""The circular variance of the distribution."""
conc = self._concentration
return 1. - jax.scipy.special.i1e(conc) / jax.scipy.special.i0e(conc)
def prob(self, value: EventT) -> Array:
"""The probability of value under the distribution."""
conc = self._concentration
unnormalized_prob = jnp.exp(conc * (jnp.cos(value - self._loc) - 1.))
normalization = (2. * math.pi) * jax.scipy.special.i0e(conc)
return unnormalized_prob / normalization
def log_prob(self, value: EventT) -> Array:
"""The logarithm of the probability of value under the distribution."""
conc = self._concentration
i_0 = jax.scipy.special.i0(conc)
return (
conc * jnp.cos(value - self._loc) - math.log(2 * math.pi) - jnp.log(i_0)
)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""Returns `n` samples in [-pi, pi)."""
out_shape = (n,) + self.batch_shape
conc = self._concentration
dtype = jnp.result_type(self._loc, self._concentration)
sample = _von_mises_sample(out_shape, conc, key, dtype) + self._loc
return _convert_angle_to_standard(sample)
def entropy(self) -> Array:
"""Returns the entropy."""
conc = self._concentration
i0e = jax.scipy.special.i0e(conc)
i1e = jax.scipy.special.i1e(conc)
return conc * (1 - i1e / i0e) + math.log(2 * math.pi) + jnp.log(i0e)
def mode(self) -> Array:
"""The mode of the distribution."""
return self.mean()
def cdf(self, value: EventT) -> Array:
"""The CDF of `value` under the distribution.
    Note that the CDF takes values of 0. or 1. for values outside of
    [-pi, pi); this behaviour differs from that of
    `tensorflow_probability.VonMises` or `scipy.stats.vonmises`.
Args:
value: the angle evaluated under the distribution.
Returns:
the circular CDF of value.
"""
dtype = jnp.result_type(value, self._loc, self._concentration)
loc = _convert_angle_to_standard(self._loc)
return jnp.clip(
_von_mises_cdf(value - loc, self._concentration, dtype)
- _von_mises_cdf(-math.pi - loc, self._concentration, dtype),
a_min=0.,
a_max=1.,
)
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def survival_function(self, value: EventT) -> Array:
"""See `Distribution.survival_function`."""
dtype = jnp.result_type(value, self._loc, self._concentration)
loc = _convert_angle_to_standard(self._loc)
return jnp.clip(
_von_mises_cdf(math.pi - loc, self._concentration, dtype)
- _von_mises_cdf(value - loc, self._concentration, dtype),
a_min=0.,
a_max=1.,
)
def log_survival_function(self, value: EventT) -> Array:
"""See `Distribution.log_survival_function`."""
return jnp.log(self.survival_function(value))
def __getitem__(self, index) -> 'VonMises':
index = distribution.to_batch_shape_index(self.batch_shape, index)
return VonMises(
loc=self.loc[index],
concentration=self.concentration[index],
)
def _convert_angle_to_standard(angle: Array) -> Array:
"""Converts angle in radians to representation between [-pi, pi)."""
num_periods = jnp.round(angle / (2 * math.pi))
rep = angle - (2 * math.pi) * num_periods
return rep
@functools.partial(jax.custom_jvp, nondiff_argnums=(0, 2, 3))
def _von_mises_sample(
shape: Union[IntLike, Sequence[IntLike]],
concentration: Array,
seed: PRNGKey,
dtype: jnp.dtype,
) -> Array:
"""Rejection sample from the standard von Mises which has loc=0."""
concentration = jnp.asarray(concentration, dtype=dtype)
s_concentration_cutoff_dict = {
jnp.float16.dtype: 1.8e-1,
jnp.float32.dtype: 2e-2,
jnp.float64.dtype: 1.2e-4,
}
s_concentration_cutoff = s_concentration_cutoff_dict[dtype]
use_exact = concentration > s_concentration_cutoff
# Avoid NaN's, even when not used later.
conc = jnp.where(use_exact, concentration, 1.)
r = 1. + jnp.sqrt(1 + 4 * jnp.square(conc))
rho = (r - jnp.sqrt(2. * r)) / (2 * conc)
s_exact = (1. + jnp.square(rho)) / (2. * rho)
s_approximate = 1. / jnp.clip(concentration, a_min=1e-7)
s = jnp.where(use_exact, s_exact, s_approximate)
def loop_body(arg):
done, u_in, w, seed, count = arg
del u_in
u_seed, v_seed, next_seed = jax.random.split(seed, 3)
u = jax.random.uniform(
u_seed, shape=shape, dtype=dtype, minval=-1., maxval=1.
)
z = jnp.cos(math.pi * u)
w = jnp.where(done, w, (1 + s * z) / (s + z))
y = concentration * (s - w)
v = jax.random.uniform(v_seed, shape, dtype=dtype, minval=0., maxval=1.)
# Use `logical_not` to accept all "nan" samples.
accept = jnp.logical_not(y * jnp.exp(1 - y) < v)
return jnp.logical_or(accept, done), u, w, next_seed, count + 1
def loop_cond(arg):
done, u_in, w, seed, count = arg
del u_in, w, seed
# The rejection sampling is actually very efficient. With the worst
# concentration, about half of the samples are rejected. So only
# 1 in 1.51e25 samples will ever hit this counter on the worst possible
# concentration, which is a point way beyond numerical accuracy anyway.
return jnp.logical_and(jnp.any(jnp.logical_not(done)), count < 100)
_, u, w, _, _ = jax.lax.while_loop(
loop_cond,
loop_body,
init_val=(
jnp.zeros(shape, dtype=jnp.bool_),
jnp.zeros(shape, dtype=dtype),
jnp.zeros(shape, dtype=dtype),
seed,
0
)
)
return jnp.sign(u) * jnp.arccos(jnp.clip(w, a_min=-1., a_max=1.))
# Since rejection sampling does not permit autodiff, add an analytic gradient.
@_von_mises_sample.defjvp
def _von_mises_sample_jvp(
shape: Union[IntLike, Sequence[IntLike]],
seed: PRNGKey,
dtype: jnp.dtype,
primals: Tuple[Array],
tangents: Tuple[Array],
) -> Tuple[Array, Array]:
"""Returns the jvp of the von Mises sample operation."""
concentration, = primals
dconcentration, = tangents
concentration = jnp.clip(concentration, a_min=1e-7)
samples = _von_mises_sample(shape, concentration, seed, dtype)
vectorized_grad_cdf = jnp.vectorize(
jax.grad(_von_mises_cdf, argnums=1),
signature='(),()->()',
excluded=(2,),
)
dcdf_dconcentration = vectorized_grad_cdf(samples, concentration, dtype)
inv_prob = jnp.exp(-concentration * (jnp.cos(samples) - 1.)) * (
(2. * math.pi) * jax.scipy.special.i0e(concentration)
)
dcdf_dconcentration = cast(chex.Array, dcdf_dconcentration)
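  # Implicit reparameterization gradient (reference [2] in `_von_mises_cdf`):
  # writing F for the CDF and p for the density, differentiating
  # F(samples; concentration) = const gives
  #   d(samples)/d(concentration) = -(dF/dconcentration) / p(samples),
  # where `inv_prob` above is 1 / p(samples).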
dsamples = dconcentration * (-inv_prob * dcdf_dconcentration)
return samples, dsamples
@functools.partial(jax.custom_jvp, nondiff_argnums=(2,))
def _von_mises_cdf(
value: Array,
concentration: Array,
dtype: jnp.dtype,
) -> Array:
"""Returns the cumulative density function (CDF) of von Mises distribution.
Denote the density of VonMises(loc=0, concentration=concentration) by p(t).
The CDF at the point x is defined as int_{-pi}^x p(t) dt when x is in the
interval [-pi, pi]. Below -pi, the CDF is zero, above pi, it is one.
The CDF is not available in closed form. Instead, we use the method [1]
which uses either a series expansion or a Normal approximation, depending on
the value of concentration.
We also compute the derivative of the CDF w.r.t. both x and concentration
using the method described in [2].
Args:
value: The point at which to evaluate the CDF.
concentration: The concentration parameter of the von Mises distribution.
dtype: Type of the return value.
Returns:
The value of the CDF computed elementwise.
References:
[1] G. Hill "Algorithm 518: Incomplete Bessel Function I_0. The Von Mises
Distribution." ACM Transactions on Mathematical Software, 1977.
[2] Figurnov, M., Mohamed, S. and Mnih, A., "Implicit reparameterization
gradients." Advances in Neural Information Processing Systems, 31, 2018.
"""
primals = (value, concentration)
tangents = (jnp.zeros_like(value), jnp.zeros_like(concentration))
primal_out, _ = _von_mises_cdf_jvp(dtype, primals, tangents)
return primal_out
# Use a custom jvp to increase numerical accuracy.
@_von_mises_cdf.defjvp
def _von_mises_cdf_jvp(
dtype: jnp.dtype,
primals: Tuple[Array, Array],
tangents: Tuple[Array, Array],
):
"""Returns the jvp CDF of a von Mises."""
x, concentration = primals
dx, dconcentration = tangents
num_periods = jnp.round(x / (2 * math.pi))
x = x - (2 * math.pi) * num_periods
# This is the cutoff-concentration for choosing between the two numerical
# recipes for computing the CDF. For concentrations larger than
# `concentration_cutoff`, a Normal approximation is used.
concentration_cutoff = 10.5
cdf_series, dcdf_dconcentration_series = _von_mises_cdf_series(
x, concentration, dtype
)
cdf_normal, dcdf_dconcentration_normal = _von_mises_cdf_normal(
x, concentration, dtype
)
use_series = concentration < concentration_cutoff
cdf = jnp.where(use_series, cdf_series, cdf_normal)
cdf = cdf + num_periods
dcdf_dconcentration = jnp.where(
use_series,
dcdf_dconcentration_series,
dcdf_dconcentration_normal,
)
prob = jnp.exp(concentration * (jnp.cos(x) - 1.)) / (
(2. * math.pi) * jax.scipy.special.i0e(concentration)
)
return cdf, dconcentration * dcdf_dconcentration + dx * prob
def _von_mises_cdf_series(
value: Array,
concentration: Array,
dtype: jnp.dtype,
num_terms: int = 20,
) -> Tuple[Array, Array]:
"""Computes the CDF based on a series of `num_terms` terms."""
rn = jnp.zeros_like(value, dtype=dtype)
drn_dconcentration = jnp.zeros_like(value, dtype=dtype)
vn = jnp.zeros_like(value, dtype=dtype)
dvn_dconcentration = jnp.zeros_like(value, dtype=dtype)
for n in range(num_terms, 0, -1):
denominator = 2. * n / concentration + rn
ddenominator_dk = -2. * n / jnp.square(concentration) + drn_dconcentration
rn = 1. / denominator
drn_dconcentration = -ddenominator_dk / jnp.square(denominator)
multiplier = jnp.sin(n * value) / n + vn
vn = rn * multiplier
dvn_dconcentration = (
drn_dconcentration * multiplier + rn * dvn_dconcentration
)
cdf = .5 + value / (2. * math.pi) + vn / math.pi
dcdf_dconcentration = dvn_dconcentration / math.pi
# Clip the result to [0, 1].
cdf_clipped = jnp.clip(cdf, 0., 1.)
# The clipped values do not depend on concentration anymore, so set their
# derivative to zero.
dcdf_dconcentration = (
dcdf_dconcentration * jnp.logical_and(cdf >= 0., cdf <= 1.)
)
return cdf_clipped, dcdf_dconcentration
def _von_mises_cdf_normal(
value: Array,
concentration: Array,
dtype: jnp.dtype,
) -> Tuple[Array, Array]:
"""Computes the CDF, based on a Normal approximation to the von Mises."""
def cdf_func(value, concentration):
"""A helper function that is passed to value_and_gradient."""
# z is an "almost Normally distributed" random variable.
z = (
jnp.sqrt(2. / math.pi) /
jax.scipy.special.i0e(concentration) * jnp.sin(.5 * value)
)
# This is a correction described in [1].
# It reduces the error of the Normal approximation.
z2 = jnp.square(z)
z3 = z2 * z
z4 = jnp.square(z2)
c = 24. * concentration
c1 = 56.
xi = z - z3 / jnp.square(
(c - 2. * z2 - 16.) / 3. -
(z4 + (7. / 4.) * z2 + 167. / 2.) / (c - c1 - z2 + 3.)
)
distrib = normal.Normal(
loc=jnp.array(0., dtype),
scale=jnp.array(1., dtype)
)
return distrib.cdf(xi)
vectorized_cdf_with_grad = jnp.vectorize(
jax.value_and_grad(cdf_func, argnums=1),
signature='(),()->(),()',
)
return vectorized_cdf_with_grad(value, concentration)
def _kl_divergence_vonmises_vonmises(
dist1: Union[VonMises, tfd.VonMises],
dist2: Union[VonMises, tfd.VonMises],
*unused_args, **unused_kwargs,
) -> Array:
"""Batched KL divergence KL(d1 || d2) between von Mises distributions.
Args:
dist1: A VonMises distribution.
dist2: A VonMises distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
i0e_concentration1 = jax.scipy.special.i0e(dist1.concentration)
i1e_concentration1 = jax.scipy.special.i1e(dist1.concentration)
i0e_concentration2 = jax.scipy.special.i0e(dist2.concentration)
return (
(dist2.concentration - dist1.concentration) +
jnp.log(i0e_concentration2 / i0e_concentration1) +
(i1e_concentration1 / i0e_concentration1) * (
dist1.concentration
- dist2.concentration * jnp.cos(dist1.loc - dist2.loc)
)
)
# Register the KL functions with TFP.
tfd.RegisterKL(VonMises, VonMises)(_kl_divergence_vonmises_vonmises)
tfd.RegisterKL(VonMises, VonMises.equiv_tfp_cls)(
_kl_divergence_vonmises_vonmises
)
tfd.RegisterKL(VonMises.equiv_tfp_cls, VonMises)(
_kl_divergence_vonmises_vonmises
)
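# The sketch below is illustrative only (the helper name is hypothetical and
# not part of the library API); it shows sampling, log-density evaluation and
# the KL divergence registered above.
def _example_von_mises_usage():
  dist = VonMises(loc=0., concentration=2.)
  samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=(4,))
  log_probs = dist.log_prob(samples)
  kl = dist.kl_divergence(VonMises(loc=0.5, concentration=1.))
  return samples, log_probs, kl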
| distrax-master | distrax/_src/distributions/von_mises.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `mixture_of_two.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import mixture_of_two
from distrax._src.utils import equivalence
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
BATCH_SIZE = 5
PROPORTION = 0.3
class MixtureOfTwoTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(mixture_of_two.MixtureOfTwo)
components = self._get_components(hk.PRNGSequence(0))
self.component_a = components[0]
self.component_b = components[1]
self.tfp_mixture = components[2]
def assertion_fn(self, rtol: float = 1e-3):
return lambda x, y: np.testing.assert_allclose(x, y, rtol=rtol)
def _get_components(self, rng_seq):
loc = jax.random.normal(next(rng_seq), (BATCH_SIZE, 2))
scale = jax.nn.sigmoid(jax.random.normal(next(rng_seq), (BATCH_SIZE, 2)))
component_a = tfp.distributions.Normal(loc=loc[:, 0], scale=scale[:, 0])
component_b = tfp.distributions.Normal(loc=loc[:, 1], scale=scale[:, 1])
full_proportion = jnp.full([BATCH_SIZE], PROPORTION)
tfp_mixture = tfp.distributions.MixtureSameFamily(
tfp.distributions.Categorical(probs=jnp.stack(
[full_proportion, 1 - full_proportion], axis=-1)),
components_distribution=tfp.distributions.Normal(loc, scale))
return component_a, component_b, tfp_mixture
def test_mixture_methods(self):
rng_seq = hk.PRNGSequence(0)
mix = self.distrax_cls(PROPORTION, self.component_a, self.component_b)
sample_shape = (8, 1024)
sample = self.tfp_mixture.sample(
sample_shape=sample_shape, seed=next(rng_seq))
other_sample = mix.sample(sample_shape=sample_shape, seed=next(rng_seq))
chex.assert_equal_shape([sample, other_sample])
np.testing.assert_allclose(
sample.mean(axis=[0, 1]), other_sample.mean(axis=[0, 1]), atol=1e-1)
np.testing.assert_allclose(
sample.std(axis=[0, 1]), other_sample.std(axis=[0, 1]), atol=1e-1)
np.testing.assert_allclose(
self.tfp_mixture.log_prob(sample),
mix.log_prob(sample), atol=1e-3)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('empty shape', ()),
('int shape', 10),
('2-tuple shape', (10, 20)),
)
def test_sample_and_log_prob(self, sample_shape):
mix = self.distrax_cls(PROPORTION, self.component_a, self.component_b)
expected_event = mix.sample(
seed=jax.random.PRNGKey(42), sample_shape=sample_shape)
expected_log_prob = mix.log_prob(expected_event)
event, log_prob = self.variant(
mix.sample_and_log_prob, static_argnames='sample_shape')(
seed=jax.random.PRNGKey(42), sample_shape=sample_shape)
np.testing.assert_allclose(expected_log_prob, log_prob, atol=1e-3)
np.testing.assert_allclose(expected_event, event, atol=1e-3)
def test_jitable(self):
@jax.jit
def jitted_function(event, dist):
return dist.log_prob(event)
mix = self.distrax_cls(PROPORTION, self.component_a, self.component_b)
event = mix.sample(seed=jax.random.PRNGKey(4242))
log_prob = mix.log_prob(event)
jitted_log_prob = jitted_function(event, mix)
chex.assert_trees_all_close(
jitted_log_prob, log_prob, atol=1e-4, rtol=1e-4)
def test_prob_a(self):
mix = self.distrax_cls(PROPORTION, self.component_a, self.component_b)
self.assertEqual(mix.prob_a, PROPORTION)
def test_prob_b(self):
mix = self.distrax_cls(PROPORTION, self.component_a, self.component_b)
self.assertEqual(mix.prob_b, 1. - PROPORTION)
def test_batch_shape(self):
mix = self.distrax_cls(PROPORTION, self.component_a, self.component_b)
self.assertEqual(mix.batch_shape, (BATCH_SIZE,))
self.assertEqual(mix.batch_shape, (BATCH_SIZE,))
def test_event_shape(self):
mix = self.distrax_cls(PROPORTION, self.component_a, self.component_b)
self.assertEqual(mix.event_shape, ())
self.assertEqual(mix.event_shape, ())
@parameterized.named_parameters(
('single element', 1, ()),
('range', slice(-1), (4,)),
)
def test_slice(self, slice_, expected_batch_shape):
mix = self.distrax_cls(PROPORTION, self.component_a, self.component_b)
sliced_dist = mix[slice_]
self.assertEqual(sliced_dist.batch_shape, expected_batch_shape)
self.assertEqual(sliced_dist.event_shape, mix.event_shape)
self.assertIsInstance(sliced_dist, self.distrax_cls)
def test_invalid_parameters(self):
rng_seq = hk.PRNGSequence(0)
loc = jax.random.normal(next(rng_seq), (BATCH_SIZE,))
scale = jax.nn.sigmoid(jax.random.normal(next(rng_seq), (BATCH_SIZE,)))
concentration = jax.random.normal(next(rng_seq), (BATCH_SIZE,))
with self.assertRaisesRegex(ValueError, 'must have the same event shape'):
component_a = tfp.distributions.Normal(loc=loc, scale=scale)
component_b = tfp.distributions.Dirichlet(concentration=concentration)
self.distrax_cls(PROPORTION, component_a, component_b)
with self.assertRaisesRegex(ValueError, 'must have the same batch shape'):
component_a = tfp.distributions.Normal(loc=loc, scale=scale)
component_b = tfp.distributions.Normal(loc=loc[1:], scale=scale[1:])
self.distrax_cls(PROPORTION, component_a, component_b)
with self.assertRaisesRegex(ValueError, 'must have the same dtype'):
component_a = tfp.distributions.Normal(
loc=loc.astype(jnp.float16), scale=scale.astype(jnp.float16))
component_b = tfp.distributions.Normal(loc=loc, scale=scale)
self.distrax_cls(PROPORTION, component_a, component_b)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/mixture_of_two_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution abstract base class."""
import abc
import collections.abc
import contextlib
import functools
import operator
import typing
from typing import Any, Generic, Iterable, Mapping, Sequence, Tuple, TypeVar, Union
import chex
from distrax._src.utils import jittable
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
PRNGKey = chex.PRNGKey
IntLike = Union[int, np.int16, np.int32, np.int64]
# Generic type.
T = TypeVar('T')
# Generic nested type.
NestedT = Union[T, Iterable['NestedT'], Mapping[Any, 'NestedT']] # pylint: disable=invalid-name
# Nested types.
EventT = TypeVar('EventT', bound=NestedT[Array])
ShapeT = TypeVar('ShapeT', bound=NestedT[Tuple[int, ...]])
DTypeT = TypeVar('DTypeT', bound=NestedT[jnp.dtype])
class Distribution(
jittable.Jittable, Generic[EventT, ShapeT, DTypeT], metaclass=abc.ABCMeta):
"""Jittable abstract base class for all Distrax distributions."""
@abc.abstractmethod
def _sample_n(self, key: PRNGKey, n: int) -> EventT:
"""Returns `n` samples."""
def _sample_n_and_log_prob(
self,
key: PRNGKey,
n: int,
) -> Tuple[EventT, Array]:
"""Returns `n` samples and their log probs.
By default, it just calls `log_prob` on the generated samples. However, for
many distributions it's more efficient to compute the log prob of samples
than of arbitrary events (for example, there's no need to check that a
sample is within the distribution's domain). If that's the case, a subclass
may override this method with a more efficient implementation.
Args:
key: PRNG key.
n: Number of samples to generate.
Returns:
A tuple of `n` samples and their log probs.
"""
samples = self._sample_n(key, n)
log_prob = self.log_prob(samples)
return samples, log_prob
@abc.abstractmethod
def log_prob(self, value: EventT) -> Array:
"""Calculates the log probability of an event.
Args:
value: An event.
Returns:
The log probability log P(value).
"""
def prob(self, value: EventT) -> Array:
"""Calculates the probability of an event.
Args:
value: An event.
Returns:
The probability P(value).
"""
return jnp.exp(self.log_prob(value))
@property
@abc.abstractmethod
def event_shape(self) -> ShapeT:
"""Shape of event of distribution samples."""
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
sample_spec = jax.eval_shape(
self.sample, seed=jax.random.PRNGKey(0), sample_shape=())
if not self.event_shape:
batch_shapes = jax.tree_util.tree_map(lambda x: x.shape, sample_spec)
else:
batch_shapes = jax.tree_util.tree_map(
lambda s, e: s.shape[:s.ndim - len(e)], sample_spec, self.event_shape)
# Get flat batch shapes.
batch_shapes = jax.tree_util.tree_structure(sample_spec).flatten_up_to(
batch_shapes)
if not batch_shapes:
return ()
# Ensure batch shapes are consistent.
batch_shape = batch_shapes[0]
for i in range(1, len(batch_shapes)):
np.testing.assert_equal(batch_shape, batch_shapes[i])
return batch_shape
@property
def name(self) -> str:
"""Distribution name."""
return type(self).__name__
@property
def dtype(self) -> DTypeT:
"""The data type of the samples generated by the distribution."""
sample_spec = jax.eval_shape(
self.sample, seed=jax.random.PRNGKey(0), sample_shape=())
return jax.tree_util.tree_map(lambda x: x.dtype, sample_spec)
def sample(self,
*,
seed: Union[IntLike, PRNGKey],
sample_shape: Union[IntLike, Sequence[IntLike]] = ()) -> EventT:
"""Samples an event.
Args:
seed: PRNG key or integer seed.
sample_shape: Additional leading dimensions for sample.
Returns:
A sample of shape `sample_shape + self.batch_shape + self.event_shape`.
"""
rng, sample_shape = convert_seed_and_sample_shape(seed, sample_shape)
num_samples = functools.reduce(operator.mul, sample_shape, 1) # product
samples = self._sample_n(rng, num_samples)
return jax.tree_util.tree_map(
lambda t: t.reshape(sample_shape + t.shape[1:]), samples)
def sample_and_log_prob(
self,
*,
seed: Union[IntLike, PRNGKey],
sample_shape: Union[IntLike, Sequence[IntLike]] = ()
) -> Tuple[EventT, Array]:
"""Returns a sample and associated log probability. See `sample`."""
rng, sample_shape = convert_seed_and_sample_shape(seed, sample_shape)
num_samples = functools.reduce(operator.mul, sample_shape, 1) # product
samples, log_prob = self._sample_n_and_log_prob(rng, num_samples)
samples, log_prob = jax.tree_util.tree_map(
lambda t: t.reshape(sample_shape + t.shape[1:]), (samples, log_prob))
return samples, log_prob
def kl_divergence(self, other_dist, **kwargs) -> Array:
"""Calculates the KL divergence to another distribution.
Args:
      other_dist: A compatible Distrax or TFP Distribution.
**kwargs: Additional kwargs.
Returns:
The KL divergence `KL(self || other_dist)`.
"""
return tfd.kullback_leibler.kl_divergence(self, other_dist, **kwargs)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `entropy`.')
def log_cdf(self, value: EventT) -> Array:
"""Evaluates the log cumulative distribution function at `value`.
Args:
value: An event.
Returns:
The log CDF evaluated at value, i.e. log P[X <= value].
"""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `log_cdf`.')
def cdf(self, value: EventT) -> Array:
"""Evaluates the cumulative distribution function at `value`.
Args:
value: An event.
Returns:
The CDF evaluated at value, i.e. P[X <= value].
"""
return jnp.exp(self.log_cdf(value))
def survival_function(self, value: EventT) -> Array:
"""Evaluates the survival function at `value`.
    Note that, by default, the survival function is computed as
    `1 - CDF(value)`, which is not necessarily numerically stable. Subclasses
    should override this method with a more stable definition where one
    exists.
Args:
value: An event.
Returns:
The survival function evaluated at `value`, i.e. P[X > value]
"""
if not self.event_shape:
# Defined for univariate distributions only.
return 1. - self.cdf(value)
else:
raise NotImplementedError('`survival_function` is not defined for '
f'distribution `{self.name}`.')
def log_survival_function(self, value: EventT) -> Array:
"""Evaluates the log of the survival function at `value`.
    Note that, by default, the log survival function is computed as
    `log1p(-CDF(value))`, which is not necessarily numerically stable.
    Subclasses should override this method with a more stable definition where
    one exists.
Args:
value: An event.
Returns:
The log of the survival function evaluated at `value`, i.e.
log P[X > value]
"""
if not self.event_shape:
# Defined for univariate distributions only.
return jnp.log1p(-self.cdf(value))
else:
raise NotImplementedError('`log_survival_function` is not defined for '
f'distribution `{self.name}`.')
def mean(self) -> EventT:
"""Calculates the mean."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `mean`.')
def median(self) -> EventT:
"""Calculates the median."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `median`.')
def variance(self) -> EventT:
"""Calculates the variance."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `variance`.')
def stddev(self) -> EventT:
"""Calculates the standard deviation."""
return jnp.sqrt(self.variance())
def mode(self) -> EventT:
"""Calculates the mode."""
raise NotImplementedError(
f'Distribution `{self.name}` does not implement `mode`.')
def cross_entropy(self, other_dist, **kwargs) -> Array:
"""Calculates the cross entropy to another distribution.
Args:
      other_dist: A compatible Distrax or TFP Distribution.
**kwargs: Additional kwargs.
Returns:
The cross entropy `H(self || other_dist)`.
"""
return self.kl_divergence(other_dist, **kwargs) + self.entropy()
@contextlib.contextmanager
def _name_and_control_scope(self, *unused_a, **unused_k):
yield
def __getitem__(self, index) -> 'Distribution':
"""Returns a matching distribution obtained by indexing the batch shape.
Args:
index: An object, typically int or slice (or a tuple thereof), used for
indexing the distribution.
"""
raise NotImplementedError(f'Indexing not implemented for `{self.name}`.')
def convert_seed_and_sample_shape(
seed: Union[IntLike, PRNGKey],
sample_shape: Union[IntLike, Sequence[IntLike]]
) -> Tuple[PRNGKey, Tuple[int, ...]]:
"""Shared functionality to ensure that seeds and shapes are the right type."""
if not isinstance(sample_shape, collections.abc.Sequence):
sample_shape = (sample_shape,)
sample_shape = tuple(map(int, sample_shape))
if isinstance(seed, (int, np.signedinteger)):
rng = jax.random.PRNGKey(seed)
else: # key is of type PRNGKey
rng = seed
return rng, sample_shape # type: ignore[bad-return-type]
def to_batch_shape_index(
batch_shape: Tuple[int, ...],
index,
) -> Tuple[jnp.ndarray, ...]:
"""Utility function that transforms the index to respect the batch shape.
When indexing a distribution we only want to index based on the batch shape.
  For example, a Categorical with logits shaped (2, 3, 4) has a batch shape
  of (2, 3) and 4 categories. Indexing this distribution creates a new
distribution with indexed logits. If the index is [0], the new distribution's
logits will be shaped (3, 4). But if the index is [..., -1] the new logits
should be shaped (2, 4), but applying the index operation on logits directly
will result in shape (2, 3). This function fixes such indices such that they
are only applied on the batch shape.
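  Concretely, `to_batch_shape_index((2, 3), (..., -1))` returns
  `(array([0, 1]), array([2, 2]))`; indexing logits of shape (2, 3, 4) with
  this tuple yields the intended shape (2, 4).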
Args:
batch_shape: Distribution's batch_shape.
index: An object, typically int or slice (or a tuple thereof), used for
indexing the distribution.
Returns:
A new index that is only applied on the batch shape.
"""
try:
new_index = [x[index] for x in np.indices(batch_shape)]
return tuple(new_index)
except IndexError as e:
raise IndexError(f'Batch shape `{batch_shape}` not compatible with index '
f'`{index}`.') from e
DistributionLike = Union[Distribution, tfd.Distribution]
DistributionT = typing.TypeVar('DistributionT', bound=Distribution)
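# The class below is an illustrative sketch only (it is hypothetical and not
# part of the library): a minimal concrete subclass needs `_sample_n`,
# `log_prob` and `event_shape`; `sample`, `prob`, `batch_shape`, `dtype`, etc.
# are then inherited from `Distribution`.
class _ExampleUniform01(Distribution):
  """Toy uniform distribution on [0, 1), for illustration only."""
  def _sample_n(self, key: PRNGKey, n: int) -> Array:
    return jax.random.uniform(key, shape=(n,))
  def log_prob(self, value: EventT) -> Array:
    # The density is 1 on [0, 1); this sketch ignores values outside it.
    return jnp.zeros_like(value)
  @property
  def event_shape(self) -> Tuple[int, ...]:
    return ()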
| distrax-master | distrax/_src/distributions/distribution.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `mvn_diag_plus_low_rank.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions.mvn_diag_plus_low_rank import MultivariateNormalDiagPlusLowRank
from distrax._src.utils import equivalence
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
def _covariance_matrix_from_low_rank(
scale_diag, scale_u_matrix, scale_v_matrix):
"""Constructs the covariance matrix from the low-rank matrices."""
if scale_u_matrix is not None:
if scale_v_matrix is None:
scale_v_matrix = np.copy(scale_u_matrix)
scale_v_matrix_t = np.vectorize(
np.transpose, signature='(k,m)->(m,k)')(scale_v_matrix)
scale = np.matmul(scale_u_matrix, scale_v_matrix_t) + np.vectorize(
np.diag, signature='(k)->(k,k)')(scale_diag)
else:
scale = np.vectorize(np.diag, signature='(k)->(k,k)')(scale_diag)
scale_t = np.vectorize(np.transpose, signature='(k,k)->(k,k)')(scale)
return np.matmul(scale, scale_t)
class MultivariateNormalDiagPlusLowRankTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(MultivariateNormalDiagPlusLowRank)
@parameterized.named_parameters(
('all inputs are None', {}),
('scale_v_matrix is provided but scale_u_matrix is None', {
'loc': np.zeros((4,)),
'scale_v_matrix': np.ones((4, 2)),
}),
('wrong dimension of loc', {
'loc': np.array(0.),
}),
('wrong dimension of scale_diag', {
'scale_diag': np.array(0.),
}),
('wrong dimension of scale_u_matrix', {
'scale_u_matrix': np.ones((4,)),
}),
('wrong dimension of scale_v_matrix', {
'scale_u_matrix': np.ones((4, 2)),
'scale_v_matrix': np.ones((4,)),
}),
('last dimension of scale_u_matrix is zero', {
'scale_u_matrix': np.ones((4, 0)),
}),
('inconsistent dimensions of scale_u_matrix and scale_v_matrix', {
'scale_u_matrix': np.ones((4, 2)),
'scale_v_matrix': np.ones((4, 1)),
}),
('inconsistent event_dim across two params', {
'loc': np.zeros((4,)),
'scale_u_matrix': np.ones((5, 2)),
}),
('inconsistent event_dim across three params', {
'loc': np.zeros((4,)),
'scale_diag': np.ones((5,)),
'scale_u_matrix': np.ones((4, 2)),
}),
)
def test_raises_on_wrong_inputs(self, dist_kwargs):
with self.assertRaises(ValueError):
MultivariateNormalDiagPlusLowRank(**dist_kwargs)
@parameterized.named_parameters(
('loc provided', {'loc': np.zeros((4,))}),
('scale_diag provided', {'scale_diag': np.ones((4,))}),
('scale_u_matrix provided', {'scale_u_matrix': np.zeros((4, 1))}),
)
def test_default_properties(self, dist_kwargs):
dist = MultivariateNormalDiagPlusLowRank(**dist_kwargs)
self.assertion_fn(rtol=1e-3)(dist.loc, jnp.zeros((4,)))
self.assertion_fn(rtol=1e-3)(dist.scale_diag, jnp.ones((4,)))
self.assertion_fn(rtol=1e-3)(dist.scale_u_matrix, jnp.zeros((4, 1)))
self.assertion_fn(rtol=1e-3)(dist.scale_v_matrix, jnp.zeros((4, 1)))
@parameterized.named_parameters(
('unbatched', (), (4,), (4,), (4, 2), (4, 2)),
('batched loc', (7,), (7, 4), (4,), (4, 2), (4, 2)),
('batched scale_diag', (7,), (4,), (7, 4), (4, 2), (4, 2)),
('batched scale_u_matrix', (7,), (4,), (4,), (7, 4, 2), (4, 2)),
('batched scale_v_matrix', (7,), (4,), (4,), (4, 2), (7, 4, 2)),
)
def test_properties(self, batch_shape, loc_shape, scale_diag_shape,
scale_u_matrix_shape, scale_v_matrix_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
scale_diag = rng.normal(size=scale_diag_shape)
scale_u_matrix = rng.normal(size=scale_u_matrix_shape)
scale_v_matrix = rng.normal(size=scale_v_matrix_shape)
dist = MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_diag=scale_diag,
scale_u_matrix=scale_u_matrix,
scale_v_matrix=scale_v_matrix,
)
self.assertEqual(dist.batch_shape, batch_shape)
self.assertion_fn(rtol=1e-3)(
dist.loc, jnp.broadcast_to(loc, batch_shape + (4,)))
self.assertion_fn(rtol=1e-3)(
dist.scale_diag, jnp.broadcast_to(scale_diag, batch_shape + (4,)))
self.assertion_fn(rtol=1e-3)(
dist.scale_u_matrix,
jnp.broadcast_to(scale_u_matrix, batch_shape + (4, 2)))
self.assertion_fn(rtol=1e-3)(
dist.scale_v_matrix,
jnp.broadcast_to(scale_v_matrix, batch_shape + (4, 2)))
@chex.all_variants
@parameterized.named_parameters(
('unbatched, no shape', (), (4,), (4,), (4, 2), (4, 2)),
('batched loc, no shape', (), (7, 4), (4,), (4, 2), (4, 2)),
('batched scale_diag, no shape', (), (4,), (7, 4), (4, 2), (4, 2)),
('batched scale_u_matrix, no shape', (), (4,), (4,), (7, 4, 2), (4, 2)),
('batched scale_v_matrix, no shape', (), (4,), (4,), (4, 2), (7, 4, 2)),
('unbatched, with shape', (3,), (4,), (4,), (4, 2), (4, 2)),
('batched loc, with shape', (3,), (7, 4), (4,), (4, 2), (4, 2)),
('batched scale_diag, with shape', (3,), (4,), (7, 4), (4, 2), (4, 2)),
('batched scale_u_matrix, with shape',
(3,), (4,), (4,), (7, 4, 2), (4, 2)),
('batched scale_v_matrix, with shape',
(3,), (4,), (4,), (4, 2), (7, 4, 2)),
)
def test_sample_shape(self, sample_shape, loc_shape, scale_diag_shape,
scale_u_matrix_shape, scale_v_matrix_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
scale_diag = rng.normal(size=scale_diag_shape)
scale_u_matrix = rng.normal(size=scale_u_matrix_shape)
scale_v_matrix = rng.normal(size=scale_v_matrix_shape)
dist = MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_diag=scale_diag,
scale_u_matrix=scale_u_matrix,
scale_v_matrix=scale_v_matrix,
)
tfp_dist = tfd.MultivariateNormalFullCovariance(
loc=loc,
covariance_matrix=_covariance_matrix_from_low_rank(
scale_diag, scale_u_matrix, scale_v_matrix)
)
sample_fn = self.variant(
lambda rng: dist.sample(sample_shape=sample_shape, seed=rng))
distrax_samples = sample_fn(jax.random.PRNGKey(0))
tfp_samples = tfp_dist.sample(
sample_shape=sample_shape, seed=jax.random.PRNGKey(0))
self.assertEqual(distrax_samples.shape, tfp_samples.shape)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist_params = {
'loc': np.array([0., 0.], dtype),
'scale_diag': np.array([1., 1.], dtype)}
dist = MultivariateNormalDiagPlusLowRank(**dist_params)
samples = self.variant(dist.sample)(seed=jax.random.PRNGKey(0))
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('unbatched, unbatched value', (4,), (4,), (4,), (4, 2), (4, 2)),
('batched loc, unbatched value', (4,), (7, 4), (4,), (4, 2), (4, 2)),
('batched scale_diag, unbatched value',
(4,), (4,), (7, 4), (4, 2), (4, 2)),
('batched scale_u_matrix, unbatched value',
(4,), (4,), (4,), (7, 4, 2), (4, 2)),
('batched scale_v_matrix, unbatched value',
(4,), (4,), (4,), (4, 2), (7, 4, 2)),
('unbatched, batched value', (7, 4), (4,), (4,), (4, 2), (4, 2)),
('batched loc, batched value', (7, 4), (7, 4), (4,), (4, 2), (4, 2)),
('batched scale_diag, batched value',
(7, 4), (4,), (7, 4), (4, 2), (4, 2)),
('batched scale_u_matrix, batched value',
(7, 4), (4,), (4,), (7, 4, 2), (4, 2)),
('batched scale_v_matrix, batched value',
(7, 4), (4,), (4,), (4, 2), (7, 4, 2)),
)
def test_log_prob(self, value_shape, loc_shape, scale_diag_shape,
scale_u_matrix_shape, scale_v_matrix_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
scale_diag = rng.normal(size=scale_diag_shape)
scale_u_matrix = 0.1 * rng.normal(size=scale_u_matrix_shape)
scale_v_matrix = 0.1 * rng.normal(size=scale_v_matrix_shape)
dist = MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_diag=scale_diag,
scale_u_matrix=scale_u_matrix,
scale_v_matrix=scale_v_matrix,
)
tfp_dist = tfd.MultivariateNormalFullCovariance(
loc=loc,
covariance_matrix=_covariance_matrix_from_low_rank(
scale_diag, scale_u_matrix, scale_v_matrix)
)
value = rng.normal(size=value_shape)
self.assertion_fn(rtol=2e-3)(
self.variant(dist.log_prob)(value), tfp_dist.log_prob(value))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('unbatched', (4,), (4,), (4, 2), (4, 2)),
('batched loc', (7, 4), (4,), (4, 2), (4, 2)),
('batched scale_diag', (4,), (7, 4), (4, 2), (4, 2)),
('batched scale_u_matrix', (4,), (4,), (7, 4, 2), (4, 2)),
('batched scale_v_matrix', (4,), (4,), (4, 2), (7, 4, 2)),
('scale_u_matrix is None', (4,), (4,), None, None),
('scale_v_matrix is None', (4,), (4,), (4, 2), None),
)
def test_method(self, loc_shape, scale_diag_shape,
scale_u_matrix_shape, scale_v_matrix_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
scale_diag = rng.normal(size=scale_diag_shape)
if scale_u_matrix_shape is None:
scale_u_matrix = None
else:
scale_u_matrix = 0.1 * rng.normal(size=scale_u_matrix_shape)
if scale_v_matrix_shape is None:
scale_v_matrix = None
else:
scale_v_matrix = 0.1 * rng.normal(size=scale_v_matrix_shape)
dist = MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_diag=scale_diag,
scale_u_matrix=scale_u_matrix,
scale_v_matrix=scale_v_matrix,
)
tfp_dist = tfd.MultivariateNormalFullCovariance(
loc=loc,
covariance_matrix=_covariance_matrix_from_low_rank(
scale_diag, scale_u_matrix, scale_v_matrix)
)
for method in [
'entropy', 'mean', 'stddev', 'variance', 'covariance', 'mode']:
if method in ['stddev', 'variance']:
rtol = 1e-2
elif method in ['covariance']:
rtol = 8e-2
else:
rtol = 1e-3
with self.subTest(method=method):
fn = self.variant(getattr(dist, method))
self.assertion_fn(rtol=rtol)(fn(), getattr(tfp_dist, method)())
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
rng = np.random.default_rng(2022)
loc1 = rng.normal(size=(5, 1, 4))
scale_diag1 = rng.normal(size=(3, 4))
scale_u_matrix1 = 0.1 * rng.normal(size=(5, 1, 4, 2))
scale_perturb_diag1 = rng.normal(size=(5, 1, 2))
scale_v_matrix1 = scale_u_matrix1 * np.expand_dims(
scale_perturb_diag1, axis=-2)
loc2 = rng.normal(size=(3, 4))
scale_diag2 = rng.normal(size=(3, 4))
scale_u_matrix2 = 0.1 * rng.normal(size=(4, 2))
scale_perturb_diag2 = rng.normal(size=(2,))
scale_v_matrix2 = scale_u_matrix2 * np.expand_dims(
scale_perturb_diag2, axis=-2)
distrax_dist1 = MultivariateNormalDiagPlusLowRank(
loc=loc1,
scale_diag=scale_diag1,
scale_u_matrix=scale_u_matrix1,
scale_v_matrix=scale_v_matrix1,
)
distrax_dist2 = MultivariateNormalDiagPlusLowRank(
loc=loc2,
scale_diag=scale_diag2,
scale_u_matrix=scale_u_matrix2,
scale_v_matrix=scale_v_matrix2,
)
tfp_dist1 = tfd.MultivariateNormalDiagPlusLowRank(
loc=loc1,
scale_diag=scale_diag1,
scale_perturb_factor=scale_u_matrix1,
scale_perturb_diag=scale_perturb_diag1,
)
tfp_dist2 = tfd.MultivariateNormalDiagPlusLowRank(
loc=loc2,
scale_diag=scale_diag2,
scale_perturb_factor=scale_u_matrix2,
scale_perturb_diag=scale_perturb_diag2,
)
expected_result1 = getattr(tfp_dist1, function_string)(tfp_dist2)
expected_result2 = getattr(tfp_dist2, function_string)(tfp_dist1)
if mode_string == 'distrax_to_distrax':
result1 = self.variant(getattr(distrax_dist1, function_string))(
distrax_dist2)
result2 = self.variant(getattr(distrax_dist2, function_string))(
distrax_dist1)
elif mode_string == 'distrax_to_tfp':
result1 = self.variant(getattr(distrax_dist1, function_string))(tfp_dist2)
result2 = self.variant(getattr(distrax_dist2, function_string))(tfp_dist1)
elif mode_string == 'tfp_to_distrax':
result1 = self.variant(getattr(tfp_dist1, function_string))(distrax_dist2)
result2 = self.variant(getattr(tfp_dist2, function_string))(distrax_dist1)
self.assertion_fn(rtol=3e-3)(result1, expected_result1)
self.assertion_fn(rtol=3e-3)(result2, expected_result2)
def test_jittable(self):
super()._test_jittable(
dist_kwargs={'loc': np.zeros((4,))},
assertion_fn=self.assertion_fn(rtol=1e-3))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
rng = np.random.default_rng(2022)
loc = rng.normal(size=(6, 5, 4))
scale_diag = rng.normal(size=(4,))
scale_u_matrix = rng.normal(size=(1, 4, 2))
scale_v_matrix = rng.normal(size=(4, 2))
dist = MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_diag=scale_diag,
scale_u_matrix=scale_u_matrix,
scale_v_matrix=scale_v_matrix,
)
self.assertEqual(dist[slice_].batch_shape, loc[slice_].shape[:-1])
self.assertEqual(dist[slice_].event_shape, loc[slice_].shape[-1:])
self.assertion_fn(rtol=1e-3)(dist[slice_].mean(), loc[slice_])
def test_slice_ellipsis(self):
rng = np.random.default_rng(2022)
loc = rng.normal(size=(6, 5, 4))
scale_diag = rng.normal(size=(4,))
scale_u_matrix = rng.normal(size=(1, 4, 2))
scale_v_matrix = rng.normal(size=(4, 2))
dist = MultivariateNormalDiagPlusLowRank(
loc=loc,
scale_diag=scale_diag,
scale_u_matrix=scale_u_matrix,
scale_v_matrix=scale_v_matrix,
)
self.assertEqual(dist[..., -1].batch_shape, (6,))
self.assertEqual(dist[..., -1].event_shape, dist.event_shape)
self.assertion_fn(rtol=1e-3)(dist[..., -1].mean(), loc[:, -1, :])
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/mvn_diag_plus_low_rank_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gumbel distribution."""
import math
from typing import Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Gumbel(distribution.Distribution):
"""Gumbel distribution with location `loc` and `scale` parameters."""
equiv_tfp_cls = tfd.Gumbel
def __init__(self, loc: Numeric, scale: Numeric):
"""Initializes a Gumbel distribution.
Args:
loc: Mean of the distribution.
scale: Spread of the distribution.
"""
super().__init__()
self._loc = conversion.as_float_array(loc)
self._scale = conversion.as_float_array(scale)
self._batch_shape = jax.lax.broadcast_shapes(
self._loc.shape, self._scale.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
@property
def loc(self) -> Array:
"""Mean of the distribution."""
return jnp.broadcast_to(self._loc, self.batch_shape)
@property
def scale(self) -> Array:
"""Scale of the distribution."""
return jnp.broadcast_to(self._scale, self.batch_shape)
def _standardize(self, value: Array) -> Array:
"""Standardizes the input `value` in location and scale."""
return (value - self._loc) / self._scale
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
z = self._standardize(value)
return -(z + jnp.exp(-z)) - jnp.log(self._scale)
def _sample_from_std_gumbel(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._loc, self._scale)
return jax.random.gumbel(key, shape=out_shape, dtype=dtype)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_gumbel(key, n)
return self._scale * rnd + self._loc
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
rnd = self._sample_from_std_gumbel(key, n)
samples = self._scale * rnd + self._loc
log_prob = -(rnd + jnp.exp(-rnd)) - jnp.log(self._scale)
return samples, log_prob
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return jnp.log(self._scale) + 1. + jnp.euler_gamma
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
z = self._standardize(value)
return -jnp.exp(-z)
def mean(self) -> Array:
"""Calculates the mean."""
return self._loc + self._scale * jnp.euler_gamma
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self._scale * jnp.ones_like(self._loc) * jnp.pi / math.sqrt(6.)
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.square(self._scale * jnp.ones_like(self._loc) * jnp.pi) / 6.
def mode(self) -> Array:
"""Calculates the mode."""
return self.loc
def median(self) -> Array:
"""Calculates the median."""
return self._loc - self._scale * math.log(math.log(2.))
def __getitem__(self, index) -> 'Gumbel':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Gumbel(loc=self.loc[index], scale=self.scale[index])
def _kl_divergence_gumbel_gumbel(
dist1: Union[Gumbel, tfd.Gumbel],
dist2: Union[Gumbel, tfd.Gumbel],
*unused_args, **unused_kwargs,
) -> Array:
"""Batched KL divergence KL(dist1 || dist2) between two Gumbel distributions.
Args:
dist1: A Gumbel distribution.
dist2: A Gumbel distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
return (jnp.log(dist2.scale) - jnp.log(dist1.scale) + jnp.euler_gamma *
(dist1.scale / dist2.scale - 1.) +
jnp.expm1((dist2.loc - dist1.loc) / dist2.scale +
jax.lax.lgamma(dist1.scale / dist2.scale + 1.)) +
(dist1.loc - dist2.loc) / dist2.scale)
# Register the KL functions with TFP.
tfd.RegisterKL(Gumbel, Gumbel)(_kl_divergence_gumbel_gumbel)
tfd.RegisterKL(Gumbel, Gumbel.equiv_tfp_cls)(_kl_divergence_gumbel_gumbel)
tfd.RegisterKL(Gumbel.equiv_tfp_cls, Gumbel)(_kl_divergence_gumbel_gumbel)
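# The sketch below is illustrative only (the helper name is hypothetical and
# not part of the library API); it shows sampling with log-densities and the
# KL divergence registered above.
def _example_gumbel_usage():
  dist = Gumbel(loc=0., scale=1.)
  samples, log_prob = dist.sample_and_log_prob(
      seed=jax.random.PRNGKey(0), sample_shape=(2,))
  kl = dist.kl_divergence(Gumbel(loc=1., scale=2.))
  return samples, log_prob, kl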
| distrax-master | distrax/_src/distributions/gumbel.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MultivariateNormalDiag distribution."""
from typing import Optional
import chex
from distrax._src.bijectors.diag_linear import DiagLinear
from distrax._src.distributions import distribution
from distrax._src.distributions.mvn_from_bijector import MultivariateNormalFromBijector
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
EventT = distribution.EventT
def _check_parameters(
loc: Optional[Array], scale_diag: Optional[Array]) -> None:
"""Checks that the `loc` and `scale_diag` parameters are correct."""
chex.assert_not_both_none(loc, scale_diag)
if scale_diag is not None and not scale_diag.shape:
raise ValueError('If provided, argument `scale_diag` must have at least '
'1 dimension.')
if loc is not None and not loc.shape:
raise ValueError('If provided, argument `loc` must have at least '
'1 dimension.')
if loc is not None and scale_diag is not None and (
loc.shape[-1] != scale_diag.shape[-1]):
raise ValueError(f'The last dimension of arguments `loc` and '
f'`scale_diag` must coincide, but {loc.shape[-1]} != '
f'{scale_diag.shape[-1]}.')
class MultivariateNormalDiag(MultivariateNormalFromBijector):
"""Multivariate normal distribution on `R^k` with diagonal covariance."""
equiv_tfp_cls = tfd.MultivariateNormalDiag
def __init__(self,
loc: Optional[Array] = None,
scale_diag: Optional[Array] = None):
"""Initializes a MultivariateNormalDiag distribution.
Args:
loc: Mean vector of the distribution. Can also be a batch of vectors. If
not specified, it defaults to zeros. At least one of `loc` and
`scale_diag` must be specified.
scale_diag: Vector of standard deviations. Can also be a batch of vectors.
If not specified, it defaults to ones. At least one of `loc` and
`scale_diag` must be specified.
"""
_check_parameters(loc, scale_diag)
if scale_diag is None:
loc = conversion.as_float_array(loc)
scale_diag = jnp.ones(loc.shape[-1], loc.dtype)
elif loc is None:
scale_diag = conversion.as_float_array(scale_diag)
loc = jnp.zeros(scale_diag.shape[-1], scale_diag.dtype)
else:
loc = conversion.as_float_array(loc)
scale_diag = conversion.as_float_array(scale_diag)
    # Add leading dimensions to the parameters to match the batch shape. This
# prevents automatic rank promotion.
broadcasted_shapes = jnp.broadcast_shapes(loc.shape, scale_diag.shape)
loc = jnp.expand_dims(
loc, axis=list(range(len(broadcasted_shapes) - loc.ndim)))
scale_diag = jnp.expand_dims(
scale_diag, axis=list(range(len(broadcasted_shapes) - scale_diag.ndim)))
bias = jnp.zeros_like(loc, shape=loc.shape[-1:])
bias = jnp.expand_dims(
bias, axis=list(range(len(broadcasted_shapes) - bias.ndim)))
scale = DiagLinear(scale_diag)
super().__init__(loc=loc, scale=scale)
self._scale_diag = scale_diag
@property
def scale_diag(self) -> Array:
"""Scale of the distribution."""
return jnp.broadcast_to(
self._scale_diag, self.batch_shape + self.event_shape)
def _standardize(self, value: Array) -> Array:
return (value - self._loc) / self._scale_diag
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jnp.prod(jax.scipy.special.ndtr(self._standardize(value)), axis=-1)
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.sum(
jax.scipy.special.log_ndtr(self._standardize(value)), axis=-1)
def __getitem__(self, index) -> 'MultivariateNormalDiag':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return MultivariateNormalDiag(
loc=self.loc[index], scale_diag=self.scale_diag[index])
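# The sketch below is illustrative only (the helper name is hypothetical and
# not part of the library API); it shows construction, sampling and the
# factorized CDF implemented above.
def _example_mvn_diag_usage():
  dist = MultivariateNormalDiag(loc=jnp.zeros(3), scale_diag=0.5 * jnp.ones(3))
  x = dist.sample(seed=jax.random.PRNGKey(0))
  return dist.log_prob(x), dist.cdf(jnp.zeros(3))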
| distrax-master | distrax/_src/distributions/mvn_diag.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MultivariateNormalFullCovariance distribution."""
from typing import Optional
import chex
from distrax._src.distributions import distribution
from distrax._src.distributions.mvn_tri import MultivariateNormalTri
from distrax._src.utils import conversion
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
def _check_parameters(
loc: Optional[Array], covariance_matrix: Optional[Array]) -> None:
"""Checks that the inputs are correct."""
if loc is None and covariance_matrix is None:
raise ValueError(
'At least one of `loc` and `covariance_matrix` must be specified.')
if loc is not None and loc.ndim < 1:
raise ValueError('The parameter `loc` must have at least one dimension.')
if covariance_matrix is not None and covariance_matrix.ndim < 2:
raise ValueError(
f'The `covariance_matrix` must have at least two dimensions, but '
f'`covariance_matrix.shape = {covariance_matrix.shape}`.')
if covariance_matrix is not None and (
covariance_matrix.shape[-1] != covariance_matrix.shape[-2]):
raise ValueError(
f'The `covariance_matrix` must be a (batched) square matrix, but '
f'`covariance_matrix.shape = {covariance_matrix.shape}`.')
if loc is not None:
num_dims = loc.shape[-1]
if covariance_matrix is not None and (
covariance_matrix.shape[-1] != num_dims):
raise ValueError(
f'Shapes are not compatible: `loc.shape = {loc.shape}` and '
f'`covariance_matrix.shape = {covariance_matrix.shape}`.')
class MultivariateNormalFullCovariance(MultivariateNormalTri):
"""Multivariate normal distribution on `R^k`.
The `MultivariateNormalFullCovariance` distribution is parameterized by a
`k`-length location (mean) vector `b` and a covariance matrix `C` of size
`k x k` that must be positive definite and symmetric.
This class makes no attempt to verify that `C` is positive definite or
symmetric. It is the responsibility of the user to make sure that it is the
case.
"""
equiv_tfp_cls = tfd.MultivariateNormalFullCovariance
def __init__(self,
loc: Optional[Array] = None,
covariance_matrix: Optional[Array] = None):
"""Initializes a MultivariateNormalFullCovariance distribution.
Args:
loc: Mean vector of the distribution of shape `k` (can also be a batch of
such vectors). If not specified, it defaults to zeros.
covariance_matrix: The covariance matrix `C`. It must be a `k x k` matrix
(additional dimensions index batches). If not specified, it defaults to
the identity.
"""
loc = None if loc is None else conversion.as_float_array(loc)
covariance_matrix = None if covariance_matrix is None else (
conversion.as_float_array(covariance_matrix))
_check_parameters(loc, covariance_matrix)
if loc is not None:
num_dims = loc.shape[-1]
elif covariance_matrix is not None:
num_dims = covariance_matrix.shape[-1]
dtype = jnp.result_type(
*[x for x in [loc, covariance_matrix] if x is not None])
if loc is None:
loc = jnp.zeros((num_dims,), dtype=dtype)
if covariance_matrix is None:
self._covariance_matrix = jnp.eye(num_dims, dtype=dtype)
scale_tril = None
else:
self._covariance_matrix = covariance_matrix
scale_tril = jnp.linalg.cholesky(covariance_matrix)
super().__init__(loc=loc, scale_tri=scale_tril)
@property
def covariance_matrix(self) -> Array:
"""Covariance matrix `C`."""
return jnp.broadcast_to(
self._covariance_matrix,
self.batch_shape + self.event_shape + self.event_shape)
def covariance(self) -> Array:
"""Covariance matrix `C`."""
return self.covariance_matrix
def variance(self) -> Array:
"""Calculates the variance of all one-dimensional marginals."""
return jnp.vectorize(jnp.diag, signature='(k,k)->(k)')(
self.covariance_matrix)
def __getitem__(self, index) -> 'MultivariateNormalFullCovariance':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return MultivariateNormalFullCovariance(
loc=self.loc[index],
covariance_matrix=self.covariance_matrix[index])
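# The sketch below is illustrative only (the helper name is hypothetical and
# not part of the library API). This module only imports `jax.numpy`, so the
# sketch sticks to density and moment evaluation.
def _example_mvn_full_covariance_usage():
  dist = MultivariateNormalFullCovariance(
      loc=jnp.zeros(3), covariance_matrix=2. * jnp.eye(3))
  return dist.log_prob(jnp.zeros(3)), dist.variance()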
| distrax-master | distrax/_src/distributions/mvn_full_covariance.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Straight-through gradient sampling distribution."""
from distrax._src.distributions import categorical
from distrax._src.distributions import distribution
import jax
def straight_through_wrapper( # pylint: disable=invalid-name
Distribution,
) -> distribution.DistributionLike:
"""Wrap a distribution to use straight-through gradient for samples."""
def sample(self, seed, sample_shape=()): # pylint: disable=g-doc-args
"""Sampling with straight through biased gradient estimator.
Sample a value from the distribution, but backpropagate through the
underlying probability to compute the gradient.
References:
[1] Yoshua Bengio, Nicholas Léonard, Aaron Courville, Estimating or
Propagating Gradients Through Stochastic Neurons for Conditional
Computation, https://arxiv.org/abs/1308.3432
Args:
seed: a random seed.
sample_shape: the shape of the required sample.
Returns:
A sample with straight-through gradient.
"""
# pylint: disable=protected-access
obj = Distribution(probs=self._probs, logits=self._logits)
assert isinstance(obj, categorical.Categorical)
sample = obj.sample(seed=seed, sample_shape=sample_shape)
probs = obj.probs
padded_probs = _pad(probs, sample.shape)
# Keep sample unchanged, but add gradient through probs.
sample += padded_probs - jax.lax.stop_gradient(padded_probs)
return sample
def _pad(probs, shape):
"""Grow probs to have the same number of dimensions as shape."""
while len(probs.shape) < len(shape):
probs = probs[None]
return probs
parent_name = Distribution.__name__
# Return a new object, overriding sample.
  return type('StraightThrough' + parent_name, (Distribution,),
{'sample': sample})
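# The sketch below is illustrative only (the helper name is hypothetical and
# not part of the library API). It assumes the public
# `distrax.OneHotCategorical`, whose samples share the shape of `probs`, so
# the straight-through correction broadcasts cleanly.
def _example_straight_through_usage():
  import distrax  # Deferred import, used only by this sketch.
  st_cls = straight_through_wrapper(distrax.OneHotCategorical)
  dist = st_cls(logits=jax.numpy.array([0.1, 0.2, 0.7]))
  return dist.sample(seed=jax.random.PRNGKey(0))  # One-hot sample, shape (3,).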
| distrax-master | distrax/_src/distributions/straight_through.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `mvn_diag.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import mvn_diag
from distrax._src.distributions import normal
from distrax._src.utils import equivalence
import jax
import jax.numpy as jnp
import numpy as np
class MultivariateNormalDiagTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(mvn_diag.MultivariateNormalDiag)
@parameterized.named_parameters(
('1d std normal', {'scale_diag': np.ones((1,))}),
('2d std normal', {'loc': [0., 0.], 'scale_diag': [1., 1.]}),
('2d std normal, None loc', {'scale_diag': [1., 1.]}),
('2d std normal, None scale_diag', {'loc': [0., 0.]}),
('rank-2 parameters',
{'loc': np.zeros((3, 2)), 'scale_diag': np.ones((3, 2))}),
('broadcasted scale_diag',
{'loc': np.zeros((3, 2)), 'scale_diag': np.ones((2,))}),
('broadcasted loc',
{'loc': np.zeros((2)), 'scale_diag': np.ones((3, 2,))}),
)
def test_event_shape(self, distr_params):
distr_params = {
k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}
super()._test_event_shape((), distr_params)
@parameterized.named_parameters(
('1d std normal', {'scale_diag': np.ones((1,))}),
('2d std normal', {'loc': [0., 0.], 'scale_diag': [1., 1.]}),
('2d std normal, None loc', {'scale_diag': [1., 1.]}),
('2d std normal, None scale_diag', {'loc': [0., 0.]}),
('rank-2 parameters',
{'loc': np.zeros((3, 2)), 'scale_diag': np.ones((3, 2))}),
('broadcasted scale_diag',
{'loc': np.zeros((3, 2)), 'scale_diag': np.ones((2,))}),
('broadcasted loc',
{'loc': np.zeros((2)), 'scale_diag': np.ones((3, 2,))}),
)
def test_batch_shape(self, distr_params):
distr_params = {
k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}
super()._test_batch_shape((), distr_params)
def test_invalid_parameters(self):
self._test_raises_error(dist_kwargs={'loc': None, 'scale_diag': None})
self._test_raises_error(
dist_kwargs={'loc': None, 'scale_diag': np.array(1.)})
self._test_raises_error(
dist_kwargs={'loc': np.array(1.), 'scale_diag': None})
self._test_raises_error(
dist_kwargs={'loc': np.zeros((3, 5)), 'scale_diag': np.ones((3, 4))})
@chex.all_variants
@parameterized.named_parameters(
('1d std normal, no shape',
{'scale_diag': np.ones((1,))},
()),
('2d std normal, no shape',
{'loc': [0., 0.],
'scale_diag': [1., 1.]},
()),
('2d std normal, None loc, no shape',
{'scale_diag': [1., 1.]},
()),
('2d std normal, None scale_diag, no shape',
{'loc': [0., 0.]},
()),
('2d std normal, int shape',
{'loc': [0., 0.],
'scale_diag': [1., 1.]},
3),
('2d std normal, None loc, int shape',
{'scale_diag': [1., 1.]},
3),
('2d std normal, None scale_diag, int shape',
{'loc': [0., 0.]},
3),
('2d std normal, 1-tuple shape',
{'loc': [0., 0.],
'scale_diag': [1., 1.]},
(3,)),
('2d std normal, None loc, 1-tuple shape',
{'scale_diag': [1., 1.]},
(3,)),
('2d std normal, None scale_diag, 1-tuple shape',
{'loc': [0., 0.]},
(3,)),
('2d std normal, 2-tuple shape',
{'loc': [0., 0.],
'scale_diag': [1., 1.]},
(3, 4)),
('2d std normal, None loc, 2-tuple shape',
{'scale_diag': [1., 1.]},
(3, 4)),
('2d std normal, None scale_diag, 2-tuple shape',
{'loc': [0., 0.]},
(3, 4)),
('rank-2 parameters, 2-tuple shape',
{'loc': np.zeros((3, 2)),
'scale_diag': np.ones((3, 2))},
(5, 4)),
('broadcasted scale_diag',
{'loc': np.zeros((3, 2)),
'scale_diag': np.ones((2,))},
5),
('broadcasted loc',
{'loc': np.zeros((2)),
'scale_diag': np.ones((3, 2,))},
5),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = {
k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}
super()._test_sample_shape(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape)
@chex.all_variants
@jax.numpy_rank_promotion('raise')
@parameterized.named_parameters(
('1d std normal, no shape',
{'scale_diag': np.ones((1,))},
()),
('2d std normal, no shape',
{'loc': [0., 0.],
'scale_diag': [1., 1.]},
()),
('2d std normal, None loc, no shape',
{'scale_diag': [1., 1.]},
()),
('2d std normal, None scale_diag, no shape',
{'loc': [0., 0.]},
()),
('2d std normal, int shape',
{'loc': [0., 0.],
'scale_diag': [1., 1.]},
3),
('2d std normal, None loc, int shape',
{'scale_diag': [1., 1.]},
3),
('2d std normal, None scale_diag, int shape',
{'loc': [0., 0.]},
3),
('2d std normal, 1-tuple shape',
{'loc': [0., 0.],
'scale_diag': [1., 1.]},
(3,)),
('2d std normal, None loc, 1-tuple shape',
{'scale_diag': [1., 1.]},
(3,)),
('2d std normal, None scale_diag, 1-tuple shape',
{'loc': [0., 0.]},
(3,)),
('2d std normal, 2-tuple shape',
{'loc': [0., 0.],
'scale_diag': [1., 1.]},
(3, 4)),
('2d std normal, None loc, 2-tuple shape',
{'scale_diag': [1., 1.]},
(3, 4)),
('2d std normal, None scale_diag, 2-tuple shape',
{'loc': [0., 0.]},
(3, 4)),
('rank-2 parameters, 2-tuple shape',
{'loc': np.zeros((3, 2)),
'scale_diag': np.ones((3, 2))},
(5, 4)),
('broadcasted scale_diag',
{'loc': np.zeros((3, 2)),
'scale_diag': np.ones((2,))},
5),
('broadcasted loc',
{'loc': np.zeros((2)),
'scale_diag': np.ones((3, 2,))},
5),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = {
k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}
super()._test_sample_and_log_prob(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist_params = {
'loc': np.array([0., 0.], dtype),
'scale_diag': np.array([1., 1.], dtype)}
dist = self.distrax_cls(**dist_params)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('log_prob; 2d dist, 2d value', 'log_prob',
{'scale_diag': [1., 1.]},
[0., -0.5]),
('log_prob; 3d dist, broadcasted params, 3d value', 'log_prob',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.3 * np.ones((3,))},
[-0.1, 0., -0.5]),
('log_prob; 3d dist, broadcasted scale_diag, rank-2 value', 'log_prob',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.1 * np.ones((3,))},
-0.1 * np.ones((4, 3))),
('log_prob; 3d dist, broadcasted scale_diag, rank-3 value', 'log_prob',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.1 * np.ones((3,))},
-0.1 * np.ones((5, 4, 3))),
('log_prob; 2d dist, 2d value, edge case', 'log_prob',
{'scale_diag': [1., 1.]},
[200., -200.]),
('prob; 2d dist, 2d value', 'prob',
{'scale_diag': [1., 1.]},
[0., -0.5]),
('prob; 3d dist, broadcasted params, 3d value', 'prob',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.3 * np.ones((3,))},
[-0.1, 0., -0.5]),
('prob; 3d dist, broadcasted scale_diag, rank-2 value', 'prob',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.1 * np.ones((3,))},
-0.1 * np.ones((4, 3))),
('prob; 3d dist, broadcasted scale_diag, rank-3 value', 'prob',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.1 * np.ones((3,))},
-0.1 * np.ones((5, 4, 3))),
('prob; 2d dist, 2d value, edge case', 'prob',
{'scale_diag': [1., 1.]},
[200., -200.]),
)
def test_pdf(self, function_string, distr_params, value):
distr_params = {
k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}
value = np.asarray(value)
super()._test_attribute(
attribute_string=function_string,
dist_kwargs=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants
@parameterized.named_parameters(
('log_cdf; 2d dist, 2d value', 'log_cdf',
{'scale_diag': [1., 1.]},
[0., -0.5]),
('log_cdf; 3d dist, broadcasted params, 3d value', 'log_cdf',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.3 * np.ones((3,))},
[-0.1, 0., -0.5]),
('log_cdf; 3d dist, broadcasted scale_diag, rank-2 value', 'log_cdf',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.1 * np.ones((3,))},
-0.1 * np.ones((4, 3))),
('log_cdf; 3d dist, broadcasted scale_diag, rank-3 value', 'log_cdf',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.1 * np.ones((3,))},
-0.1 * np.ones((5, 4, 3))),
('log_cdf; 2d dist, 2d value, edge case', 'log_cdf',
{'scale_diag': [1., 1.]},
[200., -200.]),
('cdf; 2d dist, 2d value', 'cdf',
{'scale_diag': [1., 1.]},
[0., -0.5]),
('cdf; 3d dist, broadcasted params, 3d value', 'cdf',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.3 * np.ones((3,))},
[-0.1, 0., -0.5]),
('cdf; 3d dist, broadcasted scale_diag, rank-2 value', 'cdf',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.1 * np.ones((3,))},
-0.1 * np.ones((4, 3))),
('cdf; 3d dist, broadcasted scale_diag, rank-3 value', 'cdf',
{'loc': np.zeros((4, 3)),
'scale_diag': 0.1 * np.ones((3,))},
-0.1 * np.ones((5, 4, 3))),
('cdf; 2d dist, 2d value, edge case', 'cdf',
{'scale_diag': [1., 1.]},
[200., -200.]),
)
def test_cdf(self, function_string, distr_params, value):
distr_params = {
k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}
value = np.asarray(value)
dist = self.distrax_cls(**distr_params)
result = self.variant(getattr(dist, function_string))(value)
# The `cdf` is not implemented in TFP, so we test against a `Normal`.
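    # For a diagonal MVN the coordinates are independent, so the joint CDF is
    # the product of per-coordinate Normal CDFs (and the log-CDF is their
    # sum); `reduce_fn` below computes the corresponding reduction.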
loc = 0. if 'loc' not in distr_params else distr_params['loc']
univariate_normal = normal.Normal(loc, distr_params['scale_diag'])
expected_result = getattr(univariate_normal, function_string)(value)
if function_string == 'cdf':
reduce_fn = lambda x: jnp.prod(x, axis=-1)
elif function_string == 'log_cdf':
reduce_fn = lambda x: jnp.sum(x, axis=-1)
expected_result = reduce_fn(expected_result)
self.assertion_fn(rtol=1e-3)(result, expected_result)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy; one distribution', 'entropy',
{'loc': [0.1, -0.1],
'scale_diag': [0.8, 0.5]}),
('entropy; broadcasted loc', 'entropy',
{'loc': [0., 0.1, -0.1],
'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),
('entropy; broadcasted scale_diag', 'entropy',
{'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],
'scale_diag': [1.5, 0.8, 0.5]}),
('entropy; None loc', 'entropy',
{'scale_diag': [0.8, 0.5]}),
('entropy; None scale_diag', 'entropy',
{'loc': [0.1, -0.1]}),
('mean; one distribution', 'mean',
{'loc': [0.1, -0.1],
'scale_diag': [0.8, 0.5]}),
('mean; broadcasted loc', 'mean',
{'loc': [0., 0.1, -0.1],
'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),
('mean; broadcasted scale_diag', 'mean',
{'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],
'scale_diag': [1.5, 0.8, 0.5]}),
('mean; None loc', 'mean',
{'scale_diag': [0.8, 0.5]}),
('mean; None scale_diag', 'mean',
{'loc': [0.1, -0.1]}),
('stddev; one distribution', 'stddev',
{'loc': [0.1, -0.1],
'scale_diag': [0.8, 0.5]}),
('stddev; broadcasted loc', 'stddev',
{'loc': [0., 0.1, -0.1],
'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),
('stddev; broadcasted scale_diag', 'stddev',
{'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],
'scale_diag': [1.5, 0.8, 0.5]}),
('stddev; None loc', 'stddev',
{'scale_diag': [0.8, 0.5]}),
('stddev; None scale_diag', 'stddev',
{'loc': [0.1, -0.1]}),
('variance; one distribution', 'variance',
{'loc': [0.1, -0.1],
'scale_diag': [0.8, 0.5]}),
('variance; broadcasted loc', 'variance',
{'loc': [0., 0.1, -0.1],
'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),
('variance; broadcasted scale_diag', 'variance',
{'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],
'scale_diag': [1.5, 0.8, 0.5]}),
('variance; None loc', 'variance',
{'scale_diag': [0.8, 0.5]}),
('variance; None scale_diag', 'variance',
{'loc': [0.1, -0.1]}),
('covariance; one distribution', 'covariance',
{'loc': [0.1, -0.1],
'scale_diag': [0.8, 0.5]}),
('covariance; broadcasted loc', 'covariance',
{'loc': [0., 0.1, -0.1],
'scale_diag': [[1.5, 0.8, 0.5], [0.8, 0.1, 0.4]]}),
('covariance; None loc', 'covariance',
{'scale_diag': [0.8, 0.5]}),
('covariance; None scale_diag', 'covariance',
{'loc': [0.1, -0.1]}),
('mode; broadcasted scale_diag', 'mode',
{'loc': [[0., 0.1, -0.1], [0.2, 0., 0.5]],
'scale_diag': [1.5, 0.8, 0.5]}),
)
def test_method(self, function_string, distr_params):
distr_params = {
k: np.asarray(v, dtype=np.float32) for k, v in distr_params.items()}
super()._test_attribute(
function_string,
dist_kwargs=distr_params,
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants(with_pmap=False)
def test_median(self):
dist_params = {'loc': np.array([0.3, -0.1, 0.0]),
'scale_diag': np.array([0.1, 1.4, 0.5])}
dist = self.distrax_cls(**dist_params)
self.assertion_fn(rtol=1e-3)(self.variant(dist.median)(), dist.mean())
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
rng = np.random.default_rng(42)
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'loc': rng.normal(size=(4, 1, 5)).astype(np.float32),
'scale_diag': 0.1 + rng.uniform(size=(3, 5)).astype(np.float32),
},
dist2_kwargs={
'loc': np.asarray([-2.4, -1., 0., 1.2, 6.5]).astype(np.float32),
'scale_diag': None,
},
assertion_fn=self.assertion_fn(rtol=1e-3))
def test_jittable(self):
super()._test_jittable(
(np.zeros((2, 3,)), np.ones((2, 3,))),
assertion_fn=self.assertion_fn(rtol=1e-3))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
rng = np.random.default_rng(42)
loc = jnp.array(rng.normal(size=(3, 4, 5)))
scale_diag = jnp.array(rng.uniform(size=(3, 4, 5)))
dist = self.distrax_cls(loc=loc, scale_diag=scale_diag)
self.assertion_fn(rtol=1e-3)(dist[slice_].mean(), loc[slice_])
def test_slice_different_parameterization(self):
rng = np.random.default_rng(42)
loc = jnp.array(rng.normal(size=(4,)))
scale_diag = jnp.array(rng.uniform(size=(3, 4)))
dist = self.distrax_cls(loc=loc, scale_diag=scale_diag)
self.assertion_fn(rtol=1e-3)(dist[0].mean(), loc) # Not slicing loc.
self.assertion_fn(rtol=1e-3)(dist[0].stddev(), scale_diag[0])
def test_slice_ellipsis(self):
rng = np.random.default_rng(42)
loc = jnp.array(rng.normal(size=(3, 4, 5)))
scale_diag = jnp.array(rng.uniform(size=(3, 4, 5)))
dist = self.distrax_cls(loc=loc, scale_diag=scale_diag)
self.assertion_fn(rtol=1e-3)(dist[..., -1].mean(), loc[:, -1])
self.assertion_fn(rtol=1e-3)(dist[..., -1].stddev(), scale_diag[:, -1])
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/mvn_diag_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `softmax.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import softmax
from distrax._src.utils import equivalence
from distrax._src.utils import math
import jax
import jax.numpy as jnp
import numpy as np
class SoftmaxUnitTemperatureTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(softmax.Softmax)
self.temperature = 1.
self.probs = jnp.array([0.2, 0.4, 0.1, 0.3])
self.logits = jnp.log(self.probs)
def test_num_categories(self):
dist = self.distrax_cls(logits=self.logits)
np.testing.assert_equal(dist.num_categories, len(self.logits))
def test_parameters(self):
dist = self.distrax_cls(logits=self.logits)
self.assertion_fn(rtol=2e-3)(dist.logits, self.logits)
self.assertion_fn(rtol=2e-3)(dist.probs, self.probs)
class SoftmaxTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(softmax.Softmax)
self.temperature = 10.
self.logits = jnp.array([2., 4., 1., 3.])
self.probs = jax.nn.softmax(self.logits / self.temperature)
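    # A Softmax distribution with temperature `t` behaves like a Categorical
    # with logits `logits / t`; `self.probs` above is the expected probs.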
def test_num_categories(self):
dist = self.distrax_cls(logits=self.logits, temperature=self.temperature)
np.testing.assert_equal(dist.num_categories, len(self.logits))
def test_parameters(self):
dist = self.distrax_cls(logits=self.logits, temperature=self.temperature)
self.assertion_fn(rtol=2e-3)(
dist.logits, math.normalize(logits=self.logits / self.temperature))
self.assertion_fn(rtol=2e-3)(dist.probs, self.probs)
@chex.all_variants
@parameterized.named_parameters(
('int32', jnp.int32),
('int64', jnp.int64),
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(
logits=self.logits, temperature=self.temperature, dtype=dtype)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
def test_jittable(self):
super()._test_jittable((np.array([2., 4., 1., 3.]),))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
logits = jnp.array(np.random.randn(3, 4, 5))
temperature = 0.8
scaled_logits = logits / temperature
dist = self.distrax_cls(logits=logits, temperature=temperature)
self.assertIsInstance(dist[slice_], self.distrax_cls)
self.assertion_fn(rtol=2e-3)(dist[slice_].temperature, temperature)
self.assertion_fn(rtol=2e-3)(
jax.nn.softmax(dist[slice_].logits, axis=-1),
jax.nn.softmax(scaled_logits[slice_], axis=-1))
def test_slice_ellipsis(self):
logits = jnp.array(np.random.randn(3, 4, 5))
temperature = 0.8
scaled_logits = logits / temperature
dist = self.distrax_cls(logits=logits, temperature=temperature)
dist_sliced = dist[..., -1]
self.assertIsInstance(dist_sliced, self.distrax_cls)
self.assertion_fn(rtol=2e-3)(dist_sliced.temperature, temperature)
self.assertion_fn(rtol=2e-3)(
jax.nn.softmax(dist_sliced.logits, axis=-1),
jax.nn.softmax(scaled_logits[:, -1], axis=-1))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/softmax_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `mvn_from_bijector.py`."""
from typing import Tuple
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import linear
from distrax._src.bijectors.diag_linear import DiagLinear
from distrax._src.bijectors.triangular_linear import TriangularLinear
from distrax._src.distributions.mvn_from_bijector import MultivariateNormalFromBijector
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
class MockLinear(linear.Linear):
"""A mock linear bijector."""
def __init__(self, event_dims: int):
super().__init__(event_dims, batch_shape=(), dtype=float)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return x, jnp.zeros_like(x)[:-1]
class MultivariateNormalFromBijectorTest(parameterized.TestCase):
@parameterized.named_parameters(
('loc is 0d', 4, np.zeros(shape=())),
('loc and scale dims not compatible', 3, np.zeros((4,))),
)
def test_raises_on_wrong_inputs(self, event_dims, loc):
bij = MockLinear(event_dims)
with self.assertRaises(ValueError):
MultivariateNormalFromBijector(loc, bij)
@parameterized.named_parameters(
('no broadcast', np.ones((4,)), np.zeros((4,)), (4,)),
('broadcasted loc', np.ones((3, 4)), np.zeros((4,)), (3, 4)),
('broadcasted diag', np.ones((4,)), np.zeros((3, 4)), (3, 4)),
)
def test_loc_scale_and_shapes(self, diag, loc, expected_shape):
scale = DiagLinear(diag)
batch_shape = jnp.broadcast_shapes(diag.shape, loc.shape)[:-1]
dist = MultivariateNormalFromBijector(loc, scale)
np.testing.assert_allclose(dist.loc, np.zeros(expected_shape))
self.assertTrue(scale.same_as(dist.scale))
self.assertEqual(dist.event_shape, (4,))
self.assertEqual(dist.batch_shape, batch_shape)
@chex.all_variants
def test_sample(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = 0.5 + jax.random.uniform(next(prng), (4,))
loc = jax.random.normal(next(prng), (4,))
scale = DiagLinear(diag)
dist = MultivariateNormalFromBijector(loc, scale)
num_samples = 100_000
sample_fn = lambda seed: dist.sample(seed=seed, sample_shape=num_samples)
samples = self.variant(sample_fn)(jax.random.PRNGKey(2000))
self.assertEqual(samples.shape, (num_samples, 4))
np.testing.assert_allclose(jnp.mean(samples, axis=0), loc, rtol=0.1)
np.testing.assert_allclose(jnp.std(samples, axis=0), diag, rtol=0.1)
@chex.all_variants
def test_log_prob(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = 0.5 + jax.random.uniform(next(prng), (4,))
loc = jax.random.normal(next(prng), (4,))
scale = DiagLinear(diag)
dist = MultivariateNormalFromBijector(loc, scale)
values = jax.random.normal(next(prng), (5, 4))
tfp_dist = tfd.MultivariateNormalDiag(loc=loc, scale_diag=diag)
np.testing.assert_allclose(
self.variant(dist.log_prob)(values), tfp_dist.log_prob(values),
rtol=2e-7)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', (4,), (4,)),
('broadcasted loc', (3, 4), (4,)),
('broadcasted diag', (4,), (3, 4)),
)
def test_mean_median_mode(self, diag_shape, loc_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.normal(next(prng), diag_shape)
loc = jax.random.normal(next(prng), loc_shape)
scale = DiagLinear(diag)
batch_shape = jnp.broadcast_shapes(diag_shape, loc_shape)[:-1]
dist = MultivariateNormalFromBijector(loc, scale)
for method in ['mean', 'median', 'mode']:
with self.subTest(method=method):
fn = self.variant(getattr(dist, method))
np.testing.assert_allclose(
fn(), jnp.broadcast_to(loc, batch_shape + loc.shape[-1:]))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', (4,), (4,)),
('broadcasted loc', (3, 4), (4,)),
('broadcasted diag', (4,), (3, 4)),
)
def test_variance_stddev_covariance_diag(self, scale_shape, loc_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
scale_diag = jax.random.normal(next(prng), scale_shape)
loc = jax.random.normal(next(prng), loc_shape)
scale = DiagLinear(scale_diag)
batch_shape = jnp.broadcast_shapes(scale_shape[:-1], loc_shape[:-1])
dist = MultivariateNormalFromBijector(loc, scale)
for method in ['variance', 'stddev', 'covariance']:
with self.subTest(method=method):
fn = self.variant(getattr(dist, method))
if method == 'variance':
expected_result = jnp.broadcast_to(
jnp.square(scale_diag), batch_shape + loc.shape[-1:])
elif method == 'stddev':
expected_result = jnp.broadcast_to(
jnp.abs(scale_diag), batch_shape + loc.shape[-1:])
elif method == 'covariance':
expected_result = jnp.broadcast_to(
jnp.vectorize(jnp.diag, signature='(k)->(k,k)')(
jnp.square(scale_diag)),
batch_shape + loc.shape[-1:] + loc.shape[-1:])
np.testing.assert_allclose(fn(), expected_result, rtol=5e-3)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', (4, 4), (4,)),
('broadcasted loc', (3, 4, 4), (4,)),
('broadcasted diag', (4, 4), (3, 4)),
)
def test_variance_stddev_covariance_no_diag(self, scale_shape, loc_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
scale_tril = jnp.tril(jax.random.normal(next(prng), scale_shape))
loc = jax.random.normal(next(prng), loc_shape)
scale = TriangularLinear(matrix=scale_tril, is_lower=True)
batch_shape = jnp.broadcast_shapes(scale_shape[:-2], loc_shape[:-1])
dist = MultivariateNormalFromBijector(loc, scale)
for method in ['variance', 'stddev', 'covariance']:
with self.subTest(method=method):
fn = self.variant(getattr(dist, method))
scale_tril_t = jnp.vectorize(
jnp.transpose, signature='(k,k)->(k,k)')(scale_tril)
scale_times_scale_t = jnp.matmul(scale_tril, scale_tril_t)
if method == 'variance':
expected_result = jnp.vectorize(jnp.diag, signature='(k,k)->(k)')(
scale_times_scale_t)
expected_result = jnp.broadcast_to(
expected_result, batch_shape + loc.shape[-1:])
elif method == 'stddev':
expected_result = jnp.vectorize(jnp.diag, signature='(k,k)->(k)')(
jnp.sqrt(scale_times_scale_t))
expected_result = jnp.broadcast_to(
expected_result, batch_shape + loc.shape[-1:])
elif method == 'covariance':
expected_result = jnp.broadcast_to(
scale_times_scale_t, batch_shape + scale_tril.shape[-2:])
np.testing.assert_allclose(fn(), expected_result, rtol=5e-3)
@chex.all_variants(with_pmap=False)
def test_kl_divergence_diag_distributions(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
scale_diag1 = 0.1 + jax.random.uniform(next(prng), (3, 4))
loc1 = jax.random.normal(next(prng), (1, 4))
dist1_distrax = MultivariateNormalFromBijector(
loc=loc1,
scale=DiagLinear(scale_diag1),
)
dist1_tfp = tfd.MultivariateNormalDiag(
loc=loc1, scale_diag=scale_diag1)
scale_diag2 = 0.1 + jax.random.uniform(next(prng), (4,))
loc2 = jax.random.normal(next(prng), (4,))
dist2_distrax = MultivariateNormalFromBijector(
loc=loc2,
scale=DiagLinear(scale_diag2),
)
dist2_tfp = tfd.MultivariateNormalDiag(
loc=loc2, scale_diag=scale_diag2)
expected_result1 = dist1_tfp.kl_divergence(dist2_tfp)
expected_result2 = dist2_tfp.kl_divergence(dist1_tfp)
for mode in ['distrax_to_distrax', 'distrax_to_tfp', 'tfp_to_distrax']:
with self.subTest(mode=mode):
if mode == 'distrax_to_distrax':
result1 = self.variant(dist1_distrax.kl_divergence)(dist2_distrax)
result2 = self.variant(dist2_distrax.kl_divergence)(dist1_distrax)
elif mode == 'distrax_to_tfp':
result1 = self.variant(dist1_distrax.kl_divergence)(dist2_tfp)
result2 = self.variant(dist2_distrax.kl_divergence)(dist1_tfp)
elif mode == 'tfp_to_distrax':
result1 = self.variant(dist1_tfp.kl_divergence)(dist2_distrax)
result2 = self.variant(dist2_tfp.kl_divergence)(dist1_distrax)
np.testing.assert_allclose(result1, expected_result1, rtol=1e-3)
np.testing.assert_allclose(result2, expected_result2, rtol=1e-3)
@chex.all_variants(with_pmap=False)
def test_kl_divergence_non_diag_distributions(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
scale_tril1 = jnp.tril(jax.random.normal(next(prng), (3, 4, 4)))
loc1 = jax.random.normal(next(prng), (1, 4))
dist1_distrax = MultivariateNormalFromBijector(
loc=loc1,
scale=TriangularLinear(matrix=scale_tril1),
)
dist1_tfp = tfd.MultivariateNormalTriL(loc=loc1, scale_tril=scale_tril1)
scale_tril2 = jnp.tril(jax.random.normal(next(prng), (4, 4)))
loc2 = jax.random.normal(next(prng), (4,))
dist2_distrax = MultivariateNormalFromBijector(
loc=loc2,
scale=TriangularLinear(matrix=scale_tril2),
)
dist2_tfp = tfd.MultivariateNormalTriL(loc=loc2, scale_tril=scale_tril2)
expected_result1 = dist1_tfp.kl_divergence(dist2_tfp)
expected_result2 = dist2_tfp.kl_divergence(dist1_tfp)
for mode in ['distrax_to_distrax', 'distrax_to_tfp', 'tfp_to_distrax']:
with self.subTest(mode=mode):
if mode == 'distrax_to_distrax':
result1 = self.variant(dist1_distrax.kl_divergence)(dist2_distrax)
result2 = self.variant(dist2_distrax.kl_divergence)(dist1_distrax)
elif mode == 'distrax_to_tfp':
result1 = self.variant(dist1_distrax.kl_divergence)(dist2_tfp)
result2 = self.variant(dist2_distrax.kl_divergence)(dist1_tfp)
elif mode == 'tfp_to_distrax':
result1 = self.variant(dist1_tfp.kl_divergence)(dist2_distrax)
result2 = self.variant(dist2_tfp.kl_divergence)(dist1_distrax)
np.testing.assert_allclose(result1, expected_result1, rtol=1e-3)
np.testing.assert_allclose(result2, expected_result2, rtol=1e-3)
def test_kl_divergence_raises_on_incompatible_distributions(self):
dim = 4
dist1 = MultivariateNormalFromBijector(
loc=jnp.zeros((dim,)),
scale=DiagLinear(diag=jnp.ones((dim,))),
)
dim = 5
dist2 = MultivariateNormalFromBijector(
loc=jnp.zeros((dim,)),
scale=DiagLinear(diag=jnp.ones((dim,))),
)
with self.assertRaises(ValueError):
dist1.kl_divergence(dist2)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/mvn_from_bijector_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `bernoulli.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import bernoulli
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
from scipy import special as sp_special
class BernoulliTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(bernoulli.Bernoulli)
self.p = np.asarray([0.2, 0.4, 0.6, 0.8])
self.logits = sp_special.logit(self.p)
@parameterized.named_parameters(
('0d probs', (), True),
('0d logits', (), False),
('1d probs', (4,), True),
('1d logits', (4,), False),
('2d probs', (3, 4), True),
('2d logits', (3, 4), False),
)
def test_properties(self, shape, from_probs):
rng = np.random.default_rng(42)
probs = rng.uniform(size=shape)
logits = sp_special.logit(probs)
dist_kwargs = {'probs': probs} if from_probs else {'logits': logits}
dist = self.distrax_cls(**dist_kwargs)
self.assertion_fn(rtol=1e-3)(dist.logits, logits)
self.assertion_fn(rtol=1e-3)(dist.probs, probs)
self.assertEqual(dist.event_shape, ())
self.assertEqual(dist.batch_shape, shape)
@parameterized.named_parameters(
('probs and logits', {'logits': [0.1, -0.2], 'probs': [0.5, 0.4]}),
('both probs and logits are None', {'logits': None, 'probs': None}),
('complex64 dtype', {'logits': [0.1, -0.2], 'dtype': jnp.complex64}),
('complex128 dtype', {'logits': [0.1, -0.2], 'dtype': jnp.complex128}),
)
def test_raises_on_invalid_inputs(self, dist_params):
with self.assertRaises(ValueError):
self.distrax_cls(**dist_params)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape', {'logits': [0.0, 1.0, -0.5]}, ()),
('1d probs, no shape', {'probs': [0.1, 0.5, 0.3]}, ()),
('1d logits, int shape', {'logits': [0.0, 1.0, -0.5]}, 1),
('1d probs, int shape', {'probs': [0.1, 0.5, 0.3]}, 1),
('1d logits, 1-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (1,)),
('1d probs, 1-tuple shape', {'probs': [0.1, 0.5, 0.3]}, (1,)),
('1d logits, 2-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (5, 4)),
('1d probs, 2-tuple shape', {'probs': [0.1, 0.5, 0.3]}, (5, 4)),
('2d logits, no shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, ()),
('2d probs, no shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, ()),
('2d logits, int shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, 4),
('2d probs, int shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, 4),
('2d logits, 1-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5,)),
('2d probs, 1-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5,)),
('2d logits, 2-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5, 4)),
('2d probs, 2-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5, 4)),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_sample_shape(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('sample, from probs', 'sample', True),
('sample, from logits', 'sample', False),
('sample_and_log_prob, from probs', 'sample_and_log_prob', True),
('sample_and_log_prob, from logits', 'sample_and_log_prob', False),
)
def test_sample_values(self, method, from_probs):
probs = np.array([0., 0.2, 0.5, 0.8, 1.]) # Includes edge cases (0 and 1).
logits = sp_special.logit(probs)
n_samples = 100000
dist_kwargs = {'probs': probs} if from_probs else {'logits': logits}
dist = self.distrax_cls(**dist_kwargs)
sample_fn = self.variant(
lambda key: getattr(dist, method)(seed=key, sample_shape=n_samples))
samples = sample_fn(self.key)
samples = samples[0] if method == 'sample_and_log_prob' else samples
self.assertEqual(samples.shape, (n_samples,) + probs.shape)
self.assertTrue(np.all(np.logical_or(samples == 0, samples == 1)))
self.assertion_fn(rtol=0.1)(np.mean(samples, axis=0), probs)
self.assertion_fn(atol=2e-3)(np.std(samples, axis=0), dist.stddev())
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape', {'logits': [0.0, 1.0, -0.5]}, ()),
('1d probs, no shape', {'probs': [0.1, 0.5, 0.3]}, ()),
('1d logits, int shape', {'logits': [0.0, 1.0, -0.5]}, 1),
('1d probs, int shape', {'probs': [0.1, 0.5, 0.3]}, 1),
('1d logits, 1-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (1,)),
('1d probs, 1-tuple shape', {'probs': [0.1, 0.5, 0.3]}, (1,)),
('1d logits, 2-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (5, 4)),
('1d probs, 2-tuple shape', {'probs': [0.1, 0.5, 0.3]}, (5, 4)),
('2d logits, no shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, ()),
('2d probs, no shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, ()),
('2d logits, int shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, 4),
('2d probs, int shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, 4),
('2d logits, 1-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5,)),
('2d probs, 1-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5,)),
('2d logits, 2-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5, 4)),
('2d probs, 2-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5, 4)),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_sample_and_log_prob(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('sample, bool', 'sample', jnp.bool_),
('sample, uint16', 'sample', jnp.uint16),
('sample, uint32', 'sample', jnp.uint32),
('sample, int16', 'sample', jnp.int16),
('sample, int32', 'sample', jnp.int32),
('sample, float16', 'sample', jnp.float16),
('sample, float32', 'sample', jnp.float32),
('sample_and_log_prob, bool', 'sample_and_log_prob', jnp.bool_),
('sample_and_log_prob, uint16', 'sample_and_log_prob', jnp.uint16),
('sample_and_log_prob, uint32', 'sample_and_log_prob', jnp.uint32),
('sample_and_log_prob, int16', 'sample_and_log_prob', jnp.int16),
('sample_and_log_prob, int32', 'sample_and_log_prob', jnp.int32),
('sample_and_log_prob, float16', 'sample_and_log_prob', jnp.float16),
('sample_and_log_prob, float32', 'sample_and_log_prob', jnp.float32),
)
def test_sample_dtype(self, method, dtype):
dist_params = {'logits': self.logits, 'dtype': dtype}
dist = self.distrax_cls(**dist_params)
samples = self.variant(getattr(dist, method))(seed=self.key)
samples = samples[0] if method == 'sample_and_log_prob' else samples
self.assertEqual(samples.dtype, dist.dtype)
self.assertEqual(samples.dtype, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, int value', {'logits': [0.0, 0.5, -0.5]}, 1),
('1d probs, int value', {'probs': [0.3, 0.2, 0.5]}, 1),
('1d logits, 1d value', {'logits': [0.0, 0.5, -0.5]}, [1, 0, 1]),
('1d probs, 1d value', {'probs': [0.3, 0.2, 0.5]}, [1, 0, 1]),
('1d logits, 2d value', {'logits': [0.0, 0.5, -0.5]},
[[1, 0, 0], [0, 1, 0]]),
('1d probs, 2d value', {'probs': [0.3, 0.2, 0.5]},
[[1, 0, 0], [0, 1, 0]]),
('2d logits, 1d value', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]},
[1, 0, 1]),
('2d probs, 1d value', {'probs': [[0.1, 0.5, 0.4], [0.3, 0.3, 0.4]]},
[1, 0, 1]),
('2d logits, 2d value', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]},
[[1, 0, 0], [1, 1, 0]]),
('2d probs, 2d value', {'probs': [[0.1, 0.5, 0.4], [0.3, 0.3, 0.4]]},
[[1, 0, 0], [1, 1, 0]]),
('edge cases with logits', {'logits': [-np.inf, -np.inf, np.inf, np.inf]},
[0, 1, 0, 1]),
('edge cases with probs', {'probs': [0.0, 0.0, 1.0, 1.0]}, [0, 1, 0, 1]),
)
def test_method_with_value(self, distr_params, value):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
value = jnp.asarray(value)
for method in ['prob', 'log_prob', 'cdf', 'log_cdf',
'survival_function', 'log_survival_function']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_kwargs=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('from logits', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]}),
('from probs', {'probs': [[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]]}),
)
def test_method(self, distr_params):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
for method in ['entropy', 'mode', 'mean', 'variance', 'stddev']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_kwargs=distr_params,
call_args=(),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'),
)
def test_with_two_distributions(self, function_string, mode_string):
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'probs': jnp.asarray([[0.1, 0.5, 0.4], [0.2, 0.4, 0.8]])},
dist2_kwargs={'logits': jnp.asarray([0.0, -0.1, 0.1]),},
assertion_fn=self.assertion_fn(rtol=1e-2))
def test_jittable(self):
super()._test_jittable(
(np.array([0., 4., -1., 4.]),),
assertion_fn=self.assertion_fn(rtol=1e-3))
@parameterized.named_parameters(
('single element, from probs', 2, True),
('single element, from logits', 2, False),
('range, from probs', slice(-1), True),
('range, from logits', slice(-1), False),
('range_2, from probs', (slice(None), slice(-1)), True),
('range_2, from logits', (slice(None), slice(-1)), False),
('ellipsis, from probs', (Ellipsis, -1), True),
('ellipsis, from logits', (Ellipsis, -1), False),
)
def test_slice(self, slice_, from_probs):
rng = np.random.default_rng(42)
probs = rng.uniform(size=(3, 4, 5))
logits = sp_special.logit(probs)
dist_kwargs = {'probs': probs} if from_probs else {'logits': logits}
dist = self.distrax_cls(**dist_kwargs)
self.assertion_fn(rtol=1e-3)(dist[slice_].logits, logits[slice_])
self.assertion_fn(rtol=1e-3)(dist[slice_].probs, probs[slice_])
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/bernoulli_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `categorical.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import categorical
from distrax._src.utils import equivalence
from distrax._src.utils import math
import jax
import jax.numpy as jnp
import numpy as np
class CategoricalTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(categorical.Categorical)
@parameterized.named_parameters(
('1d probs', (4,), True),
('1d logits', (4,), False),
('2d probs', (3, 4), True),
('2d logits', (3, 4), False),
)
def test_properties(self, shape, from_probs):
rng = np.random.default_rng(42)
    probs = rng.uniform(size=shape)  # Unnormalized on purpose; the class normalizes.
logits = np.log(probs)
dist_kwargs = {'probs': probs} if from_probs else {'logits': logits}
dist = self.distrax_cls(**dist_kwargs)
self.assertEqual(dist.event_shape, ())
self.assertEqual(dist.batch_shape, shape[:-1])
self.assertEqual(dist.num_categories, shape[-1])
self.assertion_fn(rtol=1e-3)(dist.logits, math.normalize(logits=logits))
self.assertion_fn(rtol=1e-3)(dist.probs, math.normalize(probs=probs))
@parameterized.named_parameters(
('probs and logits', {'logits': [0.1, -0.2], 'probs': [0.6, 0.4]}),
('both probs and logits are None', {'logits': None, 'probs': None}),
('bool dtype', {'logits': [0.1, -0.2], 'dtype': jnp.bool_}),
('complex64 dtype', {'logits': [0.1, -0.2], 'dtype': jnp.complex64}),
('complex128 dtype', {'logits': [0.1, -0.2], 'dtype': jnp.complex128}),
)
def test_raises_on_invalid_inputs(self, dist_params):
with self.assertRaises(ValueError):
self.distrax_cls(**dist_params)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape', {'logits': [0.0, 1.0, -0.5]}, ()),
('1d probs, no shape', {'probs': [0.2, 0.5, 0.3]}, ()),
('1d logits, int shape', {'logits': [0.0, 1.0, -0.5]}, 1),
('1d probs, int shape', {'probs': [0.2, 0.5, 0.3]}, 1),
('1d logits, 1-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (1,)),
('1d probs, 1-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (1,)),
('1d logits, 2-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (5, 4)),
('1d probs, 2-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (5, 4)),
('2d logits, no shape', {'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
()),
('2d probs, no shape', {'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
()),
('2d logits, int shape', {'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
4),
('2d probs, int shape', {'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
4),
('2d logits, 1-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]}, (5,)),
('2d probs, 1-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]}, (5,)),
('2d logits, 2-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]}, (5, 4)),
('2d probs, 2-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]}, (5, 4)),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_sample_shape(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape', {'logits': [0.0, 1.0, -0.5]}, ()),
('1d probs, no shape', {'probs': [0.2, 0.5, 0.3]}, ()),
('1d logits, int shape', {'logits': [0.0, 1.0, -0.5]}, 1),
('1d probs, int shape', {'probs': [0.2, 0.5, 0.3]}, 1),
('1d logits, 1-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (1,)),
('1d probs, 1-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (1,)),
('1d logits, 2-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (5, 4)),
('1d probs, 2-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (5, 4)),
('2d logits, no shape', {'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
()),
('2d probs, no shape', {'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
()),
('2d logits, int shape', {'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
4),
('2d probs, int shape', {'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
4),
('2d logits, 1-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]}, (5,)),
('2d probs, 1-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]}, (5,)),
('2d logits, 2-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]}, (5, 4)),
('2d probs, 2-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]}, (5, 4)),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_sample_and_log_prob(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=2e-3))
@chex.all_variants
@parameterized.named_parameters(
('sample, int16', 'sample', jnp.int16),
('sample, int32', 'sample', jnp.int32),
('sample, uint16', 'sample', jnp.uint16),
('sample, uint32', 'sample', jnp.uint32),
('sample, float16', 'sample', jnp.float16),
('sample, float32', 'sample', jnp.float32),
('sample_and_log_prob, int16', 'sample_and_log_prob', jnp.int16),
('sample_and_log_prob, int32', 'sample_and_log_prob', jnp.int32),
('sample_and_log_prob, uint16', 'sample_and_log_prob', jnp.uint16),
('sample_and_log_prob, uint32', 'sample_and_log_prob', jnp.uint32),
('sample_and_log_prob, float16', 'sample_and_log_prob', jnp.float16),
('sample_and_log_prob, float32', 'sample_and_log_prob', jnp.float32),
)
def test_sample_dtype(self, method, dtype):
dist_params = {'logits': [0.1, -0.1, 0.5, -0.8, 1.5], 'dtype': dtype}
dist = self.distrax_cls(**dist_params)
samples = self.variant(getattr(dist, method))(seed=self.key)
samples = samples[0] if method == 'sample_and_log_prob' else samples
self.assertEqual(samples.dtype, dist.dtype)
self.assertEqual(samples.dtype, dtype)
@chex.all_variants
@parameterized.named_parameters(
('sample, from probs', 'sample', True),
('sample, from logits', 'sample', False),
('sample_and_log_prob, from probs', 'sample_and_log_prob', True),
('sample_and_log_prob, from logits', 'sample_and_log_prob', False),
)
def test_sample_values(self, method, from_probs):
probs = np.array([[0.5, 0.25, 0.25], [0., 0., 1.]]) # Includes edge case.
num_categories = probs.shape[-1]
logits = np.log(probs)
n_samples = 100000
dist_kwargs = {'probs': probs} if from_probs else {'logits': logits}
dist = self.distrax_cls(**dist_kwargs)
sample_fn = self.variant(
lambda key: getattr(dist, method)(seed=key, sample_shape=n_samples))
samples = sample_fn(self.key)
samples = samples[0] if method == 'sample_and_log_prob' else samples
self.assertEqual(samples.shape, (n_samples,) + probs.shape[:-1])
self.assertTrue(np.all(
np.logical_and(samples >= 0, samples < num_categories)))
np.testing.assert_array_equal(jnp.floor(samples), samples)
samples_one_hot = jax.nn.one_hot(samples, num_categories, axis=-1)
self.assertion_fn(rtol=0.1)(np.mean(samples_one_hot, axis=0), probs)
@chex.all_variants
@parameterized.named_parameters(
('sample', 'sample'),
('sample_and_log_prob', 'sample_and_log_prob'),
)
def test_sample_values_invalid_probs(self, method):
# Check that samples=-1 if probs are negative or NaN after normalization.
n_samples = 1000
probs = np.array([
[0.1, -0.4, 0.2, 0.3], # Negative probabilities.
[-0.1, 0.1, 0.0, 0.0], # NaN probabilities after normalization.
[0.1, 0.25, 0.2, 0.8], # Valid (unnormalized) probabilities.
])
dist = self.distrax_cls(probs=probs)
sample_fn = self.variant(
lambda key: getattr(dist, method)(seed=key, sample_shape=n_samples))
samples = sample_fn(self.key)
samples = samples[0] if method == 'sample_and_log_prob' else samples
self.assertion_fn(rtol=1e-4)(samples[..., :-1], -1)
np.testing.assert_array_compare(lambda x, y: x >= y, samples[..., -1], 0)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, 1d value', {'logits': [0.0, 0.5, -0.5]}, [1, 0, 2, 0]),
('1d probs, 1d value', {'probs': [0.3, 0.2, 0.5]}, [1, 0, 2, 0]),
('1d logits, 2d value', {'logits': [0.0, 0.5, -0.5]}, [[1, 0], [2, 0]]),
('1d probs, 2d value', {'probs': [0.3, 0.2, 0.5]}, [[1, 0], [2, 0]]),
('2d logits, 1d value', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]},
[1, 0]),
('2d probs, 1d value', {'probs': [[0.1, 0.5, 0.4], [0.3, 0.3, 0.4]]},
[1, 0]),
('2d logits, 2d value', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]},
[[1, 0], [2, 1]]),
('2d probs, 2d value', {'probs': [[0.1, 0.5, 0.4], [0.3, 0.3, 0.4]]},
[[1, 0], [2, 1]]),
('extreme probs', {'probs': [0.0, 1.0, 0.0]}, [0, 1, 1, 2]),
)
def test_method_with_input(self, distr_params, value):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
value = np.asarray(value, dtype=np.int32)
for method in ['prob', 'log_prob', 'cdf', 'log_cdf', 'survival_function']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_kwargs=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(atol=3e-5))
# We separate the `log_survival_function` method because TFP sometimes gives
# NaN instead of `-jnp.inf` when evaluated at `num_categories - 1` on a TPU
# if the distribution was created using the logits parameter.
dist = self.distrax_cls(**distr_params)
tfp_dist = self.distrax_cls.equiv_tfp_cls(**distr_params)
num_categories = dist.num_categories
log_sf = tfp_dist.log_survival_function(value)
log_sf = jnp.where(value == num_categories - 1, -jnp.inf, log_sf)
with self.subTest(method='log_survival_function'):
self.assertion_fn(atol=3e-5)(
self.variant(dist.log_survival_function)(value), log_sf)
@chex.all_variants
def test_method_with_input_unnormalized_probs(self):
# We test this case separately because the result of `cdf` and `log_cdf`
# differs from TFP when the input `probs` are not normalized.
probs = np.array([0.1, 0.2, 0.3])
normalized_probs = probs / np.sum(probs, axis=-1, keepdims=True)
distr_params = {'probs': probs}
value = np.asarray([0, 1, 2], dtype=np.int32)
dist = self.distrax_cls(**distr_params)
self.assertion_fn(rtol=1e-3)(
self.variant(dist.prob)(value), normalized_probs)
self.assertion_fn(rtol=1e-3)(
self.variant(dist.log_prob)(value), np.log(normalized_probs))
self.assertion_fn(rtol=1e-3)(
self.variant(dist.cdf)(value), np.cumsum(normalized_probs))
self.assertion_fn(atol=5e-5)(
self.variant(dist.log_cdf)(value), np.log(np.cumsum(normalized_probs)))
self.assertion_fn(atol=1e-5)(
self.variant(dist.survival_function)(value),
1. - np.cumsum(normalized_probs))
# In the line below, we compare against `jnp` instead of `np` because the
# latter gives `1. - np.cumsum(normalized_probs)[-1] = 1.1e-16` instead of
    # `0.`, so its log is inaccurate: it gives `-36.7` instead of `-np.inf`.
self.assertion_fn(atol=1e-5)(
self.variant(dist.log_survival_function)(value),
jnp.log(1. - jnp.cumsum(normalized_probs)))
@chex.all_variants
def test_method_with_input_outside_domain(self):
probs = jnp.asarray([0.2, 0.3, 0.5])
dist = self.distrax_cls(probs=probs)
value = jnp.asarray([-1, -2, 3, 4], dtype=jnp.int32)
self.assertion_fn(atol=1e-5)(
self.variant(dist.prob)(value), np.asarray([0., 0., 0., 0.]))
self.assertTrue(np.all(self.variant(dist.log_prob)(value) == -jnp.inf))
self.assertion_fn(atol=1e-5)(
self.variant(dist.cdf)(value), np.asarray([0., 0., 1., 1.]))
self.assertion_fn(rtol=1e-3)(
self.variant(dist.log_cdf)(value), np.log(np.asarray([0., 0., 1., 1.])))
self.assertion_fn(atol=1e-5)(
self.variant(dist.survival_function)(value),
np.asarray([1., 1., 0., 0.]))
self.assertion_fn(atol=1e-5)(
self.variant(dist.log_survival_function)(value),
np.log(np.asarray([1., 1., 0., 0.])))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('2d probs', True),
('2d logits', False),
)
def test_method(self, from_probs):
rng = np.random.default_rng(42)
probs = rng.uniform(size=(4, 3))
probs /= np.sum(probs, axis=-1, keepdims=True)
logits = np.log(probs)
distr_params = {'probs': probs} if from_probs else {'logits': logits}
for method in ['entropy', 'mode', 'logits_parameter']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_kwargs=distr_params,
call_args=(),
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'probs':
jnp.asarray([[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]])
},
dist2_kwargs={
'logits': jnp.asarray([0.0, 0.1, 0.1]),
},
assertion_fn=self.assertion_fn(rtol=2e-3))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'),
)
def test_with_two_distributions_extreme_cases(
self, function_string, mode_string):
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'probs':
jnp.asarray([[0.1, 0.5, 0.4], [0.4, 0.0, 0.6], [0.4, 0.6, 0.]])
},
dist2_kwargs={
'logits': jnp.asarray([0.0, 0.1, -jnp.inf]),
},
assertion_fn=self.assertion_fn(atol=5e-5))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'),
)
def test_with_two_distributions_raises_on_invalid_num_categories(
self, function_string, mode_string):
probs1 = jnp.asarray([0.1, 0.5, 0.4])
distrax_dist1 = self.distrax_cls(probs=probs1)
tfp_dist1 = self.distrax_cls.equiv_tfp_cls(probs=probs1)
logits2 = jnp.asarray([-0.1, 0.3])
distrax_dist2 = self.distrax_cls(logits=logits2)
tfp_dist2 = self.distrax_cls.equiv_tfp_cls(logits=logits2)
dist_a = tfp_dist1 if mode_string == 'tfp_to_distrax' else distrax_dist1
dist_b = tfp_dist2 if mode_string == 'distrax_to_tfp' else distrax_dist2
first_fn = self.variant(getattr(dist_a, function_string))
with self.assertRaises(ValueError):
_ = first_fn(dist_b)
dist_a = tfp_dist2 if mode_string == 'tfp_to_distrax' else distrax_dist2
dist_b = tfp_dist1 if mode_string == 'distrax_to_tfp' else distrax_dist1
second_fn = self.variant(getattr(dist_a, function_string))
with self.assertRaises(ValueError):
_ = second_fn(dist_b)
def test_jittable(self):
super()._test_jittable((np.array([0., 4., -1., 4.]),))
@parameterized.named_parameters(
('single element, from probs', 2, True),
('single element, from logits', 2, False),
('range, from probs', slice(-1), True),
('range, from logits', slice(-1), False),
('range_2, from probs', (slice(None), slice(-1)), True),
('range_2, from logits', (slice(None), slice(-1)), False),
)
def test_slice(self, slice_, from_probs):
rng = np.random.default_rng(42)
logits = rng.normal(size=(3, 4, 5))
probs = jax.nn.softmax(logits, axis=-1)
dist_kwargs = {'probs': probs} if from_probs else {'logits': logits}
dist = self.distrax_cls(**dist_kwargs)
self.assertion_fn(rtol=1e-3)(
dist[slice_].logits, math.normalize(logits=logits[slice_]))
self.assertion_fn(rtol=1e-3)(dist[slice_].probs, probs[slice_])
@parameterized.named_parameters(
('from probs', True),
('from logits', False),
)
def test_slice_ellipsis(self, from_probs):
rng = np.random.default_rng(42)
logits = rng.normal(size=(3, 4, 5))
probs = jax.nn.softmax(logits, axis=-1)
dist_kwargs = {'probs': probs} if from_probs else {'logits': logits}
dist = self.distrax_cls(**dist_kwargs)
self.assertion_fn(rtol=1e-3)(
dist[..., -1].logits, math.normalize(logits=logits[:, -1, :]))
self.assertion_fn(rtol=1e-3)(dist[..., -1].probs, probs[:, -1, :])
def test_vmap_inputs(self):
def log_prob_sum(dist, x):
return dist.log_prob(x).sum()
dist = categorical.Categorical(jnp.arange(3 * 4 * 5).reshape((3, 4, 5)))
x = jnp.zeros((3, 4), jnp.int_)
with self.subTest('no vmap'):
actual = log_prob_sum(dist, x)
expected = dist.log_prob(x).sum()
self.assertion_fn()(actual, expected)
with self.subTest('axis=0'):
actual = jax.vmap(log_prob_sum, in_axes=0)(dist, x)
expected = dist.log_prob(x).sum(axis=1)
self.assertion_fn()(actual, expected)
with self.subTest('axis=1'):
actual = jax.vmap(log_prob_sum, in_axes=1)(dist, x)
expected = dist.log_prob(x).sum(axis=0)
self.assertion_fn()(actual, expected)
def test_vmap_outputs(self):
def summed_dist(logits):
logits1 = logits.sum(keepdims=True)
logits2 = -logits1
logits = jnp.concatenate([logits1, logits2], axis=-1)
return categorical.Categorical(logits)
logits = jnp.arange((3 * 4 * 5)).reshape((3, 4, 5))
actual = jax.vmap(summed_dist)(logits)
logits1 = logits.sum(axis=(1, 2), keepdims=True)
logits2 = -logits1
logits = jnp.concatenate([logits1, logits2], axis=-1)
expected = categorical.Categorical(logits)
np.testing.assert_equal(actual.batch_shape, expected.batch_shape)
np.testing.assert_equal(actual.event_shape, expected.event_shape)
x = jnp.array([[[0]], [[1]], [[0]]], jnp.int_)
self.assertion_fn(rtol=1e-6)(actual.log_prob(x), expected.log_prob(x))
@parameterized.named_parameters(
('-inf logits', np.array([-jnp.inf, 2, -3, -jnp.inf, 5.0])),
('uniform large negative logits', np.array([-1e9] * 11)),
('uniform large positive logits', np.array([1e9] * 11)),
('uniform', np.array([0.0] * 11)),
('typical', np.array([1, 7, -3, 2, 4.0])),
)
def test_entropy_grad(self, logits):
clipped_logits = jnp.maximum(-10000, logits)
def entropy_fn(logits):
return categorical.Categorical(logits).entropy()
entropy, grads = jax.value_and_grad(entropy_fn)(logits)
expected_entropy, expected_grads = jax.value_and_grad(entropy_fn)(
clipped_logits)
self.assertion_fn(rtol=1e-6)(expected_entropy, entropy)
self.assertion_fn(rtol=1e-6)(expected_grads, grads)
self.assertTrue(np.isfinite(entropy).all())
self.assertTrue(np.isfinite(grads).all())
@parameterized.named_parameters(
('-inf logits1', np.array([-jnp.inf, 2, -3, -jnp.inf, 5.0]),
np.array([1, 7, -3, 2, 4.0])),
('-inf logits both', np.array([-jnp.inf, 2, -1000, -jnp.inf, 5.0]),
np.array([-jnp.inf, 7, -jnp.inf, 2, 4.0])),
('typical', np.array([5, -2, 0, 1, 4.0]),
np.array([1, 7, -3, 2, 4.0])),
)
def test_kl_grad(self, logits1, logits2):
clipped_logits1 = jnp.maximum(-10000, logits1)
clipped_logits2 = jnp.maximum(-10000, logits2)
def kl_fn(logits1, logits2):
return categorical.Categorical(logits1).kl_divergence(
categorical.Categorical(logits2))
kl, grads = jax.value_and_grad(
kl_fn, argnums=(0, 1))(logits1, logits2)
expected_kl, expected_grads = jax.value_and_grad(
kl_fn, argnums=(0, 1))(clipped_logits1, clipped_logits2)
self.assertion_fn(rtol=1e-6)(expected_kl, kl)
self.assertion_fn(rtol=1e-6)(expected_grads, grads)
self.assertTrue(np.isfinite(kl).all())
self.assertTrue(np.isfinite(grads).all())
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/categorical_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mixture distributions."""
from typing import Tuple
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
PRNGKey = chex.PRNGKey
DistributionLike = distribution.DistributionLike
CategoricalLike = categorical.CategoricalLike
EventT = distribution.EventT
class MixtureSameFamily(distribution.Distribution):
"""Mixture with components provided from a single batched distribution."""
equiv_tfp_cls = tfd.MixtureSameFamily
def __init__(self,
mixture_distribution: CategoricalLike,
components_distribution: DistributionLike):
"""Initializes a mixture distribution for components of a shared family.
Args:
mixture_distribution: Distribution over selecting components.
components_distribution: Component distribution, with rightmost batch
dimension indexing components.
"""
super().__init__()
mixture_distribution = conversion.as_distribution(mixture_distribution)
components_distribution = conversion.as_distribution(
components_distribution)
self._mixture_distribution = mixture_distribution
self._components_distribution = components_distribution
# Store normalized weights (last axis of logits is for components).
# This uses the TFP API, which is replicated in Distrax.
self._mixture_log_probs = jax.nn.log_softmax(
mixture_distribution.logits_parameter(), axis=-1)
batch_shape_mixture = mixture_distribution.batch_shape
batch_shape_components = components_distribution.batch_shape
if batch_shape_mixture != batch_shape_components[:-1]:
msg = (f'`mixture_distribution.batch_shape` '
f'({mixture_distribution.batch_shape}) is not compatible with '
f'`components_distribution.batch_shape` '
             f'({components_distribution.batch_shape}).')
raise ValueError(msg)
@property
def components_distribution(self):
"""The components distribution."""
return self._components_distribution
@property
def mixture_distribution(self):
"""The mixture distribution."""
return self._mixture_distribution
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return self._components_distribution.event_shape
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._components_distribution.batch_shape[:-1]
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
key_mix, key_components = jax.random.split(key)
mix_sample = self.mixture_distribution.sample(sample_shape=n, seed=key_mix)
num_components = self._components_distribution.batch_shape[-1]
# Sample from all components, then multiply with a one-hot mask and sum.
    # While this computes values that are eventually discarded, it is faster on
    # GPUs/TPUs, which excel at batched operations (as opposed to indexing); in
    # particular, it is more efficient than `gather` or `where` operations.
mask = jax.nn.one_hot(mix_sample, num_components)
samples_all = self.components_distribution.sample(sample_shape=n,
seed=key_components)
# Make mask broadcast with (potentially multivariate) samples.
mask = mask.reshape(mask.shape + (1,) * len(self.event_shape))
# Need to sum over the component axis, which is the last one for scalar
# components, the second-last one for 1-dim events, etc.
samples = jnp.sum(samples_all * mask, axis=-1 - len(self.event_shape))
return samples
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
# Add component axis to make input broadcast with components distribution.
expanded = jnp.expand_dims(value, axis=-1 - len(self.event_shape))
# Compute `log_prob` in every component.
lp = self.components_distribution.log_prob(expanded)
    # The last batch axis of `lp` indexes the components, as does the last axis
    # of the mixture log-probs, so reduce over the last axis.
return jax.scipy.special.logsumexp(a=lp + self._mixture_log_probs, axis=-1)
def mean(self) -> Array:
"""Calculates the mean."""
means = self.components_distribution.mean()
weights = jnp.exp(self._mixture_log_probs)
# Broadcast weights over event shape, and average over component axis.
weights = weights.reshape(weights.shape + (1,) * len(self.event_shape))
return jnp.sum(means * weights, axis=-1 - len(self.event_shape))
def variance(self) -> Array:
"""Calculates the variance."""
means = self.components_distribution.mean()
variances = self.components_distribution.variance()
weights = jnp.exp(self._mixture_log_probs)
# Make weights broadcast over event shape.
weights = weights.reshape(weights.shape + (1,) * len(self.event_shape))
# Component axis to reduce over.
component_axis = -1 - len(self.event_shape)
# Using: Var(Y) = E[Var(Y|X)] + Var(E[Y|X]).
mean = jnp.sum(means * weights, axis=component_axis)
mean_cond_var = jnp.sum(weights * variances, axis=component_axis)
# Need to add an axis to `mean` to make it broadcast over components.
sq_diff = jnp.square(means - jnp.expand_dims(mean, axis=component_axis))
var_cond_mean = jnp.sum(weights * sq_diff, axis=component_axis)
return mean_cond_var + var_cond_mean
def __getitem__(self, index) -> 'MixtureSameFamily':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return MixtureSameFamily(
mixture_distribution=self.mixture_distribution[index],
components_distribution=self.components_distribution[index])
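# A minimal usage sketch (illustrative only, with arbitrary parameter values),
# assuming a TFP `Normal` may be passed directly as the components distribution
# since it is wrapped via `conversion.as_distribution`.
if __name__ == '__main__':
  mixture = MixtureSameFamily(
      mixture_distribution=categorical.Categorical(logits=jnp.zeros(3)),
      components_distribution=tfd.Normal(
          loc=jnp.array([-1., 0., 1.]), scale=jnp.array([0.1, 0.5, 1.0])))
  key = jax.random.PRNGKey(0)
  samples = mixture.sample(seed=key, sample_shape=4)  # Shape (4,).
  # `log_prob` reduces over the component axis with `logsumexp`, as above.
  print(samples, mixture.log_prob(samples), mixture.mean(), mixture.variance())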
| distrax-master | distrax/_src/distributions/mixture_same_family.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distrax joint distribution over a tree of distributions."""
from typing import Tuple, TypeVar
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
import tree
tfd = tfp.distributions
DistributionT = TypeVar(
'DistributionT', bound=distribution.NestedT[conversion.DistributionLike])
class Joint(distribution.Distribution):
"""Joint distribution over a tree of statistically independent distributions.
Samples from the Joint distribution take the form of a tree structure that
matches the structure of the underlying distributions. Log-probabilities
are summed over the tree.
All distributions in the tree must have the same `batch_shape` in order for
log-probabilities to be computed correctly and for the `batch_shape` of the
Joint distribution to be correct.
"""
def __init__(self, distributions: DistributionT):
"""Initializes a Joint distribution over a tree of distributions.
Args:
distributions: Tree of distributions that must have the same batch shape.
"""
super().__init__()
self._distributions = tree.map_structure(conversion.as_distribution,
distributions)
    batch_shape = None
    first_path = None
    for path, dist in tree.flatten_with_path(self._distributions):
      path = '.'.join(map(str, path))
      if batch_shape is None:
        # Record the batch shape (and location) of the first distribution; all
        # other distributions must match it.
        batch_shape = dist.batch_shape
        first_path = path
      if dist.batch_shape != batch_shape:
        raise ValueError(
            f'Joint distributions must have the same batch shape, but '
            f'distribution "{dist.name}" at location {path} had batch shape '
            f'{dist.batch_shape} which is not equal to the batch shape '
            f'{batch_shape} of the distribution at location {first_path}.')
def _sample_n(
self,
key: chex.PRNGKey,
n: int) -> distribution.EventT:
keys = list(jax.random.split(key, len(tree.flatten(self._distributions))))
keys = tree.unflatten_as(self._distributions, keys)
return tree.map_structure(lambda d, k: d.sample(seed=k, sample_shape=n),
self._distributions, keys)
def _sample_n_and_log_prob(
self,
key: chex.PRNGKey,
n: int) -> Tuple[distribution.EventT, chex.Array]:
keys = list(jax.random.split(key, len(tree.flatten(self._distributions))))
keys = tree.unflatten_as(self._distributions, keys)
samples_and_log_probs = tree.map_structure(
lambda d, k: d.sample_and_log_prob(seed=k, sample_shape=n),
self._distributions, keys)
samples = tree.map_structure_up_to(
self._distributions, lambda p: p[0], samples_and_log_probs)
log_probs = tree.map_structure_up_to(
self._distributions, lambda p: p[1], samples_and_log_probs)
log_probs = jnp.stack(tree.flatten(log_probs))
log_probs = jnp.sum(log_probs, axis=0)
return samples, log_probs
def log_prob(self, value: distribution.EventT) -> chex.Array:
"""Compute the total log probability of the distributions in the tree."""
log_probs = tree.map_structure(lambda dist, value: dist.log_prob(value),
self._distributions, value)
log_probs = jnp.stack(tree.flatten(log_probs))
return jnp.sum(log_probs, axis=0)
@property
def distributions(self) -> DistributionT:
return self._distributions
@property
def event_shape(self) -> distribution.ShapeT:
return tree.map_structure(lambda dist: dist.event_shape,
self._distributions)
@property
def batch_shape(self) -> Tuple[int, ...]:
return tree.flatten(self._distributions)[0].batch_shape
@property
def dtype(self) -> distribution.DTypeT:
return tree.map_structure(lambda dist: dist.dtype, self._distributions)
def entropy(self) -> chex.Array:
return sum(dist.entropy() for dist in tree.flatten(self._distributions))
def log_cdf(self, value: distribution.EventT) -> chex.Array:
return sum(dist.log_cdf(v)
for dist, v in zip(tree.flatten(self._distributions),
tree.flatten(value)))
def mean(self) -> distribution.EventT:
"""Calculates the mean."""
return tree.map_structure(lambda dist: dist.mean(), self._distributions)
def median(self) -> distribution.EventT:
"""Calculates the median."""
return tree.map_structure(lambda dist: dist.median(), self._distributions)
def mode(self) -> distribution.EventT:
"""Calculates the mode."""
return tree.map_structure(lambda dist: dist.mode(), self._distributions)
def __getitem__(self, index) -> 'Joint':
"""See `Distribution.__getitem__`."""
return Joint(tree.map_structure(lambda dist: dist[index],
self._distributions))
def _kl_divergence_joint_joint(
dist1: Joint, dist2: Joint, *unused_args, **unused_kwargs) -> chex.Array:
tree.assert_same_structure(
dist1.distributions, dist2.distributions, check_types=False)
return sum(inner1.kl_divergence(inner2)
for inner1, inner2 in zip(tree.flatten(dist1.distributions),
tree.flatten(dist2.distributions)))
tfd.RegisterKL(Joint, Joint)(_kl_divergence_joint_joint)
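# A minimal usage sketch (illustrative only): a `Joint` over a dict of two
# independent distributions with matching (empty) batch shapes. The module
# paths below follow the layout used elsewhere in this repository.
if __name__ == '__main__':
  from distrax._src.distributions import categorical
  from distrax._src.distributions import uniform
  joint = Joint(dict(
      label=categorical.Categorical(logits=jnp.zeros(3)),
      noise=uniform.Uniform(low=0., high=1.)))
  sample, log_prob = joint.sample_and_log_prob(seed=jax.random.PRNGKey(0))
  # The sample mirrors the tree structure; the log-prob sums over the leaves.
  print(sample['label'], sample['noise'], log_prob)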
| distrax-master | distrax/_src/distributions/joint.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Epsilon-Greedy distributions with respect to a set of preferences."""
from typing import Any, Union
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import distribution
import jax.numpy as jnp
Array = chex.Array
def _argmax_with_random_tie_breaking(preferences: Array) -> Array:
"""Compute probabilities greedily with respect to a set of preferences."""
optimal_actions = preferences == preferences.max(axis=-1, keepdims=True)
return optimal_actions / optimal_actions.sum(axis=-1, keepdims=True)
def _mix_probs_with_uniform(probs: Array, epsilon: float) -> Array:
"""Mix an arbitrary categorical distribution with a uniform distribution."""
num_actions = probs.shape[-1]
uniform_probs = jnp.ones_like(probs) / num_actions
return (1 - epsilon) * probs + epsilon * uniform_probs
class EpsilonGreedy(categorical.Categorical):
"""A Categorical that is ε-greedy with respect to some preferences.
  Given a set of unnormalized preferences, the distribution is a mixture of the
  greedy and uniform distributions, with weights (1-ε) and ε, respectively.
"""
def __init__(self,
preferences: Array,
epsilon: float,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes an EpsilonGreedy distribution.
Args:
preferences: Unnormalized preferences.
epsilon: Mixing parameter ε.
dtype: The type of event samples.
"""
self._preferences = jnp.asarray(preferences)
self._epsilon = epsilon
greedy_probs = _argmax_with_random_tie_breaking(self._preferences)
probs = _mix_probs_with_uniform(greedy_probs, epsilon)
super().__init__(probs=probs, dtype=dtype)
@property
def epsilon(self) -> float:
"""Mixing parameters of the distribution."""
return self._epsilon
@property
def preferences(self) -> Array:
"""Unnormalized preferences."""
return self._preferences
def __getitem__(self, index) -> 'EpsilonGreedy':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return EpsilonGreedy(
preferences=self.preferences[index],
epsilon=self.epsilon,
dtype=self.dtype)
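# A small worked example of the mixing formula above (illustrative only): with
# preferences [1., 2., 2.] the two tied maxima share the greedy mass, so with
# ε = 0.3 the probabilities are 0.3/3 + 0.7 * [0, 0.5, 0.5] = [0.1, 0.45, 0.45].
if __name__ == '__main__':
  dist = EpsilonGreedy(preferences=jnp.array([1., 2., 2.]), epsilon=0.3)
  print(dist.probs)  # Approximately [0.1, 0.45, 0.45].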
| distrax-master | distrax/_src/distributions/epsilon_greedy.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Uniform distribution."""
import math
from typing import Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Uniform(distribution.Distribution):
"""Uniform distribution with `low` and `high` parameters."""
equiv_tfp_cls = tfd.Uniform
def __init__(self, low: Numeric = 0., high: Numeric = 1.):
"""Initializes a Uniform distribution.
Args:
low: Lower bound.
high: Upper bound.
"""
super().__init__()
self._low = conversion.as_float_array(low)
self._high = conversion.as_float_array(high)
self._batch_shape = jax.lax.broadcast_shapes(
self._low.shape, self._high.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of the events."""
return ()
@property
def low(self) -> Array:
"""Lower bound."""
return jnp.broadcast_to(self._low, self.batch_shape)
@property
def high(self) -> Array:
"""Upper bound."""
return jnp.broadcast_to(self._high, self.batch_shape)
@property
def range(self) -> Array:
return self.high - self.low
@property
def batch_shape(self) -> Tuple[int, ...]:
return self._batch_shape
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
new_shape = (n,) + self.batch_shape
uniform = jax.random.uniform(
key=key, shape=new_shape, dtype=self.range.dtype, minval=0., maxval=1.)
low = jnp.expand_dims(self._low, range(uniform.ndim - self._low.ndim))
range_ = jnp.expand_dims(self.range, range(uniform.ndim - self.range.ndim))
return low + range_ * uniform
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples = self._sample_n(key, n)
log_prob = -jnp.log(self.range)
log_prob = jnp.repeat(log_prob[None], n, axis=0)
return samples, log_prob
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return jnp.log(self.prob(value))
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
return jnp.where(
jnp.logical_or(value < self.low, value > self.high),
jnp.zeros_like(value),
jnp.ones_like(value) / self.range)
def entropy(self) -> Array:
"""Calculates the entropy."""
return jnp.log(self.range)
def mean(self) -> Array:
"""Calculates the mean."""
return (self.low + self.high) / 2.
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.square(self.range) / 12.
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self.range / math.sqrt(12.)
def median(self) -> Array:
"""Calculates the median."""
return self.mean()
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
ones = jnp.ones_like(self.range)
zeros = jnp.zeros_like(ones)
result_if_not_big = jnp.where(
value < self.low, zeros, (value - self.low) / self.range)
return jnp.where(value > self.high, ones, result_if_not_big)
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def __getitem__(self, index) -> 'Uniform':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Uniform(low=self.low[index], high=self.high[index])
def _kl_divergence_uniform_uniform(
dist1: Union[Uniform, tfd.Uniform],
dist2: Union[Uniform, tfd.Uniform],
*unused_args, **unused_kwargs,
) -> Array:
"""Obtain the KL divergence `KL(dist1 || dist2)` between two Uniforms.
Note that the KL divergence is infinite if the support of `dist1` is not a
subset of the support of `dist2`.
Args:
dist1: A Uniform distribution.
dist2: A Uniform distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
return jnp.where(
jnp.logical_and(dist2.low <= dist1.low, dist1.high <= dist2.high),
jnp.log(dist2.high - dist2.low) - jnp.log(dist1.high - dist1.low),
jnp.inf)
# Register the KL functions with TFP.
tfd.RegisterKL(Uniform, Uniform)(_kl_divergence_uniform_uniform)
tfd.RegisterKL(Uniform, Uniform.equiv_tfp_cls)(_kl_divergence_uniform_uniform)
tfd.RegisterKL(Uniform.equiv_tfp_cls, Uniform)(_kl_divergence_uniform_uniform)
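# A small worked example of the KL formula above (illustrative only):
# KL(U[0,1] || U[0,2]) = log(2 - 0) - log(1 - 0) = log(2), while the reverse KL
# is infinite because [0, 2] is not a subset of [0, 1].
if __name__ == '__main__':
  d1, d2 = Uniform(low=0., high=1.), Uniform(low=0., high=2.)
  print(d1.kl_divergence(d2))  # ~0.693.
  print(d2.kl_divergence(d1))  # inf.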
| distrax-master | distrax/_src/distributions/uniform.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bernoulli distribution."""
from typing import Any, Optional, Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Bernoulli(distribution.Distribution):
"""Bernoulli distribution.
Bernoulli distribution with parameter `probs`, the probability of outcome `1`.
"""
equiv_tfp_cls = tfd.Bernoulli
def __init__(self,
logits: Optional[Numeric] = None,
probs: Optional[Numeric] = None,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes a Bernoulli distribution.
Args:
logits: Logit transform of the probability of a `1` event (`0` otherwise),
i.e. `probs = sigmoid(logits)`. Only one of `logits` or `probs` can be
specified.
probs: Probability of a `1` event (`0` otherwise). Only one of `logits` or
`probs` can be specified.
dtype: The type of event samples.
"""
super().__init__()
# Validate arguments.
if (logits is None) == (probs is None):
raise ValueError(
f'One and exactly one of `logits` and `probs` should be `None`, '
f'but `logits` is {logits} and `probs` is {probs}.')
if not (jnp.issubdtype(dtype, bool) or
jnp.issubdtype(dtype, jnp.integer) or
jnp.issubdtype(dtype, jnp.floating)):
raise ValueError(
f'The dtype of `{self.name}` must be boolean, integer or '
f'floating-point, instead got `{dtype}`.')
# Parameters of the distribution.
self._probs = None if probs is None else conversion.as_float_array(probs)
self._logits = None if logits is None else conversion.as_float_array(logits)
self._dtype = dtype
@property
def event_shape(self) -> Tuple[int, ...]:
"""See `Distribution.event_shape`."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""See `Distribution.batch_shape`."""
if self._logits is not None:
return self._logits.shape
return self._probs.shape
@property
def logits(self) -> Array:
"""The logits of a `1` event."""
if self._logits is not None:
return self._logits
return jnp.log(self._probs) - jnp.log(1 - self._probs)
@property
def probs(self) -> Array:
"""The probabilities of a `1` event.."""
if self._probs is not None:
return self._probs
return jax.nn.sigmoid(self._logits)
def _log_probs_parameter(self) -> Tuple[Array, Array]:
if self._logits is None:
return (jnp.log1p(-1. * self._probs),
jnp.log(self._probs))
return (-jax.nn.softplus(self._logits),
-jax.nn.softplus(-1. * self._logits))
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
probs = self.probs
new_shape = (n,) + probs.shape
uniform = jax.random.uniform(
key=key, shape=new_shape, dtype=probs.dtype, minval=0., maxval=1.)
return jnp.less(uniform, probs).astype(self._dtype)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
log_probs0, log_probs1 = self._log_probs_parameter()
return (math.multiply_no_nan(log_probs0, 1 - value) +
math.multiply_no_nan(log_probs1, value))
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
probs1 = self.probs
probs0 = 1 - probs1
return (math.multiply_no_nan(probs0, 1 - value) +
math.multiply_no_nan(probs1, value))
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
# For value < 0 the output should be zero because support = {0, 1}.
return jnp.where(value < 0,
jnp.array(0., dtype=self.probs.dtype),
jnp.where(value >= 1,
jnp.array(1.0, dtype=self.probs.dtype),
1 - self.probs))
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
(probs0, probs1,
log_probs0, log_probs1) = _probs_and_log_probs(self)
return -1. * (
math.multiply_no_nan(log_probs0, probs0) +
math.multiply_no_nan(log_probs1, probs1))
def mean(self) -> Array:
"""See `Distribution.mean`."""
return self.probs
def variance(self) -> Array:
"""See `Distribution.variance`."""
return (1 - self.probs) * self.probs
def mode(self) -> Array:
"""See `Distribution.probs`."""
return (self.probs > 0.5).astype(self._dtype)
def __getitem__(self, index) -> 'Bernoulli':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
if self._logits is not None:
return Bernoulli(logits=self.logits[index], dtype=self._dtype)
return Bernoulli(probs=self.probs[index], dtype=self._dtype)
def _probs_and_log_probs(
dist: Union[Bernoulli, tfd.Bernoulli]
) -> Tuple[Array, Array, Array, Array]:
"""Calculates both `probs` and `log_probs`."""
# pylint: disable=protected-access
if dist._logits is None:
probs0 = 1. - dist._probs
probs1 = 1. - probs0
log_probs0 = jnp.log1p(-1. * dist._probs)
log_probs1 = jnp.log(dist._probs)
else:
probs0 = jax.nn.sigmoid(-1. * dist._logits)
probs1 = jax.nn.sigmoid(dist._logits)
log_probs0 = -jax.nn.softplus(dist._logits)
log_probs1 = -jax.nn.softplus(-1. * dist._logits)
return probs0, probs1, log_probs0, log_probs1
def _kl_divergence_bernoulli_bernoulli(
dist1: Union[Bernoulli, tfd.Bernoulli],
dist2: Union[Bernoulli, tfd.Bernoulli],
*unused_args, **unused_kwargs,
) -> Array:
"""KL divergence `KL(dist1 || dist2)` between two Bernoulli distributions.
Args:
dist1: instance of a Bernoulli distribution.
dist2: instance of a Bernoulli distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
one_minus_p1, p1, log_one_minus_p1, log_p1 = _probs_and_log_probs(dist1)
_, _, log_one_minus_p2, log_p2 = _probs_and_log_probs(dist2)
# KL[a || b] = Pa * Log[Pa / Pb] + (1 - Pa) * Log[(1 - Pa) / (1 - Pb)]
# Multiply each factor individually to avoid Inf - Inf
return (
math.multiply_no_nan(log_p1, p1) -
math.multiply_no_nan(log_p2, p1) +
math.multiply_no_nan(log_one_minus_p1, one_minus_p1) -
math.multiply_no_nan(log_one_minus_p2, one_minus_p1)
)
# Register the KL functions with TFP.
tfd.RegisterKL(Bernoulli, Bernoulli)(
_kl_divergence_bernoulli_bernoulli)
tfd.RegisterKL(Bernoulli, Bernoulli.equiv_tfp_cls)(
_kl_divergence_bernoulli_bernoulli)
tfd.RegisterKL(Bernoulli.equiv_tfp_cls, Bernoulli)(
_kl_divergence_bernoulli_bernoulli)
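# A small worked example of the KL formula above (illustrative only): for
# p1 = 0.75 and p2 = 0.5,
# KL = 0.75 * log(0.75 / 0.5) + 0.25 * log(0.25 / 0.5) ~= 0.1308.
if __name__ == '__main__':
  print(Bernoulli(probs=0.75).kl_divergence(Bernoulli(probs=0.5)))  # ~0.1308.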
| distrax-master | distrax/_src/distributions/bernoulli.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical-uniform distributions."""
from typing import Tuple
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import distribution
from distrax._src.distributions import mixture_same_family
from distrax._src.distributions import uniform
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class CategoricalUniform(distribution.Distribution):
"""Mixture Categorical-Uniform distribution.
Given an interval `[a, b]` and a probability vector `p = [p_1, ..., p_K]`, a
random variable `x` follows a Categorical-Uniform distribution if its PDF
  is `p(x) = p_k / C` whenever `(k-1)C <= x - a < kC` for some `k = 1, ..., K`,
where `C = (b-a) / K`, and `p(x) = 0` otherwise.
Equivalently, the Categorical-Uniform can be understood as a mixture of
Uniform distributions, with mixture probabilities `p_k` and Uniform component
distributions with support in `[a + (k-1)C, a + kC]`.
"""
def __init__(
self,
*,
low: Numeric,
high: Numeric,
logits: Array,
) -> None:
"""Initializes a CategoricalUniform distribution.
Args:
low: The lowest value of the support, denoted `a` in the class
docstring. It can also be a batch of values.
high: The highest value of the support, denoted `b` in the class
docstring. It can also be a batch of values.
logits: The unnormalized log-probabilities of the mixture. It must be an
array of length `K`. Additional leading dimensions, if any, index
batches.
"""
super().__init__()
self._low = conversion.as_float_array(low)
self._high = conversion.as_float_array(high)
self._logits = conversion.as_float_array(logits)
if self._logits.ndim < 1:
raise ValueError(
'The parameter `logits` must have at least one dimension.')
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return jax.lax.broadcast_shapes(
self._low.shape, self._high.shape, self._logits.shape[:-1])
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
quantile = jax.random.uniform(key, (n,) + self.batch_shape)
return self._inverse_cdf(quantile)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return self._get_mixture().log_prob(value)
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
# The following holds because the components have non-overlapping domains.
mixture = self._get_mixture()
return mixture.mixture_distribution.entropy() + jnp.log(
(self._high - self._low) / self.num_bins)
def mean(self) -> Array:
"""Calculates the mean."""
return self._get_mixture().mean()
def variance(self) -> Array:
"""Calculates the variance."""
return self._get_mixture().variance()
def __getitem__(self, index) -> 'CategoricalUniform':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return CategoricalUniform(
low=self.low[index], high=self.high[index], logits=self.logits[index])
def _get_category_limits(self) -> Array:
"""Gets limits for each category."""
return jnp.linspace(self.low, self.high, self.num_bins + 1, axis=-1)
def _get_mixture(self) -> mixture_same_family.MixtureSameFamily:
"""Gets a mixture distribution."""
limits = self._get_category_limits()
return mixture_same_family.MixtureSameFamily(
components_distribution=uniform.Uniform(
low=limits[..., :-1], high=limits[..., 1:]),
mixture_distribution=categorical.Categorical(logits=self.logits),
)
def _inverse_cdf(self, quantile):
"""Inverse cumulative density function."""
probs = jax.nn.softmax(self.logits, axis=-1)
cum_probs = jnp.cumsum(probs, axis=-1)
quantile_limits = jnp.concatenate(
[jnp.zeros_like(cum_probs[..., :1]), cum_probs], axis=-1)
limits = self._get_category_limits()
domain_diff = jnp.diff(limits, axis=-1)
quantile_diff = jnp.diff(quantile_limits, axis=-1)
slopes = domain_diff / quantile_diff
quantile_contributions = jnp.minimum(
quantile_diff,
jax.nn.relu(quantile[..., None] - quantile_limits[..., :-1]),
)
return self.low + jnp.sum(slopes * quantile_contributions, axis=-1)
@property
def low(self) -> Array:
# Broadcasted version of the argument passed in the initializer.
return jnp.broadcast_to(self._low, self.batch_shape)
@property
def high(self) -> Array:
# Broadcasted version of the argument passed in the initializer.
return jnp.broadcast_to(self._high, self.batch_shape)
@property
def logits(self) -> Array:
return jnp.broadcast_to(self._logits, self.batch_shape + (self.num_bins,))
@property
def num_bins(self) -> int:
return self._logits.shape[-1]
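# A minimal usage sketch of the piecewise-constant density described in the
# class docstring (illustrative only): three equally weighted bins on [0, 3]
# give density 1/3 everywhere on the support and entropy log(3).
if __name__ == '__main__':
  dist = CategoricalUniform(low=0., high=3., logits=jnp.zeros(3))
  print(dist.log_prob(jnp.asarray(1.5)))  # ~log(1/3).
  print(dist.entropy())                   # ~log(3).
  print(dist.sample(seed=jax.random.PRNGKey(0), sample_shape=2))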
| distrax-master | distrax/_src/distributions/categorical_uniform.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""OneHotCategorical distribution."""
from typing import Any, Optional, Tuple, Union
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import distribution
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class OneHotCategorical(categorical.Categorical):
"""OneHotCategorical distribution."""
equiv_tfp_cls = tfd.OneHotCategorical
def __init__(self,
logits: Optional[Array] = None,
probs: Optional[Array] = None,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes a OneHotCategorical distribution.
Args:
logits: Logit transform of the probability of each category. Only one
of `logits` or `probs` can be specified.
probs: Probability of each category. Only one of `logits` or `probs` can
be specified.
dtype: The type of event samples.
"""
super().__init__(logits=logits, probs=probs, dtype=dtype)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return (self.num_categories,)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
new_shape = (n,) + self.logits.shape[:-1]
is_valid = jnp.logical_and(
jnp.all(jnp.isfinite(self.probs), axis=-1, keepdims=True),
jnp.all(self.probs >= 0, axis=-1, keepdims=True))
draws = jax.random.categorical(
key=key, logits=self.logits, axis=-1, shape=new_shape)
draws_one_hot = jax.nn.one_hot(
draws, num_classes=self.num_categories).astype(self._dtype)
return jnp.where(is_valid, draws_one_hot, jnp.ones_like(draws_one_hot) * -1)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return jnp.sum(math.multiply_no_nan(self.logits, value), axis=-1)
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
return jnp.sum(math.multiply_no_nan(self.probs, value), axis=-1)
def mode(self) -> Array:
"""Calculates the mode."""
preferences = self._probs if self._logits is None else self._logits
assert preferences is not None
greedy_index = jnp.argmax(preferences, axis=-1)
return jax.nn.one_hot(greedy_index, self.num_categories).astype(self._dtype)
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jnp.sum(math.multiply_no_nan(
jnp.cumsum(self.probs, axis=-1), value), axis=-1)
def __getitem__(self, index) -> 'OneHotCategorical':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
if self._logits is not None:
return OneHotCategorical(logits=self.logits[index], dtype=self._dtype)
return OneHotCategorical(probs=self.probs[index], dtype=self._dtype)
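# A minimal usage sketch (illustrative only, with arbitrary probabilities):
# events are one-hot vectors of length `num_categories`, so `prob` of a one-hot
# value picks out that category's probability.
if __name__ == '__main__':
  dist = OneHotCategorical(probs=jnp.array([0.2, 0.3, 0.5]))
  print(dist.prob(jnp.array([0., 1., 0.])))      # 0.3.
  print(dist.mode())                             # One-hot at the argmax: [0, 0, 1].
  print(dist.sample(seed=jax.random.PRNGKey(0)))  # A one-hot sample.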
| distrax-master | distrax/_src/distributions/one_hot_categorical.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `distribution.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import distribution
import jax
import jax.numpy as jnp
import numpy as np
class DummyUnivariateDist(distribution.Distribution):
"""Dummy univariate distribution for testing."""
def _sample_n(self, key, n):
return jax.random.uniform(key, shape=(n,))
def log_prob(self, value):
"""Log probability density/mass function."""
@property
def event_shape(self):
"""Shape of the events."""
return jnp.shape([])
class DummyMultivariateDist(distribution.Distribution):
"""Dummy multivariate distribution for testing."""
def __init__(self, dimension):
super().__init__()
self._dimension = dimension
def _sample_n(self, key, n):
return jax.random.uniform(key, shape=(n,) + self._dimension)
def log_prob(self, value):
"""Log probability density/mass function."""
@property
def event_shape(self):
"""Shape of the events."""
return (self._dimension,)
class DummyNestedDist(distribution.Distribution):
"""Dummy distribution with nested events for testing."""
def __init__(self, batch_shape=()) -> None:
self._batch_shape = batch_shape
def _sample_n(self, key, n):
return dict(
foo=jax.random.uniform(key, shape=(n,) + self._batch_shape),
bar=jax.random.uniform(key, shape=(n,) + self._batch_shape + (3,)))
def log_prob(self, value):
"""Log probability density/mass function."""
@property
def event_shape(self):
"""Shape of the events."""
return dict(foo=(), bar=(3,))
class DistributionTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.uni_dist = DummyUnivariateDist()
@chex.all_variants(with_jit=False, with_device=False, with_pmap=False)
@parameterized.named_parameters(
('0d input', 1, (1,)),
('0d np.int16 input', np.int16(2), (2,)),
('0d np.int32 input', np.int32(2), (2,)),
('0d np.int64 input', np.int64(2), (2,)),
('1d tuple input', (2,), (2,)),
('1d list input', [2], (2,)),
('1d tuple of np.int32 input', (np.int32(2),), (2,)),
('2d input', (2, 3), (2, 3)),
('3d input', (2, 3, 4), (2, 3, 4)))
def test_sample_univariate_shape(self, shape, expected_shape):
sample_fn = self.variant(
lambda key: self.uni_dist.sample(seed=key, sample_shape=shape))
samples = sample_fn(0)
np.testing.assert_equal(samples.shape, expected_shape)
@chex.all_variants(with_jit=False, with_device=False, with_pmap=False)
@parameterized.named_parameters(
('0d input', (5,), 1, (1, 5)),
('0d np.int16 input', (5,), np.int16(1), (1, 5)),
('0d np.int32 input', (5,), np.int32(1), (1, 5)),
('0d np.int64 input', (5,), np.int64(1), (1, 5)),
('1d tuple input', (5,), (2,), (2, 5)),
('1d list input', (5,), [2], (2, 5)),
('1d tuple of np.int32 input', (5,), (np.int32(2),), (2, 5)),
('2d input', (4, 5), (2, 3), (2, 3, 4, 5)))
def test_sample_multivariate_shape(self, var_dim, shape, expected_shape):
mult_dist = DummyMultivariateDist(var_dim)
sample_fn = self.variant(
lambda key: mult_dist.sample(seed=key, sample_shape=shape))
samples = sample_fn(0)
np.testing.assert_equal(samples.shape, expected_shape)
@chex.all_variants(with_jit=False, with_device=False, with_pmap=False)
@parameterized.named_parameters(
('0d input', 1),
('0d np.int16 input', np.int16(1)),
('0d np.int32 input', np.int32(1)),
('0d np.int64 input', np.int64(1)),
('1d tuple input', (2,)),
('1d list input', [2]),
('2d input', (2, 3)),
)
def test_sample_nested_shape(self, shape):
dist = DummyNestedDist()
sample_fn = self.variant(
lambda key: dist.sample(seed=key, sample_shape=shape))
samples = sample_fn(0)
# Ensure shape is a tuple.
try:
iter(shape)
except TypeError:
shape = (shape,)
shape = tuple(shape)
np.testing.assert_equal(samples['foo'].shape, shape)
np.testing.assert_equal(samples['bar'].shape, shape + (3,))
@parameterized.named_parameters(
('empty batch', ()),
('1d batch', (3,)),
('2d batch', (3, 4)),
)
def test_nested_batch_shape(self, batch_shape):
dist = DummyNestedDist(batch_shape=batch_shape)
np.testing.assert_equal(dist.batch_shape, batch_shape)
@chex.all_variants(with_jit=False, with_device=False, with_pmap=False)
def test_sample_keys(self):
shape = 5
key = 0
sample_fn = self.variant(
lambda key: self.uni_dist.sample(seed=key, sample_shape=shape))
samples_from_int = sample_fn(key)
rng = jax.random.PRNGKey(key)
samples_from_prngkey = sample_fn(rng)
np.testing.assert_array_equal(samples_from_int, samples_from_prngkey)
def test_jittable(self):
@jax.jit
def sampler(dist, seed):
return dist.sample(seed=seed)
seed = jax.random.PRNGKey(0)
dist = DummyMultivariateDist((5,))
np.testing.assert_array_equal(
sampler(dist, seed=seed), dist.sample(seed=seed))
@parameterized.named_parameters(
('int', int),
('np.int16', np.int16),
('np.int32', np.int32),
('np.int64', np.int64),
('PRNGKey', jax.random.PRNGKey),
)
def test_convert_seed(self, dtype):
rng, _ = distribution.convert_seed_and_sample_shape(dtype(0), 2)
jax.random.split(rng) # Should not raise an error.
@parameterized.named_parameters(
('int', 2, (2,)),
('np.int16', np.int16(2), (2,)),
('np.int32', np.int32(2), (2,)),
('np.int64', np.int64(2), (2,)),
('int tuple', (2, 3), (2, 3)),
('np.int16 tuple', (np.int16(2), np.int16(3)), (2, 3)),
('np.int32 tuple', (np.int32(2), np.int32(3)), (2, 3)),
('np.int64 tuple', (np.int64(2), np.int64(3)), (2, 3)),
('int list', [2, 3], (2, 3)),
('np.int16 list', [np.int16(2), np.int16(3)], (2, 3)),
('np.int32 list', [np.int32(2), np.int32(3)], (2, 3)),
('np.int64 list', [np.int64(2), np.int64(3)], (2, 3)),
)
def test_convert_sample_shape(self, shape_in, shape_out):
_, sample_shape = distribution.convert_seed_and_sample_shape(0, shape_in)
assert sample_shape == shape_out
@parameterized.named_parameters(
('single', 0, (np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]),
np.array([[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]),
np.array([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]))),
('range', slice(-1),
(np.array([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
]), np.array([[[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]]),
np.array([[[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]]))),
('two_axes', (slice(None), 1), (np.array(
[[0, 0, 0, 0], [1, 1, 1, 1]]), np.array([[1, 1, 1, 1], [1, 1, 1, 1]]),
np.array([[0, 1, 2, 3], [0, 1, 2, 3]]))),
('ellipsis', (Ellipsis, 2),
(np.array([[0, 0, 0], [1, 1, 1]]), np.array(
[[0, 1, 2], [0, 1, 2]]), np.array([[2, 2, 2], [2, 2, 2]]))),
('np_array', np.array([0, 1, -1]),
((np.array([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]],
dtype=np.int32),
np.array([[[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]],
[[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]],
[[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]],
dtype=np.int32),
np.array([[[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]],
[[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]],
[[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]],
dtype=np.int32)))),
)
def test_to_batch_shape_index(self, index, expected):
np.testing.assert_allclose(
distribution.to_batch_shape_index(batch_shape=(2, 3, 4), index=index),
expected, 1e-3)
def test_to_batch_shape_index_jnp_array(self):
# This test needs to be a separate function since JAX doesn't allow creating
    # jnp.arrays at the top level of the program.
# NOTE: Using jnp.arrays behaves differently compared to np.arrays as it
# wraps instead of raising. Raising for same index is tested for np.arrays
# below.
index = (-1, 0)
expected = (jnp.array([1, 1, 1, 1], dtype=jnp.int32),
jnp.array([0, 0, 0, 0], dtype=jnp.int32),
jnp.array([0, 1, 2, 3], dtype=jnp.int32))
np.testing.assert_allclose(
distribution.to_batch_shape_index(batch_shape=(2, 3, 4), index=index),
expected, 1e-3)
@parameterized.named_parameters(
('long_index', (1, 2, 3, 4)),
('np_array_out_of_bounds', np.array([-1, 2])),
)
def test_to_batch_shape_index_raises(self, index):
with self.assertRaisesRegex(IndexError, 'not compatible with index'):
distribution.to_batch_shape_index(
batch_shape=(2, 3, 4), index=index)
def test_multivariate_survival_function_raises(self):
mult_dist = DummyMultivariateDist(42)
with self.assertRaises(NotImplementedError):
mult_dist.survival_function(jnp.zeros(42))
with self.assertRaises(NotImplementedError):
mult_dist.log_survival_function(jnp.zeros(42))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/distribution_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `deterministic.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import deterministic
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
class DeterministicTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(deterministic.Deterministic)
def test_loc(self):
dist_params = {'loc': [0.1, 0.5, 1.5]}
dist = self.distrax_cls(**dist_params)
self.assertion_fn(rtol=1e-3)(dist.loc, dist_params['loc'])
@parameterized.named_parameters(
('None', None),
('0.0', 0.0),
('0.1', 0.1))
def test_atol(self, atol):
dist_params = {'loc': np.asarray([0.1, 0.5, 1.5]), 'atol': atol}
dist = self.distrax_cls(**dist_params)
broadcasted_atol = np.zeros((3,)) if atol is None else atol * np.ones((3,))
self.assertion_fn(rtol=1e-3)(dist.atol, broadcasted_atol)
@parameterized.named_parameters(
('None', None),
('0.0', 0.0),
('0.1', 0.1))
def test_rtol(self, rtol):
dist_params = {'loc': np.asarray([0.1, 0.5, 1.5]), 'rtol': rtol}
dist = self.distrax_cls(**dist_params)
broadcasted_rtol = np.zeros((3,)) if rtol is None else rtol * np.ones((3,))
self.assertion_fn(rtol=1e-3)(dist.rtol, broadcasted_rtol)
@parameterized.named_parameters(
('atol_None_rtol_None', None, None),
('atol_0.1_rtol_None', 0.1, None),
('atol_None_rtol_0.1', None, 0.1),
('atol_0.05_rtol_0.1', 0.05, 0.1))
def test_slack(self, atol, rtol):
loc = np.asarray([0.1, 0.5, 1.5])
target_value = (0 if atol is None else atol) + (
0 if rtol is None else rtol) * np.abs(loc)
dist_params = {'loc': loc, 'rtol': rtol, 'atol': atol}
dist = self.distrax_cls(**dist_params)
self.assertion_fn(rtol=1e-3)(dist.slack, target_value)
def test_invalid_parameters(self):
self._test_raises_error(
dist_kwargs={'loc': 2., 'atol': np.array([0.1, 0.2])})
self._test_raises_error(
dist_kwargs={'loc': 2., 'rtol': np.array([0.1, 0.2])})
@parameterized.named_parameters(
('1d', np.asarray([0., 1.])),
('2d', np.zeros((2, 3))),
)
def test_event_shape(self, loc):
dist_params = {'loc': loc}
super()._test_event_shape((), dist_params)
@chex.all_variants
@parameterized.named_parameters(
('1d, no shape', [0., 1.], ()),
('1d, int shape', [0., 1.], 1),
('1d, 1-tuple shape', [0., 1.], (1,)),
('1d, 2-tuple shape', [0., 1.], (2, 2)),
('2d, no shape', np.zeros((2, 3)), ()),
('2d, int shape', np.zeros((2, 3)), 1),
('2d, 1-tuple shape', np.zeros((2, 3)), (1,)),
('2d, 2-tuple shape', np.zeros((2, 3)), (5, 4)),
)
def test_sample_shape(self, loc, sample_shape):
dist_params = {'loc': np.asarray(loc)}
super()._test_sample_shape(
dist_args=(),
dist_kwargs=dist_params,
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('int32', jnp.int32),
('int64', jnp.int64),
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(loc=jnp.zeros((), dtype=dtype))
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d, no shape', [0., 1.], ()),
('1d, int shape', [0., 1.], 1),
('1d, 1-tuple shape', [0., 1.], (1,)),
('1d, 2-tuple shape', [0., 1.], (2, 2)),
('2d, no shape', np.zeros((2, 3)), ()),
('2d, int shape', np.zeros((2, 3)), 1),
('2d, 1-tuple shape', np.zeros((2, 3)), (1,)),
('2d, 2-tuple shape', np.zeros((2, 3)), (5, 4)),
)
def test_sample_and_log_prob(self, loc, sample_shape):
dist_params = {'loc': np.asarray(loc)}
super()._test_sample_and_log_prob(
dist_args=(),
dist_kwargs=dist_params,
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants
@parameterized.named_parameters(
('log_prob', 'log_prob'),
('prob', 'prob'),
('cdf', 'cdf'),
('log_cdf', 'log_cdf'),
)
def test_method_with_inputs_at_loc(self, function_string):
loc = np.asarray([0.1, -0.9, 5.1])
dist_params = {'loc': loc}
inputs = np.repeat(loc[None, :], 10, axis=0)
super()._test_attribute(
attribute_string=function_string,
dist_kwargs=dist_params,
call_args=(inputs,),
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants
@parameterized.named_parameters(
('log_prob', 'log_prob'),
('prob', 'prob'),
('cdf', 'cdf'),
('log_cdf', 'log_cdf'),
)
def test_method_with_inputs_at_random_inputs(self, function_string):
loc = np.asarray([0.1, -0.9, 5.1])
dist_params = {'loc': loc}
inputs = 0.1 * np.random.normal(size=(10,) + (len(loc),))
super()._test_attribute(
attribute_string=function_string,
dist_kwargs=dist_params,
call_args=(inputs,),
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants
@parameterized.named_parameters(
('log_prob_stddev0', 'log_prob', 0.0, 0.05, 0.1),
('log_prob_stddev0.05', 'log_prob', 0.05, 0.05, 0.1),
('log_prob_stddev0.1', 'log_prob', 0.1, 0.05, 0.1),
('prob_stddev0', 'prob', 0.0, 0.05, 0.1),
('prob_stddev0.05', 'prob', 0.05, 0.05, 0.1),
('prob_stddev0.1', 'prob', 0.1, 0.05, 0.1),
('cdf_stddev0', 'cdf', 0.0, 0.05, 0.1),
('cdf_stddev0.05', 'cdf', 0.05, 0.05, 0.1),
('cdf_stddev0.1', 'cdf', 0.1, 0.05, 0.1),
('log_cdf_stddev0', 'log_cdf', 0.0, 0.05, 0.1),
('log_cdf_stddev0.05', 'log_cdf', 0.05, 0.05, 0.1),
('log_cdf_stddev0.1', 'log_cdf', 0.1, 0.05, 0.1),
)
def test_method_with_inputs_and_slack(self, function_string, inputs_stddev,
atol, rtol):
loc = np.asarray([[4., -1., 0.], [0.5, 0.1, -8.]])
dist_params = {'loc': loc, 'atol': atol, 'rtol': rtol}
inputs = loc[None, ...] + inputs_stddev * np.random.normal(
size=(20,) + loc.shape)
super()._test_attribute(
attribute_string=function_string,
dist_kwargs=dist_params,
call_args=(inputs,),
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy', [0., 1.], 'entropy'),
('mean', [0., 1.], 'mean'),
('mode', [0., 1.], 'mode'),
('variance', [0., 1.], 'variance'),
('variance from rank-2 params', np.ones((2, 3)), 'variance'),
('stddev', [-1.], 'stddev'),
('stddev from rank-2 params', -np.ones((2, 3)), 'stddev'),
)
def test_method(self, distr_params, function_string):
super()._test_attribute(
attribute_string=function_string,
dist_kwargs={'loc': np.asarray(distr_params)},
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
loc1 = np.random.randn(3)
loc2 = np.stack([loc1, np.random.randn(3)], axis=0)
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'loc': loc1,
},
dist2_kwargs={
'loc': loc2,
},
assertion_fn=self.assertion_fn(rtol=1e-3))
def test_jittable(self):
super()._test_jittable((np.array([0., 4., -1., 4.]),))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
loc = jnp.array(np.random.randn(3, 4, 5))
atol = jnp.array(np.random.randn(3, 4, 5))
rtol = jnp.array(np.random.randn(3, 4, 5))
dist = self.distrax_cls(loc=loc, atol=atol, rtol=rtol)
self.assertion_fn(rtol=1e-3)(dist[slice_].loc, loc[slice_])
self.assertion_fn(rtol=1e-3)(dist[slice_].atol, atol[slice_])
self.assertion_fn(rtol=1e-3)(dist[slice_].rtol, rtol[slice_])
def test_slice_different_parameterization(self):
loc = jnp.array(np.random.randn(3, 4, 5))
atol = jnp.array(np.random.randn(4, 5))
rtol = jnp.array(np.random.randn(4, 5))
dist = self.distrax_cls(loc=loc, atol=atol, rtol=rtol)
self.assertion_fn(rtol=1e-3)(dist[0].loc, loc[0])
self.assertion_fn(rtol=1e-3)(dist[0].atol, atol) # Not slicing atol.
self.assertion_fn(rtol=1e-3)(dist[0].rtol, rtol) # Not slicing rtol.
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/deterministic_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MultivariateNormalDiagPlusLowRank distribution."""
from typing import Optional
import chex
from distrax._src.bijectors.diag_linear import DiagLinear
from distrax._src.bijectors.diag_plus_low_rank_linear import DiagPlusLowRankLinear
from distrax._src.distributions import distribution
from distrax._src.distributions.mvn_from_bijector import MultivariateNormalFromBijector
from distrax._src.utils import conversion
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
def _check_parameters(
loc: Optional[Array],
scale_diag: Optional[Array],
scale_u_matrix: Optional[Array],
scale_v_matrix: Optional[Array]) -> None:
"""Checks that the inputs are correct."""
if all(x is None for x in [loc, scale_diag, scale_u_matrix]):
raise ValueError(
'At least one of `loc`, `scale_diag`, and `scale_u_matrix` must '
'be specified.')
if scale_v_matrix is not None and scale_u_matrix is None:
raise ValueError('`scale_v_matrix` can be specified only when '
'`scale_u_matrix` is also specified.')
for name, x, n_dims in [
('loc', loc, 1), ('scale_diag', scale_diag, 1),
('scale_u_matrix', scale_u_matrix, 2),
('scale_v_matrix', scale_v_matrix, 2)]:
if x is not None and x.ndim < n_dims:
raise ValueError(f'`{name}` must have at least {n_dims} dimensions.')
if scale_u_matrix is not None and scale_v_matrix is not None:
if scale_u_matrix.shape[-1] != scale_v_matrix.shape[-1]:
raise ValueError(
f'The last dimension of `scale_u_matrix` must coincide with '
f'the last dimension of `scale_v_matrix`, but got '
f'`scale_u_matrix.shape[-1] = {scale_u_matrix.shape[-1]}`'
f' and `scale_v_matrix.shape[-1] = {scale_v_matrix.shape[-1]}`.')
if scale_u_matrix is not None and scale_u_matrix.shape[-1] < 1:
raise ValueError(
'The last dimension of `scale_u_matrix` cannot be zero.')
loc_dim = None if loc is None else loc.shape[-1]
scale_diag_dim = None if scale_diag is None else scale_diag.shape[-1]
scale_u_matrix_dim = (
None if scale_u_matrix is None else scale_u_matrix.shape[-2])
scale_v_matrix_dim = (
None if scale_v_matrix is None else scale_v_matrix.shape[-2])
num_dims = loc_dim if loc_dim is not None else scale_diag_dim
num_dims = num_dims if num_dims is not None else scale_u_matrix_dim
array_dims = [
x for x in [
loc_dim, scale_diag_dim, scale_u_matrix_dim, scale_v_matrix_dim]
if x is not None]
if not all(x == num_dims for x in array_dims):
raise ValueError(
f'If specified, the following shapes must all coincide, but got '
f'`loc.shape[-1] = {loc_dim}`, '
f'`scale_diag.shape[-1] = {scale_diag_dim}`, '
f'`scale_u_matrix.shape[-2] = {scale_u_matrix_dim}`, and '
f'`scale_v_matrix.shape[-2] = {scale_v_matrix_dim}`.')
class MultivariateNormalDiagPlusLowRank(MultivariateNormalFromBijector):
"""Multivariate normal distribution on `R^k`.
The `MultivariateNormalDiagPlusLowRank` distribution is parameterized by a
location (mean) vector `b` and a scale matrix `S` that has the following
structure: `S = diag(D) + U @ V.T`, where `D` is a `k`-length vector, and both
`U` and `V` are `k x r` matrices (with `r < k` typically). The covariance
matrix of the multivariate normal distribution is `C = S @ S.T`.
This class makes no attempt to verify that the scale matrix `S` is invertible,
which happens if and only if both `diag(D)` and `I + V^T diag(D)^{-1} U` are
invertible. It is the responsibility of the user to make sure that this is the
case.
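  A minimal usage sketch (illustrative only; assumes `jax` is available for
  the PRNG key):
  ```
  dist = MultivariateNormalDiagPlusLowRank(
      loc=jnp.zeros((3,)),
      scale_diag=jnp.ones((3,)),
      scale_u_matrix=0.1 * jnp.ones((3, 2)))  # `V` defaults to `U`.
  samples = dist.sample(seed=jax.random.PRNGKey(0))  # shape (3,)
  ```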
"""
equiv_tfp_cls = tfd.MultivariateNormalDiagPlusLowRank
def __init__(self,
loc: Optional[Array] = None,
scale_diag: Optional[Array] = None,
scale_u_matrix: Optional[Array] = None,
scale_v_matrix: Optional[Array] = None):
"""Initializes a MultivariateNormalDiagPlusLowRank distribution.
Args:
loc: Mean vector of the distribution of shape `k` (can also be a batch of
such vectors). If not specified, it defaults to zeros.
scale_diag: The diagonal matrix added to the scale `S`, specified by a
`k`-length vector containing its diagonal entries (or a batch of
vectors). If not specified, the diagonal matrix defaults to the
identity.
scale_u_matrix: The low-rank matrix `U` that specifies the scale, as
described in the class docstring. It is a `k x r` matrix (or a batch of
such matrices). If not specified, it defaults to zeros. At least one
of `loc`, `scale_diag`, and `scale_u_matrix` must be specified.
scale_v_matrix: The low-rank matrix `V` that specifies the scale, as
described in the class docstring. It is a `k x r` matrix (or a batch of
such matrices). If not specified, it defaults to `scale_u_matrix`. It
can only be specified if `scale_u_matrix` is also specified.
"""
loc = None if loc is None else conversion.as_float_array(loc)
scale_diag = None if scale_diag is None else conversion.as_float_array(
scale_diag)
scale_u_matrix = (
None if scale_u_matrix is None else conversion.as_float_array(
scale_u_matrix))
scale_v_matrix = (
None if scale_v_matrix is None else conversion.as_float_array(
scale_v_matrix))
_check_parameters(loc, scale_diag, scale_u_matrix, scale_v_matrix)
if loc is not None:
num_dims = loc.shape[-1]
elif scale_diag is not None:
num_dims = scale_diag.shape[-1]
elif scale_u_matrix is not None:
num_dims = scale_u_matrix.shape[-2]
dtype = jnp.result_type(
*[x for x in [loc, scale_diag, scale_u_matrix, scale_v_matrix]
if x is not None])
if loc is None:
loc = jnp.zeros((num_dims,), dtype=dtype)
self._scale_diag = scale_diag
if scale_diag is None:
self._scale_diag = jnp.ones((num_dims,), dtype=dtype)
self._scale_u_matrix = scale_u_matrix
if scale_u_matrix is None:
self._scale_u_matrix = jnp.zeros((num_dims, 1), dtype=dtype)
self._scale_v_matrix = scale_v_matrix
if scale_v_matrix is None:
self._scale_v_matrix = self._scale_u_matrix
if scale_u_matrix is None:
# The scale matrix is diagonal.
scale = DiagLinear(self._scale_diag)
else:
scale = DiagPlusLowRankLinear(
u_matrix=self._scale_u_matrix,
v_matrix=self._scale_v_matrix,
diag=self._scale_diag)
super().__init__(loc=loc, scale=scale)
@property
def scale_diag(self) -> Array:
"""Diagonal matrix that is added to the scale."""
return jnp.broadcast_to(
self._scale_diag, self.batch_shape + self.event_shape)
@property
def scale_u_matrix(self) -> Array:
"""Matrix `U` that defines the low-rank part of the scale matrix."""
return jnp.broadcast_to(
self._scale_u_matrix,
self.batch_shape + self._scale_u_matrix.shape[-2:])
@property
def scale_v_matrix(self) -> Array:
"""Matrix `V` that defines the low-rank part of the scale matrix."""
return jnp.broadcast_to(
self._scale_v_matrix,
self.batch_shape + self._scale_v_matrix.shape[-2:])
def __getitem__(self, index) -> 'MultivariateNormalDiagPlusLowRank':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return MultivariateNormalDiagPlusLowRank(
loc=self.loc[index],
scale_diag=self.scale_diag[index],
scale_u_matrix=self.scale_u_matrix[index],
scale_v_matrix=self.scale_v_matrix[index])
| distrax-master | distrax/_src/distributions/mvn_diag_plus_low_rank.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Softmax distribution."""
from typing import Any, Union
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import distribution
import jax.numpy as jnp
Array = chex.Array
class Softmax(categorical.Categorical):
"""Categorical implementing a softmax over logits, with given temperature.
Given a set of logits, the probability mass is distributed such that each
  index `i` has probability `exp(logits[i]/τ)/Σ_j exp(logits[j]/τ)` where τ is a
scalar `temperature` parameter such that for τ→0, the distribution
becomes fully greedy, and for τ→∞ the distribution becomes fully uniform.
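  For example (a small illustrative sketch):
  ```
  dist = Softmax(logits=jnp.array([1., 1., 2.]), temperature=10.)
  probs = dist.probs  # Much closer to uniform than with `temperature=1.`.
  ```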
"""
def __init__(self,
logits: Array,
temperature: float = 1.,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes a Softmax distribution.
Args:
logits: Logit transform of the probability of each category.
temperature: Softmax temperature τ.
dtype: The type of event samples.
"""
self._temperature = temperature
self._unscaled_logits = logits
scaled_logits = logits / temperature
super().__init__(logits=scaled_logits, dtype=dtype)
@property
def temperature(self) -> float:
"""The softmax temperature parameter."""
return self._temperature
@property
def unscaled_logits(self) -> Array:
"""The logits of the distribution before the temperature scaling."""
return self._unscaled_logits
def __getitem__(self, index) -> 'Softmax':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Softmax(
logits=self.unscaled_logits[index],
temperature=self.temperature,
dtype=self.dtype)
| distrax-master | distrax/_src/distributions/softmax.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `greedy.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import greedy
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
class GreedyTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(greedy.Greedy)
self.preferences = jnp.array([0., 4., -1., 4.])
def test_parameters_from_preferences(self):
dist = self.distrax_cls(preferences=self.preferences)
expected_probs = jnp.array([0., 0.5, 0., 0.5])
self.assertion_fn(rtol=2e-3)(dist.logits, jnp.log(expected_probs))
self.assertion_fn(rtol=2e-3)(dist.probs, expected_probs)
def test_num_categories(self):
dist = self.distrax_cls(preferences=self.preferences)
np.testing.assert_equal(dist.num_categories, len(self.preferences))
@chex.all_variants
@parameterized.named_parameters(
('int32', jnp.int32),
('int64', jnp.int64),
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(preferences=self.preferences, dtype=dtype)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
def test_jittable(self):
super()._test_jittable((np.array([0., 4., -1., 4.]),))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
preferences = np.abs(np.random.randn(3, 4, 5))
dtype = jnp.float32
dist = self.distrax_cls(preferences, dtype=dtype)
dist_sliced = dist[slice_]
self.assertIsInstance(dist_sliced, greedy.Greedy)
self.assertion_fn(rtol=2e-3)(dist_sliced.preferences, preferences[slice_])
self.assertEqual(dist_sliced.dtype, dtype)
def test_slice_ellipsis(self):
preferences = np.abs(np.random.randn(3, 4, 5))
dtype = jnp.float32
dist = self.distrax_cls(preferences, dtype=dtype)
dist_sliced = dist[..., -1]
self.assertIsInstance(dist_sliced, greedy.Greedy)
self.assertion_fn(rtol=2e-3)(dist_sliced.preferences, preferences[:, -1])
self.assertEqual(dist_sliced.dtype, dtype)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/greedy_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `normal.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import normal
import jax
from jax.config import config as jax_config
import jax.numpy as jnp
def setUpModule():
jax_config.update('jax_enable_x64', True)
class NormalFloat64Test(chex.TestCase):
def _assert_dtypes(self, dist, dtype):
"""Asserts dist methods' outputs' datatypes."""
# Sanity check to make sure float64 is enabled.
x_64 = jnp.zeros([])
self.assertEqual(jnp.float64, x_64.dtype)
key = jax.random.PRNGKey(1729)
z, log_prob = self.variant(
lambda: dist.sample_and_log_prob(seed=key, sample_shape=[3]))()
z2 = self.variant(
lambda: dist.sample(seed=key, sample_shape=[3]))()
self.assertEqual(dtype, z.dtype)
self.assertEqual(dtype, z2.dtype)
self.assertEqual(dtype, log_prob.dtype)
self.assertEqual(dtype, self.variant(dist.log_prob)(z).dtype)
self.assertEqual(dtype, self.variant(dist.prob)(z).dtype)
self.assertEqual(dtype, self.variant(dist.cdf)(z).dtype)
self.assertEqual(dtype, self.variant(dist.log_cdf)(z).dtype)
self.assertEqual(dtype, self.variant(dist.entropy)().dtype)
self.assertEqual(dtype, self.variant(dist.mean)().dtype)
self.assertEqual(dtype, self.variant(dist.mode)().dtype)
self.assertEqual(dtype, self.variant(dist.median)().dtype)
self.assertEqual(dtype, self.variant(dist.stddev)().dtype)
self.assertEqual(dtype, self.variant(dist.variance)().dtype)
self.assertEqual(dtype, dist.loc.dtype)
self.assertEqual(dtype, dist.scale.dtype)
self.assertEqual(dtype, dist.dtype)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_dtype(self, dtype):
dist = normal.Normal(loc=jnp.zeros([], dtype), scale=jnp.ones([], dtype))
self._assert_dtypes(dist, dtype)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/normal_float64_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper to adapt a Distrax distribution for use in TFP."""
from typing import Dict, Optional, Sequence, Tuple, Union
import chex
from distrax._src.distributions import distribution
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
ArrayNumpy = chex.ArrayNumpy
Distribution = distribution.Distribution
IntLike = distribution.IntLike
PRNGKey = chex.PRNGKey
tangent_spaces = tfp.experimental.tangent_spaces
TangentSpace = tangent_spaces.TangentSpace
EventT = distribution.EventT
def tfp_compatible_distribution(
base_distribution: Distribution,
name: Optional[str] = None) -> distribution.DistributionT:
"""Create a TFP-compatible distribution from a Distrax distribution.
Given a Distrax distribution, return a wrapped distribution that behaves as a
TFP distribution, to be used in TFP meta-distributions. In particular, the
wrapped distribution implements the methods `allow_nan_stats`, `parameters`,
`name`, `batch_shape_tensor`, `reparameterization_type` and
`event_shape_tensor`; and the `batch_shape` and `event_shape` properties
return a TFP `TensorShape`.
The methods of the resulting distribution do not take a `name` argument,
unlike their TFP equivalents.
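  For example, a minimal sketch of wrapping a Distrax distribution for use
  inside a TFP meta-distribution (assumes `distrax` and `jax` are imported):
  ```
  base = tfp_compatible_distribution(distrax.Normal(loc=0., scale=1.))
  meta = tfd.Sample(base, sample_shape=3)
  samples = meta.sample(seed=jax.random.PRNGKey(0))  # shape (3,)
  ```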
Args:
base_distribution: A Distrax distribution.
name: The distribution name.
Returns:
The wrapped distribution.
"""
name_ = name
class TFPCompatibleDistribution(base_distribution.__class__):
"""Class to wrap a Distrax distribution.
The wrapped class dynamically inherits from `base_distribution`, so that
computations involving the KL remain valid.
"""
def __init__(self):
pass
def __getattr__(self, name: str):
return getattr(base_distribution, name)
def __getitem__(self, index):
return tfp_compatible_distribution(base_distribution[index], name=name_)
@property
def allow_nan_stats(self) -> bool:
"""Proxy for the TFP property `allow_nan_stats`.
It always returns True.
"""
return True
@property
def batch_shape(self) -> tfp.tf2jax.TensorShape:
"""Returns a `TensorShape` with the `batch_shape` of the distribution."""
return tfp.tf2jax.TensorShape(base_distribution.batch_shape)
def batch_shape_tensor(self) -> Array:
"""See `Distribution.batch_shape`."""
return jnp.array(base_distribution.batch_shape, dtype=jnp.int32)
@property
def event_shape(self) -> tfp.tf2jax.TensorShape:
"""Returns a `TensorShape` with the `event_shape` of the distribution."""
return tfp.tf2jax.TensorShape(base_distribution.event_shape)
def event_shape_tensor(self) -> ArrayNumpy:
"""See `Distribution.event_shape`."""
return np.array(base_distribution.event_shape, dtype=jnp.int32)
@property
def experimental_shard_axis_names(self):
return []
@property
def name(self) -> str:
"""See `Distribution.name`."""
return name_ or f'TFPCompatible{base_distribution.name}'
@property
def reparameterization_type(self) -> tfd.ReparameterizationType:
"""Proxy for the TFP property `reparameterization_type`.
It always returns `tfd.NOT_REPARAMETERIZED`.
"""
return tfd.NOT_REPARAMETERIZED
def _sample_n(self, key: PRNGKey, n: int) -> EventT:
return base_distribution.sample(seed=key, sample_shape=(n,))
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return base_distribution.log_prob(value)
@property
def parameters(self) -> Dict[str, str]:
"""Returns a dictionary whose key 'name' maps to the distribution name."""
return {'name': self.name}
def sample(self,
sample_shape: Union[IntLike, Sequence[IntLike]] = (),
seed: Optional[Union[int, tfp.util.SeedStream]] = None,
**unused_kwargs) -> EventT:
"""See `Distribution.sample`."""
if not np.isscalar(sample_shape):
sample_shape = tuple(sample_shape)
return base_distribution.sample(sample_shape=sample_shape, seed=seed)
def experimental_local_measure(
self,
value: Array,
backward_compat: bool = True,
**unused_kwargs) -> Tuple[Array, TangentSpace]:
"""Returns a log probability density together with a `TangentSpace`.
See `tfd.distribution.Distribution.experimental_local_measure`, and
Radul and Alexeev, AISTATS 2021, “The Base Measure Problem and its
Solution”, https://arxiv.org/abs/2010.09647.
Args:
value: `float` or `double` `Array`.
backward_compat: unused
**unused_kwargs: unused
Returns:
log_prob: see `log_prob`.
tangent_space: `tangent_spaces.FullSpace()`, representing R^n with the
standard basis.
"""
del backward_compat
# We ignore the `backward_compat` flag and always act as though it's
# true because Distrax bijectors and distributions need not follow the
# base measure protocol from TFP.
del unused_kwargs
return self.log_prob(value), tangent_spaces.FullSpace()
return TFPCompatibleDistribution()
| distrax-master | distrax/_src/distributions/tfp_compatible_distribution.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `one_hot_categorical.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import one_hot_categorical
from distrax._src.utils import equivalence
from distrax._src.utils import math
import jax
import jax.numpy as jnp
import numpy as np
import scipy
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
class OneHotCategoricalTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(one_hot_categorical.OneHotCategorical)
self.p = np.asarray([0.1, 0.4, 0.2, 0.3])
self.logits = np.log(self.p) - 1.0 # intended unnormalization
def test_parameters_from_probs(self):
dist = self.distrax_cls(probs=self.p)
self.assertion_fn(rtol=2e-3)(
dist.logits, math.normalize(logits=np.log(self.p)))
self.assertion_fn(rtol=2e-3)(dist.probs, math.normalize(probs=self.p))
def test_parameters_from_logits(self):
dist = self.distrax_cls(logits=self.logits)
self.assertion_fn(rtol=2e-3)(
dist.logits, math.normalize(logits=self.logits))
self.assertion_fn(rtol=2e-3)(dist.probs, math.normalize(probs=self.p))
@parameterized.named_parameters(
('probs and logits', {'logits': [0.1, -0.2], 'probs': [0.6, 0.4]}),
('both probs and logits are None', {'logits': None, 'probs': None}),
('bool dtype', {'logits': [0.1, -0.2], 'dtype': jnp.bool_}),
('complex64 dtype', {'logits': [0.1, -0.2], 'dtype': jnp.complex64}),
('complex128 dtype', {'logits': [0.1, -0.2], 'dtype': jnp.complex128}),
)
def test_raises_on_invalid_inputs(self, dist_params):
with self.assertRaises(ValueError):
self.distrax_cls(**dist_params)
@chex.all_variants
def test_negative_probs(self):
"""Check sample returns -1 if probs are negative after normalization."""
dist = self.distrax_cls(probs=np.asarray([[0.1, -0.4, 0.2, 0.3],
[0.1, 0.1, 0.6, 0.2]]))
sample_fn = self.variant(
lambda key: dist.sample(seed=key, sample_shape=100))
samples = sample_fn(self.key)
self.assertion_fn(rtol=2e-3)(samples[:, 0, :], -1)
np.testing.assert_array_compare(lambda x, y: x >= y, samples[:, 1, :], 0)
@chex.all_variants
def test_nan_probs(self):
"""Checks sample returns -1 if probs are nan after normalization."""
dist = self.distrax_cls(
probs=np.asarray([[-0.1, 0.1, 0.0, 0.0], [0.1, 0.1, 0.6, 0.2]]))
sample_fn = self.variant(
lambda key: dist.sample(seed=key, sample_shape=100))
samples = sample_fn(self.key)
self.assertion_fn(rtol=2e-3)(samples[:, 0, :], -1)
np.testing.assert_array_compare(lambda x, y: x >= y, samples[:, 1, :], 0)
@parameterized.named_parameters(
('from probs', False),
('from logits', True))
def test_num_categories(self, from_logits):
dist_params = {'logits': self.logits} if from_logits else {'probs': self.p}
dist = self.distrax_cls(**dist_params)
np.testing.assert_equal(dist.num_categories, len(self.p))
@parameterized.named_parameters(
('1d logits', {'logits': [0.0, 1.0, -0.5]}),
('1d probs', {'probs': [0.2, 0.5, 0.3]}),
('2d logits', {'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]}),
('2d probs', {'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]}),
)
def test_event_shape(self, distr_params):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_event_shape((), distr_params)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape', {'logits': [0.0, 1.0, -0.5]}, ()),
('1d probs, no shape', {'probs': [0.2, 0.5, 0.3]}, ()),
('1d logits, int shape', {'logits': [0.0, 1.0, -0.5]}, 1),
('1d probs, int shape', {'probs': [0.2, 0.5, 0.3]}, 1),
('1d logits, 1-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (1,)),
('1d probs, 1-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (1,)),
('1d logits, 2-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (5, 4)),
('1d probs, 2-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (5, 4)),
('2d logits, no shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, ()),
('2d probs, no shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, ()),
('2d logits, int shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, 4),
('2d probs, int shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, 4),
('2d logits, 1-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5,)),
('2d probs, 1-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5,)),
('2d logits, 2-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5, 4)),
('2d probs, 2-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5, 4)),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_sample_shape(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape', {'logits': [0.0, 1.0, -0.5]}, ()),
('1d probs, no shape', {'probs': [0.2, 0.5, 0.3]}, ()),
('1d logits, int shape', {'logits': [0.0, 1.0, -0.5]}, 1),
('1d probs, int shape', {'probs': [0.2, 0.5, 0.3]}, 1),
('1d logits, 1-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (1,)),
('1d probs, 1-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (1,)),
('1d logits, 2-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (5, 4)),
('1d probs, 2-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (5, 4)),
('2d logits, no shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, ()),
('2d probs, no shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, ()),
('2d logits, int shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, 4),
('2d probs, int shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, 4),
('2d logits, 1-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5,)),
('2d probs, 1-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5,)),
('2d logits, 2-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, (5, 4)),
('2d probs, 2-tuple shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, (5, 4)),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_sample_and_log_prob(
dist_args=(),
dist_kwargs=distr_params,
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=2e-3))
@chex.all_variants
@parameterized.named_parameters(
('int32', jnp.int32),
('int64', jnp.int64),
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist_params = {'logits': self.logits, 'dtype': dtype}
dist = self.distrax_cls(**dist_params)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('from probs', False),
('from logits', True))
def test_sample_unique_values(self, from_logits):
dist_params = {'logits': self.logits} if from_logits else {'probs': self.p}
dist = self.distrax_cls(**dist_params)
sample_fn = self.variant(
lambda key: dist.sample(seed=key, sample_shape=100))
samples = sample_fn(self.key)
np.testing.assert_equal(np.unique(samples), np.arange(2))
@chex.all_variants
def test_sample_extreme_probs(self):
dist_params = {'probs': np.asarray([1., 0., 0., 0.])}
dist = self.distrax_cls(**dist_params)
sample_fn = self.variant(
lambda key: dist.sample(seed=key, sample_shape=100))
samples = sample_fn(self.key)
np.testing.assert_equal(np.unique(samples[..., 0]), 1)
np.testing.assert_equal(np.unique(samples[..., 1:]), 0)
@chex.all_variants
@parameterized.named_parameters(
('log_prob; 1d logits, 1 input',
'log_prob',
{'logits': [0.0, 0.5, -0.5]},
[1, 0, 0]),
('log_prob; 1d logits, 2 inputs',
'log_prob',
{'logits': [0.0, 0.5, -0.5]},
[[1, 0, 0], [0, 1, 0]]),
('log_prob; 2d logits, 2 inputs',
'log_prob',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
[[1, 0, 0], [0, 1, 0]]),
('log_prob; 2d logits, rank-3 inputs',
'log_prob',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
np.asarray([[1, 0, 0], [0, 1, 0]])[None, ...]),
('log_prob; 1d probs, 1 input',
'log_prob',
{'probs': [0.3, 0.2, 0.5]},
[1, 0, 0]),
('log_prob; 1d probs, 2 inputs',
'log_prob',
{'probs': [0.3, 0.2, 0.5]},
[[1, 0, 0], [0, 1, 0]]),
('log_prob; 2d probs, 2 inputs',
'log_prob',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
[[1, 0, 0], [0, 1, 0]]),
('log_prob; 2d probs, rank-3 inputs',
'log_prob',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
np.asarray([[1, 0, 0], [0, 1, 0]])[None, ...]),
('log_prob; unnormalized probs',
'log_prob',
{'probs': [0.1, 0.2, 0.3]},
[[0, 0, 1], [0, 1, 0], [1, 0, 0]]),
('prob; 1d logits, 1 input',
'prob',
{'logits': [0.0, 0.5, -0.5]},
[1, 0, 0]),
('prob; 1d logits, 2 inputs',
'prob',
{'logits': [0.0, 0.5, -0.5]},
[[1, 0, 0], [0, 1, 0]]),
('prob; 2d logits, 2 inputs',
'prob',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
[[1, 0, 0], [0, 1, 0]]),
('prob; 2d logits, rank-3 inputs',
'prob',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
np.asarray([[1, 0, 0], [0, 1, 0]])[None, ...]),
('prob; 1d probs, 1 input',
'prob',
{'probs': [0.3, 0.2, 0.5]},
[1, 0, 0]),
('prob; 1d probs, 2 inputs',
'prob',
{'probs': [0.3, 0.2, 0.5]},
[[1, 0, 0], [0, 1, 0]]),
('prob; 2d probs, 2 inputs',
'prob',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
[[1, 0, 0], [0, 1, 0]]),
('prob; 2d probs, rank-3 inputs',
'prob',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
np.asarray([[1, 0, 0], [0, 1, 0]])[None, ...]),
('prob; unnormalized probs',
'prob',
{'probs': [0.1, 0.2, 0.3]},
[[0, 0, 1], [0, 1, 0], [1, 0, 0]]),
)
def test_pdf(self, function_string, distr_params, value):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
value = np.array(value)
super()._test_attribute(
attribute_string=function_string,
dist_kwargs=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=2e-3))
@chex.all_variants
@parameterized.named_parameters(
('log_prob; extreme probs',
'log_prob',
{'probs': [0.0, 1.0, 0.0]},
[[0, 1, 0], [1, 0, 0]],
np.asarray([0., -np.inf])),
('prob; extreme probs',
'prob',
{'probs': [0.0, 1.0, 0.0]},
[[0, 1, 0], [1, 0, 0]],
np.asarray([1., 0.])),
)
def test_pdf_extreme_probs(self, function_string, distr_params,
value, expected):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
value = np.array(value)
dist = self.distrax_cls(**distr_params)
self.assertion_fn(rtol=2e-3)(
self.variant(getattr(dist, function_string))(value), expected)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy; from 2d logits',
'entropy', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]}),
('entropy; from 2d probs',
'entropy', {'probs': [[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]]}),
('mode; from 2d logits',
'mode', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]}),
('mode; from 2d probs',
'mode', {'probs': [[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]]}),
)
def test_method(self, function_string, distr_params):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
super()._test_attribute(
attribute_string=function_string,
dist_kwargs=distr_params,
call_args=(),
assertion_fn=self.assertion_fn(rtol=2e-3))
@chex.all_variants
@parameterized.named_parameters(
('from 2d logits', {
'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]],
}, [[0, 1, 0], [1, 0, 0]]),
('from 2d probs', {
'probs': [[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]],
}, [[0, 1, 0], [1, 0, 0]]),
)
def test_cdf(self, distr_params, values):
distr_params = {k: jnp.asarray(v) for k, v in distr_params.items()}
values = np.array(values)
dist = self.distrax_cls(**distr_params)
if 'probs' in distr_params:
probs = distr_params['probs']
else:
probs = scipy.special.softmax(distr_params['logits'], axis=-1)
expected = np.sum(np.cumsum(probs, axis=-1) * values, axis=-1)
self.assertion_fn(rtol=2e-3)(self.variant(dist.cdf)(values), expected)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={'probs': np.array([[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]])},
dist2_kwargs={'logits': np.array([0.0, 0.1, 0.1]),},
assertion_fn=self.assertion_fn(rtol=2e-3))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_categorical_and_one_hot_categorical(
self, function_string, mode_string):
dist1_params = {'probs': np.array([[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]])}
dist2_params = {'logits': np.array([0.0, 0.1, 0.1]),}
dist1 = categorical.Categorical(**dist1_params)
tfp_dist1 = tfd.Categorical(**dist1_params)
dist2 = one_hot_categorical.OneHotCategorical(**dist2_params)
tfp_dist2 = tfd.OneHotCategorical(**dist2_params)
distrax_fn_1 = self.variant(getattr(dist1, function_string))
distrax_fn_2 = self.variant(getattr(dist2, function_string))
if mode_string == 'distrax_to_distrax':
comp_dist1_dist2 = distrax_fn_1(dist2)
comp_dist2_dist1 = distrax_fn_2(dist1)
elif mode_string == 'distrax_to_tfp':
comp_dist1_dist2 = distrax_fn_1(tfp_dist2)
comp_dist2_dist1 = distrax_fn_2(tfp_dist1)
elif mode_string == 'tfp_to_distrax':
comp_dist1_dist2 = getattr(tfp_dist1, function_string)(dist2)
comp_dist2_dist1 = getattr(tfp_dist2, function_string)(dist1)
# The target values (obtained with TFP-only methods) are obtained with two
# distributions of the same class (namely, Categorical) because TFP doesn't
# register KLs of the form KL(Categorical || OneHotCategorical).
tfp_dist2_aux = tfd.Categorical(**dist2_params)
tfp_comp_dist1_dist2 = getattr(tfp_dist1, function_string)(tfp_dist2_aux)
tfp_comp_dist2_dist1 = getattr(tfp_dist2_aux, function_string)(tfp_dist1)
self.assertion_fn(rtol=2e-3)(comp_dist1_dist2, tfp_comp_dist1_dist2)
self.assertion_fn(rtol=2e-3)(comp_dist2_dist1, tfp_comp_dist2_dist1)
def test_jittable(self):
super()._test_jittable((np.zeros((3,)),))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
logits = jnp.array(np.random.randn(3, 4, 5))
probs = jax.nn.softmax(jnp.array(np.random.randn(3, 4, 5)), axis=-1)
dist1 = self.distrax_cls(logits=logits)
dist2 = self.distrax_cls(probs=probs)
dist1_sliced = dist1[slice_]
dist2_sliced = dist2[slice_]
self.assertion_fn(rtol=2e-3)(
jax.nn.softmax(dist1_sliced.logits, axis=-1),
jax.nn.softmax(logits[slice_], axis=-1))
self.assertion_fn(rtol=2e-3)(dist2_sliced.probs, probs[slice_])
self.assertIsInstance(dist1_sliced, one_hot_categorical.OneHotCategorical)
self.assertIsInstance(dist2_sliced, one_hot_categorical.OneHotCategorical)
def test_slice_ellipsis(self):
logits = jnp.array(np.random.randn(4, 4, 5))
probs = jax.nn.softmax(jnp.array(np.random.randn(4, 4, 5)), axis=-1)
dist1 = self.distrax_cls(logits=logits)
dist2 = self.distrax_cls(probs=probs)
self.assertion_fn(rtol=2e-3)(
jax.nn.softmax(dist1[..., -1].logits, axis=-1),
jax.nn.softmax(logits[:, -1], axis=-1))
self.assertion_fn(rtol=2e-3)(dist2[..., -1].probs, probs[:, -1])
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/one_hot_categorical_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gamma distribution."""
from typing import Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Gamma(distribution.Distribution):
"""Gamma distribution with parameters `concentration` and `rate`."""
equiv_tfp_cls = tfd.Gamma
def __init__(self, concentration: Numeric, rate: Numeric):
"""Initializes a Gamma distribution.
Args:
concentration: Concentration parameter of the distribution.
      rate: Inverse scale parameter of the distribution.
"""
super().__init__()
self._concentration = conversion.as_float_array(concentration)
self._rate = conversion.as_float_array(rate)
self._batch_shape = jax.lax.broadcast_shapes(
self._concentration.shape, self._rate.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
@property
def concentration(self) -> Array:
"""Concentration of the distribution."""
return jnp.broadcast_to(self._concentration, self.batch_shape)
@property
def rate(self) -> Array:
"""Inverse scale of the distribution."""
return jnp.broadcast_to(self._rate, self.batch_shape)
def _sample_from_std_gamma(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._concentration, self._rate)
return jax.random.gamma(
key, a=self._concentration, shape=out_shape, dtype=dtype
)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_gamma(key, n)
return rnd / self._rate
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return (
self._concentration * jnp.log(self._rate)
+ (self._concentration - 1) * jnp.log(value)
- self._rate * value
- jax.lax.lgamma(self._concentration)
)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
log_rate = jnp.log(self._rate)
return (
self._concentration
- log_rate
+ jax.lax.lgamma(self._concentration)
+ (1.0 - self._concentration) * jax.lax.digamma(self._concentration)
)
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jax.lax.igamma(self._concentration, self._rate * value)
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def mean(self) -> Array:
"""Calculates the mean."""
return self._concentration / self._rate
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return jnp.sqrt(self._concentration) / self._rate
def variance(self) -> Array:
"""Calculates the variance."""
return self._concentration / jnp.square(self._rate)
def mode(self) -> Array:
"""Calculates the mode."""
mode = (self._concentration - 1.0) / self._rate
return jnp.where(self._concentration >= 1.0, mode, jnp.nan)
def __getitem__(self, index) -> 'Gamma':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Gamma(
concentration=self.concentration[index], rate=self.rate[index])
def _kl_divergence_gamma_gamma(
dist1: Union[Gamma, tfd.Gamma],
dist2: Union[Gamma, tfd.Gamma],
*unused_args,
**unused_kwargs,
) -> Array:
"""Batched KL divergence KL(dist1 || dist2) between two Gamma distributions.
Args:
dist1: A Gamma distribution.
dist2: A Gamma distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
t1 = dist2.concentration * (jnp.log(dist1.rate) - jnp.log(dist2.rate))
t2 = jax.lax.lgamma(dist2.concentration) - jax.lax.lgamma(dist1.concentration)
t3 = (dist1.concentration - dist2.concentration) * jax.lax.digamma(
dist1.concentration)
t4 = (dist2.rate - dist1.rate) * (dist1.concentration / dist1.rate)
return t1 + t2 + t3 + t4
# Register the KL functions with TFP.
tfd.RegisterKL(Gamma, Gamma)(_kl_divergence_gamma_gamma)
tfd.RegisterKL(Gamma, Gamma.equiv_tfp_cls)(_kl_divergence_gamma_gamma)
tfd.RegisterKL(Gamma.equiv_tfp_cls, Gamma)(_kl_divergence_gamma_gamma)
| distrax-master | distrax/_src/distributions/gamma.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution representing a Bijector applied to a Distribution."""
from typing import Optional, Tuple, Union
from distrax._src.bijectors import bijector as bjct_base
from distrax._src.distributions import distribution as dist_base
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
PRNGKey = dist_base.PRNGKey
Array = dist_base.Array
DistributionLike = dist_base.DistributionLike
BijectorLike = bjct_base.BijectorLike
EventT = dist_base.EventT
class Transformed(dist_base.Distribution):
"""Distribution of a random variable transformed by a bijective function.
Let `X` be a continuous random variable and `Y = f(X)` be a random variable
transformed by a differentiable bijection `f` (a "bijector"). Given the
distribution of `X` (the "base distribution") and the bijector `f`, this class
implements the distribution of `Y` (also known as the pushforward of the base
distribution through `f`).
The probability density of `Y` can be computed by:
`log p(y) = log p(x) - log|det J(f)(x)|`
where `p(x)` is the probability density of `X` (the "base density") and
`J(f)(x)` is the Jacobian matrix of `f`, both evaluated at `x = f^{-1}(y)`.
Sampling from a Transformed distribution involves two steps: sampling from the
base distribution `x ~ p(x)` and then evaluating `y = f(x)`. The first step
is agnostic to the possible batch dimensions of the bijector `f(x)`. For
example:
```
dist = distrax.Normal(loc=0., scale=1.)
bij = distrax.ScalarAffine(shift=jnp.asarray([3., 3., 3.]))
transformed_dist = distrax.Transformed(distribution=dist, bijector=bij)
samples = transformed_dist.sample(seed=0, sample_shape=())
print(samples) # [2.7941577, 2.7941577, 2.7941577]
```
Note: the `batch_shape`, `event_shape`, and `dtype` properties of the
transformed distribution, as well as the `kl_divergence` method, are computed
on-demand via JAX tracing when requested. This assumes that the `forward`
function of the bijector is traceable; that is, it is a pure function that
does not contain run-time branching. Functions that do not strictly meet this
requirement can still be used, but we cannot guarantee that the shapes, dtype,
and KL computations involving the transformed distribution can be correctly
obtained.
"""
equiv_tfp_cls = tfd.TransformedDistribution
def __init__(self, distribution: DistributionLike, bijector: BijectorLike):
"""Initializes a Transformed distribution.
Args:
distribution: the base distribution. Can be either a Distrax distribution
or a TFP distribution.
bijector: a differentiable bijective transformation. Can be a Distrax
bijector, a TFP bijector, or a callable to be wrapped by `Lambda`.
"""
super().__init__()
distribution = conversion.as_distribution(distribution)
bijector = conversion.as_bijector(bijector)
event_shape = distribution.event_shape
# Check if event shape is a tuple of integers (i.e. not nested).
if not (isinstance(event_shape, tuple) and
all(isinstance(i, int) for i in event_shape)):
raise ValueError(
f"'Transformed' currently only supports distributions with Array "
f"events (i.e. not nested). Received '{distribution.name}' with "
f"event shape '{distribution.event_shape}'.")
if len(event_shape) != bijector.event_ndims_in:
raise ValueError(
f"Base distribution '{distribution.name}' has event shape "
f"{distribution.event_shape}, but bijector '{bijector.name}' expects "
f"events to have {bijector.event_ndims_in} dimensions. Perhaps use "
f"`distrax.Block` or `distrax.Independent`?")
self._distribution = distribution
self._bijector = bijector
self._batch_shape = None
self._event_shape = None
self._dtype = None
@property
def distribution(self):
"""The base distribution."""
return self._distribution
@property
def bijector(self):
"""The bijector representing the transformation."""
return self._bijector
def _infer_shapes_and_dtype(self):
"""Infer the batch shape, event shape, and dtype by tracing `forward`."""
dummy_shape = self.distribution.batch_shape + self.distribution.event_shape
dummy = jnp.zeros(dummy_shape, dtype=self.distribution.dtype)
shape_dtype = jax.eval_shape(self.bijector.forward, dummy)
self._dtype = shape_dtype.dtype
if self.bijector.event_ndims_out == 0:
self._event_shape = ()
self._batch_shape = shape_dtype.shape
else:
# pylint: disable-next=invalid-unary-operand-type
self._event_shape = shape_dtype.shape[-self.bijector.event_ndims_out:]
# pylint: disable-next=invalid-unary-operand-type
self._batch_shape = shape_dtype.shape[:-self.bijector.event_ndims_out]
@property
def dtype(self) -> jnp.dtype:
"""See `Distribution.dtype`."""
if self._dtype is None:
self._infer_shapes_and_dtype()
assert self._dtype is not None # By _infer_shapes_and_dtype()
return self._dtype
@property
def event_shape(self) -> Tuple[int, ...]:
"""See `Distribution.event_shape`."""
if self._event_shape is None:
self._infer_shapes_and_dtype()
assert self._event_shape is not None # By _infer_shapes_and_dtype()
return self._event_shape
@property
def batch_shape(self) -> Tuple[int, ...]:
"""See `Distribution.batch_shape`."""
if self._batch_shape is None:
self._infer_shapes_and_dtype()
assert self._batch_shape is not None # By _infer_shapes_and_dtype()
return self._batch_shape
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
x, ildj_y = self.bijector.inverse_and_log_det(value)
lp_x = self.distribution.log_prob(x)
lp_y = lp_x + ildj_y
return lp_y
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""Returns `n` samples."""
x = self.distribution.sample(seed=key, sample_shape=n)
y = jax.vmap(self.bijector.forward)(x)
return y
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""Returns `n` samples and their log probs.
This function is more efficient than calling `sample` and `log_prob`
separately, because it uses only the forward methods of the bijector. It
also works for bijectors that don't implement inverse methods.
Args:
key: PRNG key.
n: Number of samples to generate.
Returns:
A tuple of `n` samples and their log probs.
"""
x, lp_x = self.distribution.sample_and_log_prob(seed=key, sample_shape=n)
y, fldj = jax.vmap(self.bijector.forward_and_log_det)(x)
lp_y = jax.vmap(jnp.subtract)(lp_x, fldj)
return y, lp_y
def mean(self) -> Array:
"""Calculates the mean."""
if self.bijector.is_constant_jacobian:
return self.bijector.forward(self.distribution.mean())
else:
raise NotImplementedError(
"`mean` is not implemented for this transformed distribution, "
"because its bijector's Jacobian is not known to be constant.")
def mode(self) -> Array:
"""Calculates the mode."""
if self.bijector.is_constant_log_det:
return self.bijector.forward(self.distribution.mode())
else:
raise NotImplementedError(
"`mode` is not implemented for this transformed distribution, "
"because its bijector's Jacobian determinant is not known to be "
"constant.")
def entropy( # pylint: disable=arguments-differ
self,
input_hint: Optional[Array] = None) -> Array:
"""Calculates the Shannon entropy (in Nats).
Only works for bijectors with constant Jacobian determinant.
Args:
input_hint: an example sample from the base distribution, used to compute
the constant forward log-determinant. If not specified, it is computed
using a zero array of the shape and dtype of a sample from the base
distribution.
Returns:
the entropy of the distribution.
Raises:
      NotImplementedError: if the bijector's Jacobian determinant is not known to be
constant.
"""
if self.bijector.is_constant_log_det:
if input_hint is None:
shape = self.distribution.batch_shape + self.distribution.event_shape
input_hint = jnp.zeros(shape, dtype=self.distribution.dtype)
entropy = self.distribution.entropy()
fldj = self.bijector.forward_log_det_jacobian(input_hint)
return entropy + fldj
else:
raise NotImplementedError(
"`entropy` is not implemented for this transformed distribution, "
"because its bijector's Jacobian determinant is not known to be "
"constant.")
def _kl_divergence_transformed_transformed(
dist1: Union[Transformed, tfd.TransformedDistribution],
dist2: Union[Transformed, tfd.TransformedDistribution],
*unused_args,
input_hint: Optional[Array] = None,
**unused_kwargs,
) -> Array:
"""Obtains the KL divergence between two Transformed distributions.
This computes the KL divergence between two Transformed distributions with the
same bijector. If the two Transformed distributions do not have the same
bijector, an error is raised. To determine if the bijectors are equal, this
method proceeds as follows:
- If both bijectors are the same instance of a Distrax bijector, then they are
declared equal.
- If not the same instance, we check if they are equal according to their
`same_as` predicate.
- Otherwise, the string representation of the Jaxpr of the `forward` method
of each bijector is compared. If both string representations are equal, the
bijectors are declared equal.
- Otherwise, the bijectors cannot be guaranteed to be equal and an error is
raised.
Args:
dist1: A Transformed distribution.
dist2: A Transformed distribution.
input_hint: an example sample from the base distribution, used to trace the
`forward` method. If not specified, it is computed using a zero array of
the shape and dtype of a sample from the base distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
Raises:
NotImplementedError: If bijectors are not known to be equal.
ValueError: If the base distributions do not have the same `event_shape`.
"""
if dist1.distribution.event_shape != dist2.distribution.event_shape:
raise ValueError(
f"The two base distributions do not have the same event shape: "
f"{dist1.distribution.event_shape} and "
f"{dist2.distribution.event_shape}.")
bij1 = conversion.as_bijector(dist1.bijector) # conversion needed for TFP
bij2 = conversion.as_bijector(dist2.bijector)
# Check if the bijectors are different.
if bij1 != bij2 and not bij1.same_as(bij2):
if input_hint is None:
input_hint = jnp.zeros(
dist1.distribution.event_shape, dtype=dist1.distribution.dtype)
jaxpr_bij1 = jax.make_jaxpr(bij1.forward)(input_hint).jaxpr
jaxpr_bij2 = jax.make_jaxpr(bij2.forward)(input_hint).jaxpr
if str(jaxpr_bij1) != str(jaxpr_bij2):
raise NotImplementedError(
f"The KL divergence cannot be obtained because it is not possible to "
f"guarantee that the bijectors {dist1.bijector.name} and "
f"{dist2.bijector.name} of the Transformed distributions are "
f"equal. If possible, use the same instance of a Distrax bijector.")
return dist1.distribution.kl_divergence(dist2.distribution)
# Register the KL functions with TFP.
tfd.RegisterKL(Transformed, Transformed)(_kl_divergence_transformed_transformed)
tfd.RegisterKL(Transformed.equiv_tfp_cls, Transformed)(
_kl_divergence_transformed_transformed)
tfd.RegisterKL(Transformed, Transformed.equiv_tfp_cls)(
_kl_divergence_transformed_transformed)
| distrax-master | distrax/_src/distributions/transformed.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `dirichlet.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import beta
from distrax._src.distributions import dirichlet
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
class DirichletTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(dirichlet.Dirichlet)
@parameterized.named_parameters(
('1d params', (3,), ()),
('2d params', (4, 3), (4,)),
)
def test_properties(self, concentration_shape, batch_shape):
rng = np.random.default_rng(42)
concentration = 0.1 + rng.uniform(size=concentration_shape)
dist = self.distrax_cls(concentration)
self.assertEqual(dist.event_shape, (3,))
self.assertEqual(dist.batch_shape, batch_shape)
self.assertion_fn(rtol=1e-4)(dist.concentration, concentration)
@parameterized.named_parameters(
('0d params', ()),
('1d params with K=1', (1,)),
('2d params with K=1', (4, 1)),
)
def test_raises_on_wrong_concentration(self, concentration_shape):
rng = np.random.default_rng(42)
concentration = 0.1 + rng.uniform(size=concentration_shape)
with self.assertRaises(ValueError):
self.distrax_cls(concentration)
@chex.all_variants
@parameterized.named_parameters(
('1d params, no shape', (3,), ()),
('1d params, int shape', (3,), 4),
('1d params, 1-tuple shape', (3,), (4,)),
('1d params, 2-tuple shape', (3,), (5, 4)),
('2d params, no shape', (2, 3), ()),
('2d params, int shape', (2, 3), 4),
('2d params, 1-tuple shape', (2, 3), (4,)),
('2d params, 2-tuple shape', (2, 3), (5, 4)),
)
def test_sample_shape(self, concentration_shape, sample_shape):
rng = np.random.default_rng(42)
concentration = 0.1 + rng.uniform(size=concentration_shape)
super()._test_sample_shape((concentration,), dict(), sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float16', jnp.float16))
def test_sample_dtype(self, dtype):
dist = self.distrax_cls(concentration=jnp.ones((3,), dtype=dtype))
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
self.assertEqual(samples.dtype, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d params, no shape', (3,), ()),
('1d params, int shape', (3,), 4),
('1d params, 1-tuple shape', (3,), (4,)),
('1d params, 2-tuple shape', (3,), (5, 4)),
('2d params, no shape', (2, 3), ()),
('2d params, int shape', (2, 3), 4),
('2d params, 1-tuple shape', (2, 3), (4,)),
('2d params, 2-tuple shape', (2, 3), (5, 4)),
)
def test_sample_and_log_prob(self, concentration_shape, sample_shape):
rng = np.random.default_rng(42)
concentration = 0.1 + rng.uniform(size=concentration_shape)
super()._test_sample_and_log_prob(
dist_args=(concentration,),
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d params, 1d value', (3,), (3,)),
('1d params, 2d value', (3,), (4, 3)),
('2d params, 1d value', (4, 3), (3,)),
('2d params, 2d value', (4, 3), (4, 3)),
('2d params, 3d value', (4, 3), (5, 4, 3)),
)
def test_methods_with_value(self, concentration_shape, value_shape):
rng = np.random.default_rng(42)
concentration = np.abs(rng.normal(size=concentration_shape))
value = rng.uniform(size=value_shape)
value /= np.sum(value, axis=-1, keepdims=True)
for method in ['prob', 'log_prob']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_args=(concentration,),
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('1d params', (3,)),
('2d params', (4, 3)),
)
def test_method(self, concentration_shape):
rng = np.random.default_rng(42)
concentration = np.abs(rng.normal(size=concentration_shape))
for method in ['entropy', 'mean', 'variance', 'stddev', 'covariance']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_args=(concentration,),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('without nans', (2., 3., 4.), (1. / 6., 2. / 6., 3. / 6.)),
('with nans', (0.5, 3., 4.), np.nan),
)
def test_mode(self, concentration, expected_result):
dist = self.distrax_cls(concentration)
result = self.variant(dist.mode)()
if np.any(np.isnan(expected_result)):
self.assertTrue(jnp.all(jnp.isnan(result)))
else:
self.assertion_fn(rtol=1e-3)(result, expected_result)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('distrax_to_distrax', 'distrax_to_distrax'),
('distrax_to_tfp', 'distrax_to_tfp'),
('tfp_to_distrax', 'tfp_to_distrax'),
)
def test_with_two_distributions(self, mode_string):
rng = np.random.default_rng(42)
concentration1 = np.abs(rng.normal(size=(4, 3, 2))).astype(np.float32)
concentration2 = np.abs(rng.normal(size=(3, 2))).astype(np.float32)
for method in ['kl_divergence', 'cross_entropy']:
with self.subTest(method=method):
super()._test_with_two_distributions(
attribute_string=method,
mode_string=mode_string,
dist1_kwargs={'concentration': concentration1},
dist2_kwargs={'concentration': concentration2},
assertion_fn=self.assertion_fn(rtol=3e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('distrax_to_distrax', 'distrax_to_distrax'),
('distrax_to_tfp', 'distrax_to_tfp'),
('tfp_to_distrax', 'tfp_to_distrax'),
)
def test_with_two_distributions_dirichlet_beta(self, mode_string):
rng = np.random.default_rng(42)
# Distribution 1 is Dirichlet.
alpha1 = rng.uniform(size=(4, 3)).astype(np.float32)
beta1 = rng.uniform(size=(4, 3)).astype(np.float32)
concentration1 = np.stack((alpha1, beta1), axis=-1)
distrax_dist1 = self.distrax_cls(concentration1)
tfp_dist1 = tfd.Dirichlet(concentration1)
# Distribution 2 is Beta.
alpha2 = rng.uniform(size=(3,)).astype(np.float32)
beta2 = rng.uniform(size=(3,)).astype(np.float32)
distrax_dist2 = beta.Beta(alpha2, beta2)
tfp_dist2 = tfd.Beta(alpha2, beta2)
for method in ['kl_divergence', 'cross_entropy']:
with self.subTest(method=method):
# Expected results are computed using TFP Beta-to-Beta KL divergence.
expected_result_1 = getattr(tfd.Beta(alpha1, beta1), method)(tfp_dist2)
expected_result_2 = getattr(tfp_dist2, method)(tfd.Beta(alpha1, beta1))
if mode_string == 'distrax_to_distrax':
result1 = self.variant(getattr(distrax_dist1, method))(distrax_dist2)
result2 = self.variant(getattr(distrax_dist2, method))(distrax_dist1)
elif mode_string == 'distrax_to_tfp':
result1 = self.variant(getattr(distrax_dist1, method))(tfp_dist2)
result2 = self.variant(getattr(distrax_dist2, method))(tfp_dist1)
elif mode_string == 'tfp_to_distrax':
result1 = self.variant(getattr(tfp_dist1, method))(distrax_dist2)
result2 = self.variant(getattr(tfp_dist2, method))(distrax_dist1)
self.assertion_fn(rtol=3e-2)(result1, expected_result_1)
self.assertion_fn(rtol=3e-2)(result2, expected_result_2)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('dirichlet_dirichlet', dirichlet.Dirichlet),
('beta_dirichlet', beta.Beta),
)
def test_kl_raises_on_wrong_dims(self, dist2_type):
rng = np.random.default_rng(42)
# Distribution 1 is Dirichlet.
concentration1 = np.abs(rng.normal(size=(5, 3)))
dist1 = self.distrax_cls(concentration1)
# Distribution 2 is either Dirichlet or Beta.
if dist2_type is dirichlet.Dirichlet:
dist2_kwargs = {'concentration': rng.uniform(size=(5, 4))}
elif dist2_type is beta.Beta:
dist2_kwargs = {'alpha': rng.uniform(size=(5,)),
'beta': rng.uniform(size=(5,))}
dist2 = dist2_type(**dist2_kwargs)
with self.assertRaises(ValueError):
self.variant(dist1.kl_divergence)(dist2)
with self.assertRaises(ValueError):
self.variant(dist2.kl_divergence)(dist1)
  def test_jittable(self):
rng = np.random.default_rng(42)
concentration = np.abs(rng.normal(size=(4,)))
super()._test_jittable(
(concentration,), assertion_fn=self.assertion_fn(rtol=1e-3))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
rng = np.random.default_rng(42)
concentration = np.abs(rng.normal(size=(6, 5, 4)))
dist = self.distrax_cls(concentration)
self.assertIsInstance(dist, self.distrax_cls)
self.assertion_fn(rtol=1e-3)(
dist[slice_].concentration, concentration[slice_])
def test_slice_ellipsis(self):
rng = np.random.default_rng(42)
concentration = np.abs(rng.normal(size=(6, 5, 4)))
dist = self.distrax_cls(concentration)
self.assertIsInstance(dist, self.distrax_cls)
self.assertion_fn(rtol=1e-3)(
dist[..., -1].concentration, concentration[:, -1, :])
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/dirichlet_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `quantized.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import independent
from distrax._src.distributions import quantized
from distrax._src.distributions import uniform
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
class QuantizedTFPUniform(equivalence.EquivalenceTest):
"""Class to test distrax quantized distribution against TFP.
The quantized distribution takes a distribution as input. These can be either
distrax or TFP distributions. In this test, we set the base distribution to
be a TFP uniform one. Subsequent tests evaluate other combinations.
"""
def _make_tfp_base_distribution(self):
return tfd.Uniform(0., 100.)
_make_distrax_base_distribution = _make_tfp_base_distribution
def setUp(self):
super().setUp()
self._init_distr_cls(quantized.Quantized)
self.tfd_base_distribution = self._make_tfp_base_distribution()
self.distrax_base_distribution = self._make_distrax_base_distribution()
def test_event_shape(self):
super()._test_event_shape((self.distrax_base_distribution,),
dict(),
tfp_dist_args=(self.tfd_base_distribution,))
def test_batch_shape(self):
super()._test_batch_shape((self.distrax_base_distribution,),
dict(),
tfp_dist_args=(self.tfd_base_distribution,))
def test_low_and_high(self):
distr_params = (uniform.Uniform(0., 100.), 0.5, 90.5)
dist = self.distrax_cls(*distr_params)
self.assertion_fn(rtol=1e-2)(dist.low, 1.)
self.assertion_fn(rtol=1e-2)(dist.high, 90.)
@chex.all_variants
@parameterized.named_parameters(
('empty shape', ()),
('int shape', 10),
('2-tuple shape', (10, 20)),
)
def test_sample_shape(self, sample_shape):
super()._test_sample_shape((self.distrax_base_distribution,),
dict(),
tfp_dist_args=(self.tfd_base_distribution,),
sample_shape=sample_shape)
@chex.all_variants
def test_sample_dtype(self):
dist = self.distrax_cls(self.distrax_base_distribution)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(dist.dtype, samples.dtype)
self.assertEqual(dist.dtype, self.distrax_base_distribution.dtype)
@chex.all_variants
@parameterized.named_parameters(
('no cutoffs, no shape', (None, None), ()),
('noop cutoffs, no shape', (-10., 200.), ()),
('low cutoff, no shape', (10., None), ()),
('high cutoff, no shape', (None, 50.), ()),
('both cutoffs, no shape', (10., 50.), ()),
('both cutoffs, int shape', (10., 50.), 5),
('no cutoffs, 2-tuple shape', (None, None), (5, 4)),
('noop cutoffs, 2-tuple shape', (-10., 200.), (5, 4)),
('low cutoff, 2-tuple shape', (10., None), (5, 4)),
('high cutoff, 2-tuple shape', (None, 50.), (5, 4)),
('both cutoff, 2-tuple shape', (10., 50.), (5, 4)),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
super()._test_sample_and_log_prob(
dist_args=(self.distrax_base_distribution,) + distr_params,
dist_kwargs=dict(),
tfp_dist_args=(self.tfd_base_distribution,) + distr_params,
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('no cutoffs, integer', (None, None), 20),
('noop cutoffs, integer', (-10., 200.), 20),
('low cutoff, integer', (10., None), 20),
('high cutoff, integer', (None, 50.), 20),
('both cutoffs, integer', (10., 50.), 20),
('both cutoffs, integer greater than cutoff', (10., 50.), 70),
('both cutoffs, integer smaller than cutoff', (10., 50.), 5),
('both cutoffs, 1-d array', (10., 50.), np.array([20, 30])),
('no cutoffs, 1-d array', (None, None), np.array([20, 30])),
('noop cutoffs, 1-d array', (-10., 200.), np.array([20, 30])),
('low cutoffs, 1-d array', (10., None), np.array([20, 30])),
('high cutoffs, 1-d array', (None, 50.), np.array([20, 30])),
)
def test_method_with_value(self, distr_params, value):
for method in ['log_cdf', 'cdf', 'prob', 'survival_function',
'log_survival_function']:
with self.subTest(method):
super()._test_attribute(
attribute_string=method,
dist_args=(self.distrax_base_distribution,) + distr_params,
tfp_dist_args=(self.tfd_base_distribution,) + distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('no cutoffs, integer', (None, None), 20),
('noop cutoffs, integer', (-10., 200.), 20),
('low cutoff, integer', (10., None), 20),
('high cutoff, integer', (None, 50.), 20),
('both cutoffs, integer', (10., 50.), 20),
('both cutoffs, 1-d array', (10., 50.), np.array([20, 30])),
('no cutoffs, 1-d array', (None, None), np.array([20, 30])),
('noop cutoffs, 1-d array', (-10., 200.), np.array([20, 30])),
('low cutoffs, 1-d array', (10., None), np.array([20, 30])),
('high cutoffs, 1-d array', (None, 50.), np.array([20, 30])),
)
def test_log_prob(self, distr_params, value):
"""Tests the `log_prob`.
We separate this test from `test_method_with_value` because the options
where `value` is outside the cutoff return `nan` in TFP but `-inf` in
Distrax.
Args:
distr_params: Parameters of the distribution.
value: The value where the `log_prob` is evaluated.
"""
super()._test_attribute(
attribute_string='log_prob',
dist_args=(self.distrax_base_distribution,) + distr_params,
tfp_dist_args=(self.tfd_base_distribution,) + distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('below low', 10., 90., 8),
('above high', 10., 90., 95),
('below support', None, None, -10),
('above support', None, None, 101),
('within support, non-integer', None, None, 40.5),
)
def test_edge_cases(self, low, high, value):
distr_params = {
'distribution': self.distrax_base_distribution,
'low': low,
'high': high,
}
dist = self.distrax_cls(**distr_params)
np.testing.assert_allclose(self.variant(dist.log_prob)(value), -np.inf)
np.testing.assert_allclose(self.variant(dist.prob)(value), 0.)
@parameterized.named_parameters(
('low with cutoffs', (10., 50.), 'low'),
('high with cutoffs', (10., 50.), 'high'),
)
def test_method(self, distr_params, function_string):
super()._test_attribute(
attribute_string=function_string,
dist_args=(self.distrax_base_distribution,) + distr_params,
dist_kwargs=dict(),
tfp_dist_args=(self.tfd_base_distribution,) + distr_params,
assertion_fn=self.assertion_fn(rtol=1e-2))
def test_jittable(self):
super()._test_jittable((self.tfd_base_distribution, 0., 1.))
class QuantizedDistraxUniform(QuantizedTFPUniform):
def _make_distrax_base_distribution(self):
return uniform.Uniform(0., 100.)
def test_jittable(self):
super()._test_jittable((self.distrax_base_distribution, 0., 1.))
class QuantizedTFPUniform2D(equivalence.EquivalenceTest):
"""Class to test distrax quantized distribution against TFP.
The quantized distribution takes a distribution as input. These can be either
distrax or TFP distributions. In this test, we set the base distribution to
be a TFP uniform one with `batch_shape == (2,)`. Subsequent tests evaluate
other combinations.
"""
def _make_tfp_base_distribution(self):
return tfd.Uniform(low=[0., 10.], high=[100., 90.])
_make_distrax_base_distribution = _make_tfp_base_distribution
def setUp(self):
super().setUp()
self._init_distr_cls(quantized.Quantized)
self.tfd_base_distribution = self._make_tfp_base_distribution()
self.distrax_base_distribution = self._make_distrax_base_distribution()
def test_event_shape(self):
kwargs = {
'low': np.array([10., 30.], dtype=np.float32),
'high': np.array([80., 70.], dtype=np.float32),
}
super()._test_event_shape(dist_args=(self.distrax_base_distribution,),
dist_kwargs=kwargs,
tfp_dist_args=(self.tfd_base_distribution,))
def test_batch_shape(self):
kwargs = {
'low': np.array([10., 30.], dtype=np.float32),
'high': np.array([80., 70.], dtype=np.float32),
}
super()._test_batch_shape(dist_args=(self.distrax_base_distribution,),
dist_kwargs=kwargs,
tfp_dist_args=(self.tfd_base_distribution,))
@chex.all_variants
@parameterized.named_parameters(
('empty shape', ()),
('int shape', 10),
('2-tuple shape', (10, 20)),
)
def test_sample_shape(self, sample_shape):
kwargs = {
'low': np.array([10., 30.], dtype=np.float32),
'high': np.array([80., 70.], dtype=np.float32),
}
super()._test_sample_shape(dist_args=(self.distrax_base_distribution,),
dist_kwargs=kwargs,
tfp_dist_args=(self.tfd_base_distribution,),
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('no cutoffs, no shape', (None, None), ()),
('scalar cutoffs, no shape', (20., 50.), ()),
('1d cutoffs, no shape', ([10., 10.], [20., 80.]), ()),
('mixed cutoffs, no shape', ([10., 10.], 20.), ()),
('no cutoffs, int shape', (None, None), 10),
('scalar cutoffs, int shape', (20., 50.), 10),
('1d cutoffs, int shape', ([10., 10.], [20., 80.]), 10),
('mixed cutoffs, int shape', ([10., 10.], 20.), 10),
('no cutoffs, 1d shape', (None, None), [10]),
('scalar cutoffs, 1d shape', (20., 50.), [10]),
('1d cutoffs, 1d shape', ([10., 10.], [20., 80.]), [10]),
('mixed cutoffs, 1d shape', ([10., 10.], 20.), [10]),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = tuple(map(
lambda x: None if x is None else np.asarray(x, np.float32),
distr_params))
super()._test_sample_and_log_prob(
dist_args=(self.distrax_base_distribution,) + distr_params,
dist_kwargs=dict(),
tfp_dist_args=(self.tfd_base_distribution,) + distr_params,
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('no cutoffs, scalar value', (None, None), 20),
('scalar cutoffs, scalar value', (20., 50.), 20),
('1d cutoffs, scalar value', ([10., 12.], [20., 80.]), 15),
('mixed cutoffs, scalar value', ([10., 15.], 20.), 18),
('no cutoffs, 1d value', (None, None), np.array([20, 30])),
('scalar cutoffs, 1d value', (20., 50.), np.array([20, 30])),
('1d cutoffs, 1d value', ([10., 20.], [20., 80.]), np.array([20, 20])),
('mixed cutoffs, 1d value', ([10., 15.], 20.), np.array([11, 20])),
('mixed cutoffs, 2d value',
([10., 15.], 80.), np.array([[15, 15], [10, 20], [15, 15]])),
)
def test_method_with_value(self, distr_params, value):
# For `prob`, `log_prob`, `survival_function`, and `log_survival_function`
# distrax and TFP agree on integer values. We do not test equivalence on
# non-integer values where they may disagree.
# We also do not test equivalence on values outside of the cutoff, where
# `log_prob` values can be different (`NaN` vs. `-jnp.inf`).
distr_params = tuple(map(
lambda x: None if x is None else np.asarray(x, np.float32),
distr_params))
for method in ['log_cdf', 'cdf', 'prob', 'log_prob', 'survival_function',
'log_survival_function']:
with self.subTest(method):
super()._test_attribute(
attribute_string=method,
dist_args=(self.distrax_base_distribution,) + distr_params,
tfp_dist_args=(self.tfd_base_distribution,) + distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-2))
def test_jittable(self):
super()._test_jittable((self.tfd_base_distribution, 0., 1.))
class QuantizedDistraxUniform2D(QuantizedTFPUniform2D):
def _make_distrax_base_distribution(self):
return uniform.Uniform(
low=jnp.array([0.0, 10.0]), high=jnp.array([100.0, 90.0])
)
def test_jittable(self):
super()._test_jittable((self.distrax_base_distribution, 0., 1.))
class QuantizedInvalidParams(equivalence.EquivalenceTest):
"""Class to test invalid combinations of the input parameters."""
def setUp(self):
super().setUp()
self._init_distr_cls(quantized.Quantized)
def test_non_univariate(self):
self._test_raises_error(dist_kwargs={
'distribution': independent.Independent(
            uniform.Uniform(np.array([0., 0.]), np.array([1., 1.])),
reinterpreted_batch_ndims=1),
})
def test_low_shape(self):
self._test_raises_error(dist_kwargs={
'distribution': uniform.Uniform(0., 1.),
'low': np.zeros((4,))
})
def test_high_shape(self):
self._test_raises_error(dist_kwargs={
'distribution': uniform.Uniform(0., 1.),
'high': np.ones((4,))
})
class QuantizedSlicingTest(parameterized.TestCase):
"""Class to test the `getitem` method."""
def setUp(self):
super().setUp()
self.uniform_low = np.random.randn(2, 3, 4)
self.uniform_high = self.uniform_low + np.abs(np.random.randn(2, 3, 4))
self.base = uniform.Uniform(self.uniform_low, self.uniform_high)
self.low = np.ceil(np.random.randn(3, 4))
self.high = np.floor(np.random.randn(3, 4))
self.dist = quantized.Quantized(self.base, self.low, self.high)
def assertion_fn(self, rtol):
return lambda x, y: np.testing.assert_allclose(x, y, rtol=rtol)
@parameterized.named_parameters(
('single element', 1, (3, 4)),
('range', slice(-1), (1, 3, 4)),
('range_2', (slice(None), slice(-1)), (2, 2, 4)),
('ellipsis', (Ellipsis, -1), (2, 3)),
)
def test_slice(self, slice_, expected_batch_shape):
sliced_dist = self.dist[slice_]
self.assertEqual(sliced_dist.batch_shape, expected_batch_shape)
self.assertEqual(sliced_dist.event_shape, self.dist.event_shape)
self.assertIsInstance(sliced_dist, quantized.Quantized)
self.assertIsInstance(sliced_dist.distribution, self.base.__class__)
self.assertion_fn(rtol=1e-2)(
sliced_dist.distribution.low, self.uniform_low[slice_])
self.assertion_fn(rtol=1e-2)(
sliced_dist.distribution.high, self.uniform_high[slice_])
self.assertion_fn(rtol=1e-2)(
sliced_dist.low,
np.broadcast_to(self.low, self.base.batch_shape)[slice_])
self.assertion_fn(rtol=1e-2)(
sliced_dist.high,
np.broadcast_to(self.high, self.base.batch_shape)[slice_])
class QuantizedSurvivalFunctionConsistencyTest(parameterized.TestCase):
"""Class to test whether `survival_function` = `1. - cdf`.
Test evaluates on both integer values and non-integer values.
"""
def setUp(self):
super().setUp()
self.base_distribution = uniform.Uniform(0., 10.)
self.values = np.linspace(-2., 12, num=57) # -2, -1.75, -1.5, ..., 12.
@chex.all_variants
@parameterized.named_parameters(
('no cutoffs', (None, None)),
('noop cutoffs', (-10., 20.)),
('low cutoff', (1., None)),
('high cutoff', (None, 5.)),
('both cutoffs', (1., 5.)),
)
def test_survival_function_cdf_consistency(self, dist_params):
dist = quantized.Quantized(self.base_distribution, *dist_params)
results = self.variant(
lambda x: dist.cdf(x) + dist.survival_function(x))(self.values)
np.testing.assert_allclose(results, np.ones_like(self.values), rtol=1e-2)
@chex.all_variants
@parameterized.named_parameters(
('no cutoffs', (None, None)),
('noop cutoffs', (-10., 20.)),
('low cutoff', (1., None)),
('high cutoff', (None, 5.)),
('both cutoffs', (1., 5.)),
)
def test_log_survival_function_log_cdf_consistency(self, dist_params):
def _sum_exps(dist, x):
return jnp.exp(dist.log_cdf(x)) + jnp.exp(dist.log_survival_function(x))
dist = quantized.Quantized(self.base_distribution, *dist_params)
results = self.variant(_sum_exps)(dist, self.values)
np.testing.assert_allclose(results, np.ones_like(self.values), rtol=1e-2)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/quantized_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `mvn_tri.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions.mvn_tri import MultivariateNormalTri
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
def _get_scale_tril_from_scale_triu(scale_triu: np.ndarray) -> np.ndarray:
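  """Returns a lower-triangular scale equivalent to an upper-triangular one.
  The covariance induced by an upper-triangular scale `U` is `U @ U.T`; its
  Cholesky factor `L` satisfies `L @ L.T == U @ U.T`, so a TFP distribution
  built with `scale_tril=L` matches a Distrax `MultivariateNormalTri` built
  with `scale_tri=U` and `is_lower=False`.
  """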
scale_triu = np.triu(scale_triu)
scale_triu_t = np.vectorize(np.transpose, signature='(k,k)->(k,k)')(
scale_triu)
cov = np.matmul(scale_triu, scale_triu_t)
return np.linalg.cholesky(cov)
class MultivariateNormalTriTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(MultivariateNormalTri)
@parameterized.named_parameters(
('all inputs are None', {}),
('wrong dimension of loc', {
'loc': np.array(0.),
}),
('scale_tri is 0d', {
'scale_tri': np.array(1.),
}),
('scale_tri is 1d', {
'scale_tri': np.ones((4,)),
}),
('scale_tri is not square', {
'scale_tri': np.ones((4, 3)),
}),
('inconsistent loc and scale_tri', {
'loc': np.zeros((4,)),
'scale_tri': np.ones((5, 5)),
}),
)
def test_raises_on_wrong_inputs(self, dist_kwargs):
with self.assertRaises(ValueError):
self.distrax_cls(**dist_kwargs)
@parameterized.named_parameters(
('loc provided', {'loc': np.zeros((4,))}),
('scale_tri provided', {'scale_tri': np.eye(4)}),
)
def test_default_properties(self, dist_kwargs):
dist = self.distrax_cls(**dist_kwargs)
self.assertTrue(dist.is_lower)
self.assertion_fn(rtol=1e-3)(dist.loc, jnp.zeros((4,)))
self.assertion_fn(rtol=1e-3)(dist.scale_tri, jnp.eye(4))
@parameterized.named_parameters(
('unbatched', (), (4,), (4, 4), True),
('batched loc', (7,), (7, 4), (4, 4), True),
('batched scale_tri lower', (7,), (4,), (7, 4, 4), True),
('batched scale_tri upper', (7,), (4,), (7, 4, 4), False),
)
def test_properties(self, batch_shape, loc_shape, scale_tri_shape, is_lower):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
scale_tri = rng.normal(size=scale_tri_shape)
dist = self.distrax_cls(loc=loc, scale_tri=scale_tri, is_lower=is_lower)
tri_fn = jnp.tril if is_lower else jnp.triu
self.assertEqual(dist.batch_shape, batch_shape)
self.assertEqual(dist.is_lower, is_lower)
self.assertion_fn(rtol=1e-3)(
dist.loc, jnp.broadcast_to(loc, batch_shape + (4,)))
self.assertion_fn(rtol=1e-3)(dist.scale_tri, jnp.broadcast_to(
tri_fn(scale_tri), batch_shape + (4, 4)))
@chex.all_variants
@parameterized.named_parameters(
('unbatched, no shape', (), (4,), (4, 4)),
('batched loc, no shape', (), (7, 4), (4, 4)),
('batched scale_tri, no shape', (), (4,), (7, 4, 4)),
('unbatched, with shape', (3,), (4,), (4, 4)),
('batched loc, with shape', (3,), (7, 4), (4, 4)),
('batched scale_tri, with shape', (3,), (4,), (7, 4, 4)),
)
def test_sample_shape(self, sample_shape, loc_shape, scale_tri_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
scale_tri = rng.normal(size=scale_tri_shape)
dist_kwargs = {'loc': loc, 'scale_tri': scale_tri}
tfp_dist_kwargs = {'loc': loc, 'scale_tril': scale_tri}
super()._test_sample_shape(
dist_args=(), dist_kwargs=dist_kwargs, tfp_dist_kwargs=tfp_dist_kwargs,
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist_params = {
'loc': np.array([0., 0.], dtype),
'scale_tri': np.array([[1., 0.], [0., 1.]], dtype)}
dist = self.distrax_cls(**dist_params)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
@parameterized.named_parameters(
('unbatched, unbatched value', (4,), (4,), (4, 4), True),
('unbatched, unbatched value, upper', (4,), (4,), (4, 4), False),
('batched loc, unbatched value', (4,), (7, 4), (4, 4), True),
('batched scale_tri, unbatched value', (4,), (4,), (7, 4, 4), True),
('unbatched, batched value', (3, 7, 4), (4,), (4, 4), True),
('batched loc, batched value', (3, 7, 4), (7, 4), (4, 4), True),
('batched scale_tri, batched value', (3, 7, 4), (4,), (7, 4, 4), True),
('batched scale_tri, batched value, upper',
(3, 7, 4), (4,), (7, 4, 4), False),
)
def test_log_prob(self, value_shape, loc_shape, scale_tri_shape, is_lower):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
scale_tri = rng.normal(size=scale_tri_shape)
value = rng.normal(size=value_shape)
dist_kwargs = {'loc': loc, 'scale_tri': scale_tri, 'is_lower': is_lower}
if is_lower:
tfp_dist_kwargs = {'loc': loc, 'scale_tril': scale_tri}
else:
scale_tril = _get_scale_tril_from_scale_triu(scale_tri)
tfp_dist_kwargs = {'loc': loc, 'scale_tril': scale_tril}
super()._test_attribute(
attribute_string='log_prob',
dist_kwargs=dist_kwargs,
tfp_dist_kwargs=tfp_dist_kwargs,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('unbatched', (4,), (4, 4)),
('batched loc', (7, 4), (4, 4)),
('batched scale_tri', (4,), (7, 4, 4)),
)
def test_method(self, loc_shape, scale_tri_shape):
rng = np.random.default_rng(2022)
loc = rng.normal(size=loc_shape)
scale_tri = rng.normal(size=scale_tri_shape)
for method in ['entropy', 'mean', 'stddev', 'variance',
'covariance', 'mode']:
for is_lower in [True, False]:
if method in ['stddev', 'covariance', 'variance']:
rtol = 2e-2 if is_lower else 5e-2
else:
rtol = 1e-3
dist_kwargs = {'loc': loc, 'scale_tri': scale_tri, 'is_lower': is_lower}
if is_lower:
tfp_dist_kwargs = {'loc': loc, 'scale_tril': scale_tri}
else:
scale_tril = _get_scale_tril_from_scale_triu(scale_tri)
tfp_dist_kwargs = {'loc': loc, 'scale_tril': scale_tril}
with self.subTest(method=method, is_lower=is_lower):
super()._test_attribute(
method,
dist_kwargs=dist_kwargs,
tfp_dist_kwargs=tfp_dist_kwargs,
assertion_fn=self.assertion_fn(rtol=rtol))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
rng = np.random.default_rng(2022)
loc1 = rng.normal(size=(5, 1, 4))
scale_tri1 = rng.normal(size=(3, 4, 4))
loc2 = rng.normal(size=(3, 4))
scale_tri2 = rng.normal(size=(4, 4))
for is_lower in [True, False]:
dist1_kwargs = {
'loc': loc1, 'scale_tri': scale_tri1, 'is_lower': is_lower}
dist2_kwargs = {
'loc': loc2, 'scale_tri': scale_tri2, 'is_lower': is_lower}
if is_lower:
tfp_dist1_kwargs = {'loc': loc1, 'scale_tril': scale_tri1}
tfp_dist2_kwargs = {'loc': loc2, 'scale_tril': scale_tri2}
else:
tfp_dist1_kwargs = {
'loc': loc1,
'scale_tril': _get_scale_tril_from_scale_triu(scale_tri1)
}
tfp_dist2_kwargs = {
'loc': loc2,
'scale_tril': _get_scale_tril_from_scale_triu(scale_tri2)
}
with self.subTest(is_lower=is_lower):
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs=dist1_kwargs,
dist2_kwargs=dist2_kwargs,
tfp_dist1_kwargs=tfp_dist1_kwargs,
tfp_dist2_kwargs=tfp_dist2_kwargs,
assertion_fn=self.assertion_fn(rtol=1e-3))
def test_jittable(self):
super()._test_jittable(
dist_kwargs={'loc': np.zeros((4,))},
assertion_fn=self.assertion_fn(rtol=1e-3))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
rng = np.random.default_rng(2022)
loc = rng.normal(size=(6, 5, 4))
scale_tri = rng.normal(size=(4, 4))
for is_lower in [True, False]:
with self.subTest(is_lower=is_lower):
dist_kwargs = {'loc': loc, 'scale_tri': scale_tri, 'is_lower': is_lower}
dist = self.distrax_cls(**dist_kwargs)
self.assertEqual(dist[slice_].batch_shape, loc[slice_].shape[:-1])
self.assertEqual(dist[slice_].event_shape, dist.event_shape)
self.assertEqual(dist[slice_].is_lower, dist.is_lower)
self.assertion_fn(rtol=1e-3)(dist[slice_].mean(), loc[slice_])
def test_slice_ellipsis(self):
rng = np.random.default_rng(2022)
loc = rng.normal(size=(6, 5, 4))
scale_tri = rng.normal(size=(4, 4))
for is_lower in [True, False]:
with self.subTest(is_lower=is_lower):
dist_kwargs = {'loc': loc, 'scale_tri': scale_tri, 'is_lower': is_lower}
dist = self.distrax_cls(**dist_kwargs)
self.assertEqual(dist[..., -1].batch_shape, (6,))
self.assertEqual(dist[..., -1].event_shape, dist.event_shape)
self.assertEqual(dist[..., -1].is_lower, dist.is_lower)
self.assertion_fn(rtol=1e-3)(dist[..., -1].mean(), loc[:, -1, :])
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/mvn_tri_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple example of a flow model trained on MNIST."""
from typing import Any, Iterator, Mapping, Optional, Sequence, Tuple
from absl import app
from absl import flags
from absl import logging
import distrax
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
flags.DEFINE_integer("flow_num_layers", 8,
"Number of layers to use in the flow.")
flags.DEFINE_integer("mlp_num_layers", 2,
"Number of layers to use in the MLP conditioner.")
flags.DEFINE_integer("hidden_size", 500, "Hidden size of the MLP conditioner.")
flags.DEFINE_integer("num_bins", 4,
"Number of bins to use in the rational-quadratic spline.")
flags.DEFINE_integer("batch_size", 128,
"Batch size for training and evaluation.")
flags.DEFINE_float("learning_rate", 1e-4, "Learning rate for the optimizer.")
flags.DEFINE_integer("training_steps", 5000, "Number of training steps to run.")
flags.DEFINE_integer("eval_frequency", 100, "How often to evaluate the model.")
FLAGS = flags.FLAGS
Array = jnp.ndarray
PRNGKey = Array
Batch = Mapping[str, np.ndarray]
OptState = Any
MNIST_IMAGE_SHAPE = (28, 28, 1)
def make_conditioner(event_shape: Sequence[int],
hidden_sizes: Sequence[int],
num_bijector_params: int) -> hk.Sequential:
"""Creates an MLP conditioner for each layer of the flow."""
return hk.Sequential([
hk.Flatten(preserve_dims=-len(event_shape)),
hk.nets.MLP(hidden_sizes, activate_final=True),
# We initialize this linear layer to zero so that the flow is initialized
# to the identity function.
hk.Linear(
np.prod(event_shape) * num_bijector_params,
w_init=jnp.zeros,
b_init=jnp.zeros),
hk.Reshape(tuple(event_shape) + (num_bijector_params,), preserve_dims=-1),
])
def make_flow_model(event_shape: Sequence[int],
num_layers: int,
hidden_sizes: Sequence[int],
num_bins: int) -> distrax.Transformed:
"""Creates the flow model."""
# Alternating binary mask.
mask = jnp.arange(0, np.prod(event_shape)) % 2
mask = jnp.reshape(mask, event_shape)
mask = mask.astype(bool)
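  # For a (28, 28, 1) MNIST event this gives 784 entries alternating
  # False/True, so each coupling layer updates one half of the pixels
  # conditioned on the other half; flipping the mask after every layer (below)
  # swaps which half is transformed.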
def bijector_fn(params: Array):
return distrax.RationalQuadraticSpline(
params, range_min=0., range_max=1.)
# Number of parameters for the rational-quadratic spline:
# - `num_bins` bin widths
# - `num_bins` bin heights
# - `num_bins + 1` knot slopes
# for a total of `3 * num_bins + 1` parameters.
num_bijector_params = 3 * num_bins + 1
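  # E.g. with the default of 4 bins this is 4 widths + 4 heights + 5 knot
  # slopes = 13 parameters per transformed event dimension.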
layers = []
for _ in range(num_layers):
layer = distrax.MaskedCoupling(
mask=mask,
bijector=bijector_fn,
conditioner=make_conditioner(event_shape, hidden_sizes,
num_bijector_params))
layers.append(layer)
# Flip the mask after each layer.
mask = jnp.logical_not(mask)
# We invert the flow so that the `forward` method is called with `log_prob`.
flow = distrax.Inverse(distrax.Chain(layers))
base_distribution = distrax.Independent(
distrax.Uniform(
low=jnp.zeros(event_shape),
high=jnp.ones(event_shape)),
reinterpreted_batch_ndims=len(event_shape))
return distrax.Transformed(base_distribution, flow)
def load_dataset(split: tfds.Split, batch_size: int) -> Iterator[Batch]:
ds = tfds.load("mnist", split=split, shuffle_files=True)
ds = ds.shuffle(buffer_size=10 * batch_size)
ds = ds.batch(batch_size)
ds = ds.prefetch(buffer_size=5)
ds = ds.repeat()
return iter(tfds.as_numpy(ds))
def prepare_data(batch: Batch, prng_key: Optional[PRNGKey] = None) -> Array:
data = batch["image"].astype(np.float32)
if prng_key is not None:
# Dequantize pixel values {0, 1, ..., 255} with uniform noise [0, 1).
data += jax.random.uniform(prng_key, data.shape)
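  # After dequantization the values lie in [0, 256), so dividing by 256 below
  # maps them into [0, 1), matching the support of the flow's Uniform base
  # distribution and the [0, 1] range of the rational-quadratic splines.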
return data / 256. # Normalize pixel values from [0, 256) to [0, 1).
@hk.without_apply_rng
@hk.transform
def log_prob(data: Array) -> Array:
model = make_flow_model(
event_shape=data.shape[1:],
num_layers=FLAGS.flow_num_layers,
hidden_sizes=[FLAGS.hidden_size] * FLAGS.mlp_num_layers,
num_bins=FLAGS.num_bins)
return model.log_prob(data)
def loss_fn(params: hk.Params, prng_key: PRNGKey, batch: Batch) -> Array:
data = prepare_data(batch, prng_key)
# Loss is average negative log likelihood.
loss = -jnp.mean(log_prob.apply(params, data))
return loss
@jax.jit
def eval_fn(params: hk.Params, batch: Batch) -> Array:
data = prepare_data(batch) # We don't dequantize during evaluation.
loss = -jnp.mean(log_prob.apply(params, data))
return loss
def main(_):
optimizer = optax.adam(FLAGS.learning_rate)
@jax.jit
def update(params: hk.Params,
prng_key: PRNGKey,
opt_state: OptState,
batch: Batch) -> Tuple[hk.Params, OptState]:
"""Single SGD update step."""
grads = jax.grad(loss_fn)(params, prng_key, batch)
updates, new_opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, new_opt_state
prng_seq = hk.PRNGSequence(42)
params = log_prob.init(next(prng_seq), np.zeros((1, *MNIST_IMAGE_SHAPE)))
opt_state = optimizer.init(params)
train_ds = load_dataset(tfds.Split.TRAIN, FLAGS.batch_size)
valid_ds = load_dataset(tfds.Split.TEST, FLAGS.batch_size)
for step in range(FLAGS.training_steps):
params, opt_state = update(params, next(prng_seq), opt_state,
next(train_ds))
if step % FLAGS.eval_frequency == 0:
val_loss = eval_fn(params, next(valid_ds))
logging.info("STEP: %5d; Validation loss: %.3f", step, val_loss)
if __name__ == "__main__":
app.run(main)
| distrax-master | examples/flow.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variational Autoencoder example on binarized MNIST dataset."""
from typing import Any, Iterator, Mapping, NamedTuple, Sequence, Tuple
from absl import app
from absl import flags
from absl import logging
import distrax
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
flags.DEFINE_integer("batch_size", 128, "Size of the batch to train on.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate for the optimizer.")
flags.DEFINE_integer("training_steps", 5000, "Number of training steps to run.")
flags.DEFINE_integer("eval_frequency", 100, "How often to evaluate the model.")
FLAGS = flags.FLAGS
OptState = Any
PRNGKey = jnp.ndarray
Batch = Mapping[str, np.ndarray]
MNIST_IMAGE_SHAPE: Sequence[int] = (28, 28, 1)
def load_dataset(split: str, batch_size: int) -> Iterator[Batch]:
ds = tfds.load("binarized_mnist", split=split, shuffle_files=True)
ds = ds.shuffle(buffer_size=10 * batch_size)
ds = ds.batch(batch_size)
ds = ds.prefetch(buffer_size=5)
ds = ds.repeat()
return iter(tfds.as_numpy(ds))
class Encoder(hk.Module):
"""Encoder model."""
def __init__(self, hidden_size: int = 512, latent_size: int = 10):
super().__init__()
self._hidden_size = hidden_size
self._latent_size = latent_size
def __call__(self, x: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
x = hk.Flatten()(x)
x = hk.Linear(self._hidden_size)(x)
x = jax.nn.relu(x)
mean = hk.Linear(self._latent_size)(x)
log_stddev = hk.Linear(self._latent_size)(x)
stddev = jnp.exp(log_stddev)
return mean, stddev
class Decoder(hk.Module):
"""Decoder model."""
def __init__(
self,
hidden_size: int = 512,
output_shape: Sequence[int] = MNIST_IMAGE_SHAPE,
):
super().__init__()
self._hidden_size = hidden_size
self._output_shape = output_shape
def __call__(self, z: jnp.ndarray) -> jnp.ndarray:
z = hk.Linear(self._hidden_size)(z)
z = jax.nn.relu(z)
logits = hk.Linear(np.prod(self._output_shape))(z)
logits = jnp.reshape(logits, (-1, *self._output_shape))
return logits
class VAEOutput(NamedTuple):
variational_distrib: distrax.Distribution
likelihood_distrib: distrax.Distribution
image: jnp.ndarray
class VAE(hk.Module):
"""Main VAE model class, uses Encoder & Decoder under the hood."""
def __init__(
self,
latent_size: int = 10,
hidden_size: int = 512,
output_shape: Sequence[int] = MNIST_IMAGE_SHAPE,
):
super().__init__()
self._latent_size = latent_size
self._hidden_size = hidden_size
self._output_shape = output_shape
def __call__(self, x: jnp.ndarray) -> VAEOutput:
x = x.astype(jnp.float32)
# q(z|x) = N(mean(x), covariance(x))
mean, stddev = Encoder(self._hidden_size, self._latent_size)(x)
variational_distrib = distrax.MultivariateNormalDiag(
loc=mean, scale_diag=stddev)
z = variational_distrib.sample(seed=hk.next_rng_key())
# p(x|z) = \Prod Bernoulli(logits(z))
logits = Decoder(self._hidden_size, self._output_shape)(z)
likelihood_distrib = distrax.Independent(
distrax.Bernoulli(logits=logits),
reinterpreted_batch_ndims=len(self._output_shape)) # 3 non-batch dims
# Generate images from the likelihood
image = likelihood_distrib.sample(seed=hk.next_rng_key())
return VAEOutput(variational_distrib, likelihood_distrib, image)
def main(_):
latent_size = 10
model = hk.transform(
lambda x: VAE(latent_size)(x), # pylint: disable=unnecessary-lambda
apply_rng=True)
optimizer = optax.adam(FLAGS.learning_rate)
@jax.jit
def loss_fn(params: hk.Params, rng_key: PRNGKey, batch: Batch) -> jnp.ndarray:
"""Loss = -ELBO, where ELBO = E_q[log p(x|z)] - KL(q(z|x) || p(z))."""
outputs: VAEOutput = model.apply(params, rng_key, batch["image"])
# p(z) = N(0, I)
prior_z = distrax.MultivariateNormalDiag(
loc=jnp.zeros((latent_size,)),
scale_diag=jnp.ones((latent_size,)))
log_likelihood = outputs.likelihood_distrib.log_prob(batch["image"])
kl = outputs.variational_distrib.kl_divergence(prior_z)
elbo = log_likelihood - kl
return -jnp.mean(elbo)
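  # Note: the KL term is computed in closed form between the diagonal-Gaussian
  # posterior and the standard-normal prior, while E_q[log p(x|z)] is
  # approximated with the single z sampled inside `VAE.__call__`.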
@jax.jit
def update(
params: hk.Params,
rng_key: PRNGKey,
opt_state: OptState,
batch: Batch,
) -> Tuple[hk.Params, OptState]:
"""Single SGD update step."""
grads = jax.grad(loss_fn)(params, rng_key, batch)
updates, new_opt_state = optimizer.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, new_opt_state
rng_seq = hk.PRNGSequence(42)
params = model.init(next(rng_seq), np.zeros((1, *MNIST_IMAGE_SHAPE)))
opt_state = optimizer.init(params)
train_ds = load_dataset(tfds.Split.TRAIN, FLAGS.batch_size)
valid_ds = load_dataset(tfds.Split.TEST, FLAGS.batch_size)
for step in range(FLAGS.training_steps):
params, opt_state = update(params, next(rng_seq), opt_state, next(train_ds))
if step % FLAGS.eval_frequency == 0:
val_loss = loss_fn(params, next(rng_seq), next(valid_ds))
logging.info("STEP: %5d; Validation ELBO: %.3f", step, -val_loss)
if __name__ == "__main__":
app.run(main)
| distrax-master | examples/vae.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hidden Markov Model example detecting changepoints in the rate of a signal.
Adapted from https://github.com/probml/probml-notebooks/blob/main/notebooks/
hmm_poisson_changepoint_jax.ipynb
"""
import functools
from absl import app
from absl import flags
from absl import logging
import distrax
import jax
import jax.numpy as jnp
import optax
import scipy.stats
import tensorflow_probability as tfp
flags.DEFINE_list("true_rates", [40, 3, 20, 50],
"Sequence of Poisson rates for the data generating process.")
flags.DEFINE_list("true_durations", [10, 20, 5, 35],
"Sequence of durations for the data generating process. "
"Should be the same length as `true_rates`.")
flags.DEFINE_integer("fixed_num_states", 4,
"How many states to use for the fixed-count experiment.")
flags.DEFINE_list("state_sweep", [1, 2, 3, 4, 5, 6],
"Sweep of states to use for the multi-count experiment.")
flags.DEFINE_float("prior_change_prob", 0.05,
"Prior probability of state transition per unit time.")
flags.DEFINE_integer("n_steps", 201,
"Number of steps of gradient descent to fit the model.")
flags.DEFINE_integer("data_seed", 0, "Seed for the data generator.")
flags.DEFINE_integer("model_seed", 1, "Seed for the parameter generator.")
FLAGS = flags.FLAGS
def generate_data(true_rates, true_durations, random_state):
"""Generates data from a Poisson process with changing rates over time."""
return jnp.concatenate([
scipy.stats.poisson(rate).rvs(num_steps, random_state=random_state)
for (rate, num_steps) in zip(true_rates, true_durations)
]).astype(jnp.float32)
def build_latent_state(num_states, max_num_states, daily_change_prob):
""""Build an initial state probability vector and state transition matrix."""
# Give probability 0 to states outside of the current model.
def prob(s):
return jnp.where(s < num_states + 1, 1/num_states, 0.)
states = jnp.arange(1, max_num_states+1)
initial_state_probs = jax.vmap(prob)(states)
# Build a transition matrix that transitions only within the current
# `num_states` states.
def transition_prob(i, s):
return jnp.where((s <= num_states) & (i <= num_states) & (1 < num_states),
jnp.where(s == i, 1 - daily_change_prob,
daily_change_prob / (num_states - 1)),
jnp.where(s == i, 1, 0))
transition_probs = jax.vmap(
transition_prob, in_axes=(None, 0))(states, states)
return initial_state_probs, transition_probs
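# As an illustration, `build_latent_state(2, 3, 0.05)` gives
#   initial_state_probs == [0.5, 0.5, 0.]
#   transition_probs    == [[0.95, 0.05, 0.  ],
#                           [0.05, 0.95, 0.  ],
#                           [0.  , 0.  , 1.  ]]
# i.e. only the first two of the three candidate states ever receive
# probability mass.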
def make_hmm(log_rates, transition_probs, initial_state_probs):
"""Make a Hidden Markov Model with Poisson observation distribution."""
return distrax.HMM(
obs_dist=tfp.substrates.jax.distributions.Poisson(log_rate=log_rates),
trans_dist=distrax.Categorical(probs=transition_probs),
init_dist=distrax.Categorical(probs=initial_state_probs))
def get_durations(data):
durations = []
previous_value = None
for value in data:
if value != previous_value:
durations.append(1)
previous_value = value
else:
durations[-1] += 1
return durations
def get_changed_rates(data):
values = []
for value in data:
if not values or value != values[-1]:
values.append(value)
return values
def main(_):
#--------------------------------------------------
#-------------- Generate the data -----------------
#--------------------------------------------------
observed_counts = generate_data(FLAGS.true_rates,
FLAGS.true_durations,
FLAGS.data_seed)
#-----------------------------------------------------------------------
#-------------- Run a model with fixed number of states ----------------
#-----------------------------------------------------------------------
initial_state_probs, transition_probs = build_latent_state(
FLAGS.fixed_num_states, FLAGS.fixed_num_states, FLAGS.prior_change_prob)
logging.info("--------- Fixed number of states ---------")
logging.info("Initial state probs: %s", initial_state_probs)
logging.info("Transition matrix:\n%s", transition_probs)
rng_key = jax.random.PRNGKey(FLAGS.model_seed)
rng_key, rng_normal = jax.random.split(rng_key)
# Define a variable to represent the unknown log-rates.
trainable_log_rates = (
jnp.log(jnp.mean(observed_counts))
+ jax.random.normal(rng_normal, (FLAGS.fixed_num_states,)))
hmm = make_hmm(trainable_log_rates, transition_probs, initial_state_probs)
optimizer = optax.adam(1e-1)
# Define loss and update functions for doing gradient descent.
def loss_fn(trainable_log_rates, transition_probs, initial_state_probs):
"""Computes the loss for the model given the log-rates."""
hmm = make_hmm(trainable_log_rates, transition_probs, initial_state_probs)
rate_prior = distrax.LogStddevNormal(5, 5)
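    # The loss is the negative log joint: the log-prior of the rates plus the
    # HMM log marginal likelihood of the observed counts (the first output of
    # `hmm.forward`).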
return -(jnp.sum(rate_prior.log_prob(jnp.exp(trainable_log_rates)))
+ hmm.forward(observed_counts)[0])
def update(opt_state, params, transition_probs, initial_state_probs):
"""Computes the gradient and updates the parameters of the model."""
loss, grads = jax.value_and_grad(loss_fn)(
params, transition_probs, initial_state_probs)
updates, opt_state = optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
return opt_state, params, loss
@functools.partial(jax.jit, static_argnums=3)
def fit(trainable_log_rates, transition_probs, initial_state_probs, n_steps):
"""Does n_steps of gradient descent on the model."""
opt_state = optimizer.init(trainable_log_rates)
def train_step(opt_state_params, _):
opt_state, params = opt_state_params
opt_state, params, loss = update(
opt_state, params, transition_probs, initial_state_probs)
return (opt_state, params), loss
steps = jnp.arange(n_steps)
(opt_state, trainable_log_rates), losses = jax.lax.scan(
train_step, (opt_state, trainable_log_rates), steps)
return trainable_log_rates, losses
# Do gradient descent to fit the model.
params, losses = fit(
trainable_log_rates, transition_probs, initial_state_probs, FLAGS.n_steps)
rates = jnp.exp(params)
hmm = make_hmm(params, transition_probs, initial_state_probs)
logging.info("Initial loss: %s", losses[0])
logging.info("Final loss: %s", losses[-1])
logging.info("Inferred rates: %s", rates)
logging.info("True rates: %s", FLAGS.true_rates)
_, _, posterior_probs, _ = hmm.forward_backward(observed_counts)
# Max marginals
most_probable_states = jnp.argmax(posterior_probs, axis=-1)
most_probable_rates = rates[most_probable_states]
logging.info("Inferred rates between change points (Max marginals): %s",
get_changed_rates(most_probable_rates))
logging.info("Inferred durations between change points (max marginals): %s",
get_durations(most_probable_states))
# Max probability trajectory (Viterbi)
most_probable_states = hmm.viterbi(observed_counts)
most_probable_rates = rates[most_probable_states]
logging.info("Inferred rates between change points (Viterbi): %s",
get_changed_rates(most_probable_rates))
logging.info("Inferred durations between change points (Viterbi): %s",
get_durations(most_probable_states))
#----------------------------------------------------------------------------
#-------- Run a sweep over models with different numbers of states ----------
#----------------------------------------------------------------------------
states = jnp.array(FLAGS.state_sweep)
# For each candidate model, build initial state prior and transition matrix
batch_initial_state_probs, batch_transition_probs = jax.vmap(
build_latent_state, in_axes=(0, None, None))(
states, max(FLAGS.state_sweep), FLAGS.prior_change_prob)
logging.info("----- Sweeping over models with different state counts -----")
logging.info("Shape of initial_state_probs: %s",
batch_initial_state_probs.shape)
logging.info("Shape of transition_probs: %s", batch_transition_probs.shape)
logging.info("Example initial_state_probs for num_states==%s: %s",
FLAGS.state_sweep[2], batch_initial_state_probs[2, :])
logging.info("Example transition_probs for num_states==%s:\n%s",
FLAGS.state_sweep[2], batch_transition_probs[2, :])
rng_key, rng_normal = jax.random.split(rng_key)
# Define a variable to represent the unknown log-rates.
trainable_log_rates = (
jnp.log(jnp.mean(observed_counts))
+ jax.random.normal(rng_normal, (max(FLAGS.state_sweep),)))
# Fit the model with gradient descent.
params, losses = jax.vmap(fit, in_axes=(None, 0, 0, None))(
trainable_log_rates, batch_transition_probs, batch_initial_state_probs,
FLAGS.n_steps)
rates = jnp.exp(params)
logging.info("Final loss for each model: %s", losses[:, -1])
for i, learned_model_rates in enumerate(rates):
logging.info("Rates for %s-state model: %s",
FLAGS.state_sweep[i], learned_model_rates[:i+1])
def posterior_marginals(
trainable_log_rates, initial_state_probs, transition_probs):
hmm = make_hmm(trainable_log_rates, transition_probs, initial_state_probs)
_, _, marginals, _ = hmm.forward_backward(observed_counts)
return marginals
posterior_probs = jax.vmap(posterior_marginals, in_axes=(0, 0, 0))(
params, batch_initial_state_probs, batch_transition_probs)
most_probable_states = jnp.argmax(posterior_probs, axis=-1)
for i, learned_model_rates in enumerate(rates):
logging.info("%s-state model:", FLAGS.state_sweep[i])
logging.info(
"Inferred rates between change points: %s",
get_changed_rates(learned_model_rates[most_probable_states[i]]))
logging.info(
"Inferred durations between change points: %s",
get_durations(most_probable_states[i]))
if __name__ == "__main__":
app.run(main)
| distrax-master | examples/hmm.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module setuptools script."""
from importlib import util
from setuptools import find_packages
from setuptools import setup
def get_version():
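  """Fetches the package version from _metadata.py.
  The file is loaded directly via importlib so that setup.py does not import
  the alphastar package, whose dependencies may not be installed yet.
  """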
spec = util.spec_from_file_location('_metadata', 'alphastar/_metadata.py')
mod = util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod.__version__
with open('requirements.txt') as f:
required = f.read().splitlines()
LONG_DESCRIPTION = (
'This package provides libraries for ML-friendly Starcraft II trajectory '
'data generation, architectures and agents along with the entire training '
'and evaluation setup for training an offline RL agent.'
)
setup(
name='AlphaStar',
version=get_version(),
    description='Package for offline RL agent training and evaluation on StarCraft II',
long_description=LONG_DESCRIPTION,
author='DeepMind',
license='Apache License, Version 2.0',
keywords='StarCraft AI',
url='https://github.com/deepmind/alphastar',
# This is important if you have some non-standard files as part of package.
include_package_data=True,
packages=find_packages(),
# dm-acme 0.2.4 until the clash of pybind11 absl status bindings with
# PySC2 is resolved.
install_requires=required,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| alphastar-main | setup.py |
# Copyright 2022 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Package metadata.
This is kept in a separate module so that it can be imported from setup.py, at
a time when the package dependencies may not have been installed yet.
"""
__version__ = '0.1.0' # https://www.python.org/dev/peps/pep-0440/ | alphastar-main | alphastar/_metadata.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from alphastar._metadata import __version__
| alphastar-main | alphastar/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom types used in Alphastar."""
from alphastar.types.types import ActionSpec
from alphastar.types.types import ArgumentName
from alphastar.types.types import NestedDict
from alphastar.types.types import ObsSpec
from alphastar.types.types import SpecDict
from alphastar.types.types import StrDict
from alphastar.types.types import StreamDict
from alphastar.types.types import StreamType
| alphastar-main | alphastar/types/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom Types."""
import itertools
from typing import AbstractSet
from typing import Any
from typing import Dict
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import Mapping
from typing import MutableMapping
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import TypeVar
from typing import Union
import chex
from dm_env import specs
import jax
import numpy as np
ArgumentName = str
ObsSpec = Mapping[str, specs.BoundedArray]
ActionSpec = Mapping[ArgumentName, specs.BoundedArray]
StreamType = Union[str, Sequence[str]]
_IndexType = Union[StreamType, Sequence[StreamType]]
T = TypeVar("T")
# TODO(b/207379690): use recursive types when supported
_NestedDictType = Dict[str, Any]
_NestedMappingType = Mapping[_IndexType, Any]
def _flatten_index(index: _IndexType) -> Sequence[str]:
if isinstance(index, str):
return (index,)
else:
return tuple(
itertools.chain.from_iterable([_flatten_index(y) for y in index]))
class StrKey(str):
"""A str StreamType which can be compared to Sequence[str] StreamType.
This is needed for the tree functions to work.
"""
def __lt__(self, other: StreamType):
if isinstance(other, str):
return str(self) < other
else:
return (self,) < other
def __gt__(self, other: StreamType):
if isinstance(other, str):
return str(self) > other
else:
return (self,) > other
class NestedDict(Generic[T], MutableMapping[str, Union[T, "NestedDict"]]):
"""A nested dict with convenience functions.
* A nested dict gives access to nested elements using a sequence of string
keys, e.g. foo_dict['a', 'b'].
* Nested dicts can also be used to filter a superset down to a subset of
nested elements with the filter function.
* A nested dict can be instantiated from any mapping with string keys, or
from an iterable of (key, value) tuples, where the values can themselves
recursively be anything a nested dict can hold.
Note that the type of values T should not be an instance of Mapping.
Example Usage:
Basic
foo_dict = NestedDict()
# Setting elements, possibly nested:
foo_dict['a'] = 100
foo_dict['b', 'c'] = 200
foo_dict['b', 'd'] = 300
# Getting elements:
print(foo_dict['b', 'c']) --> 200
print(foo_dict['b']) --> IndexError("Use get for partial indexing.")
print(foo_dict.get('b')) --> {'c': 200, 'd': 300}
print(foo_dict) --> {'a': 100, 'b': {'c': 200, 'd': 300}}
# Converting to a dict:
foo_dict.asdict() --> {'a': 100, 'b': {'c': 200, 'd': 300}}
# len function:
print(len(foo_dict)) --> 3
# Iterating:
foo_dict.keys() --> dict_keys(['a', ('b', 'c'), ('b', 'd')])
foo_dict.items() --> dict_items([('a', 100), (('b', 'c'), 200), (('b',
'd'), 300)])
foo_dict.shallow_keys() --> dict_keys(['a', 'b'])
Filter
dict1 = NestedDict([
(('foo', 'a'), 10), (('foo', 'b'), 11),
(('bar', 'c'), 11), (('bar', 'a'), 110)])
dict2 = NestedDict([('foo', NestedDict(dict(a=11)))])
dict3 = NestedDict([('foo', NestedDict(dict(a=100))),
('bar', NestedDict(dict(d=11)))])
dict4 = NestedDict([('foo', NestedDict(dict(a=100))),
('bar', NestedDict(dict(c=11)))])
dict1.filter(dict2).asdict() --> {'foo': {'a': 10}}
dict1.filter(dict4).asdict() --> {'bar': {'c': 11}, 'foo': {'a': 10}}
dict1.filter(dict3).asdict() --> KeyError - ('bar', 'd') not in dict1
"""
def __init__(self,
x: Union[Iterable[Tuple[StreamType, T]],
_NestedMappingType, "NestedDict[T]", None] = None):
self._data = dict() # type: Dict[str, Union[T, NestedDict[T]]]
x = x or {}
if isinstance(x, Mapping):
for k, v in x.items():
self[k] = v
else:
if not isinstance(x, Iterable):
raise ValueError(f"Input must be a Mapping or Iterable, got {x}.")
for k, v in x:
self[k] = v
def __contains__(self, k: _IndexType) -> bool:
k = _flatten_index(k)
if len(k) == 1:
return k[0] in self._data
else:
if k[0] in self._data and isinstance(self._data[k[0]], NestedDict):
return k[1:] in self._data[k[0]]
else:
return False
def get(self,
k: _IndexType,
*,
default: Optional[T] = None) -> Union[T, "NestedDict[T]"]:
"""Returns self[k], partial indexing allowed."""
if k not in self:
if default is not None:
return default
else:
raise KeyError(k)
k = _flatten_index(k)
if len(k) == 1:
return self._data[k[0]]
else:
return self._data[k[0]].get(k[1:])
def __getitem__(self, k: _IndexType) -> T:
output = self.get(k)
if isinstance(output, NestedDict):
raise IndexError("Use get for partial indexing.")
return output
def __setitem__(self, k: _IndexType, v: Union[T, _NestedMappingType]) -> None:
if not k:
raise IndexError("Use valid index value.")
k = _flatten_index(k)
v = NestedDict[T](v) if isinstance(v, Mapping) else v # type: Union[T, NestedDict[T]]
if len(k) == 1:
self._data[k[0]] = v
else:
if k[0] not in self._data:
self._data[k[0]] = NestedDict[T]()
if not isinstance(self._data[k[0]], NestedDict):
raise IndexError("Trying to assign nested values to a leaf.")
self._data[k[0]][k[1:]] = v
def __iter__(self) -> Iterator[StreamType]:
for k, v in self._data.items():
if isinstance(v, NestedDict):
for x in v:
if isinstance(x, Tuple):
yield (k,) + x
else:
yield (k, x)
else:
yield StrKey(k)
def __delitem__(self, k: _IndexType) -> None:
if k not in self:
raise KeyError(k)
k = _flatten_index(k)
if len(k) == 1:
del self._data[k[0]]
else:
del self._data[k[0]][k[1:]]
if not self._data[k[0]]:
del self._data[k[0]]
def __len__(self) -> int:
output = 0
for v in self.values():
if isinstance(v, NestedDict):
output += len(v)
else:
output += 1
return output
def __str__(self) -> str:
return str(self.asdict())
def filter(self,
other: Union[Sequence[StreamType], "NestedDict[Any]"],
ignore_missing: bool = False) -> "NestedDict[T]":
"""Returns a NestedDict with only entries present in `other`."""
output = NestedDict[T]()
if isinstance(other, Sequence):
keys = other
else:
keys = other.keys()
for k in keys:
if k not in self:
if not ignore_missing:
raise KeyError(k)
else:
output[k] = self.get(k)
return output
def asdict(self) -> _NestedDictType:
output = dict()
for k, v in self._data.items():
if isinstance(v, NestedDict):
output[k] = v.asdict()
else:
output[k] = v
return output
def copy(self) -> "NestedDict[T]":
output = NestedDict[T]()
for k, v in self.items():
output[k] = v
return output
def __copy__(self) -> "NestedDict[T]":
return self.copy()
def shallow_keys(self) -> AbstractSet[str]:
return self._data.keys()
StrDict = NestedDict[str]
StreamDict = NestedDict[chex.Array]
class SpecDict(NestedDict[specs.Array]):
"""A NestedDict containing a spec."""
def validate(self,
data: Union["SpecDict", StreamDict],
exact_match: bool = False,
num_leading_dims_to_ignore: int = 0,
error_prefix: Optional[str] = None) -> None:
"""Checks whether the data matches the spec.
Args:
data: The data which should match the spec. It can also be a spec
exact_match: If true, the data and the spec must be exactly identical.
Otherwise, the data is validated as long as it contains at least the
elements of the spec, but can contain more entries.
num_leading_dims_to_ignore: The first n dimensions of the data are not
part of the spec. They still must have the same size across data.
error_prefix: An optional string to append before the error message.
Raises:
ValueError: If the data doesn't match the spec.
"""
error_prefix = "" if error_prefix is None else f"[{error_prefix}] "
missing_keys = set(self.keys()).difference(set(data.keys()))
if missing_keys:
raise ValueError(f"{error_prefix}The data does not match the spec. Keys "
f"{missing_keys} are in the spec but not in the data.")
if exact_match:
data_spec_missing_keys = set(data.keys()).difference(set(self.keys()))
if data_spec_missing_keys:
raise ValueError(f"{error_prefix}The data does not match the spec. "
f"Keys {data_spec_missing_keys} are in the data but "
"not in the spec, and exact_match is set to True.")
if data:
try:
chex.assert_equal_shape_prefix(data.values(),
num_leading_dims_to_ignore)
except AssertionError as e:
raise ValueError(
f"{error_prefix}Leading dimensions don't match: {e}.") from e
for k, v in self.items():
data_to_validate = data[k]
if len(data_to_validate.shape) < num_leading_dims_to_ignore:
raise ValueError(f"{error_prefix}Error when validating spec {k}: not "
f"enough dimension (shape={data_to_validate.shape}).")
for _ in range(num_leading_dims_to_ignore):
data_to_validate = data_to_validate[0]
if isinstance(data_to_validate, np.ndarray):
try:
v.validate(data_to_validate)
except ValueError as e:
raise ValueError(
f"{error_prefix}Error when validating spec {k}: {e}") from e
else:
# We can't use validate if the array is not a numpy array
try:
chex.assert_type(data_to_validate, v.dtype)
chex.assert_shape(data_to_validate, v.shape)
except AssertionError as e:
raise ValueError(
f"{error_prefix}Error when validating spec {k}: {e}") from e
def copy(self) -> "SpecDict":
output = SpecDict()
for k, v in self.items():
output[k] = v
return output
def __copy__(self) -> "SpecDict":
return self.copy()
def _flatten_func(s):
d = s.asdict()
# When registering a pytree node, the flatten function must return
# (children, aux_data) -- here (values, keys) -- while the unflatten function
# receives its arguments in the reverse order, i.e. (keys, values).
return d.values(), d.keys()
def _unflatten_func(ks, vs):
return NestedDict(dict(zip(ks, vs)))
jax.tree_util.register_pytree_node(
NestedDict, flatten_func=_flatten_func, unflatten_func=_unflatten_func)
jax.tree_util.register_pytree_node(
SpecDict, flatten_func=_flatten_func, unflatten_func=_unflatten_func)
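# Illustrative usage sketch (editor's addition, not part of the original
# module): building a SpecDict and validating a StreamDict against it. The
# stream names and shapes below are made up for this example.
if __name__ == "__main__":
  example_spec = SpecDict({
      ("observation", "x"): specs.Array((3,), np.float32),
      ("observation", "y"): specs.BoundedArray((), np.int32, 0, 5),
  })
  example_data = StreamDict({
      ("observation", "x"): np.zeros((4, 3), np.float32),
      ("observation", "y"): np.ones((4,), np.int32),
  })
  # The leading dimension of size 4 (e.g. an unroll length) is not part of the
  # spec, so it is skipped during validation.
  example_spec.validate(example_data, num_leading_dims_to_ignore=1)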
| alphastar-main | alphastar/types/types.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for types."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
from alphastar import types
import chex
from dm_env import specs
import jax
import jax.numpy as jnp
import numpy as np
import tree
class EvaluatorTest(parameterized.TestCase):
"""Basic tests for types."""
def test_nested_dict_access(self):
x = types.NestedDict()
self.assertEmpty(x)
x['a'] = 100
x['b', 'c'] = 200
x['b', 'd'] = 300
with self.assertRaises(IndexError):
x[()] = 42
self.assertEqual(x['a'], 100)
self.assertEqual(x['b', 'c'], 200)
self.assertSetEqual(set(x.get('b').keys()), set(['c', 'd']))
self.assertEqual(x.get('b')['d'], 300)
with self.assertRaises(IndexError):
_ = x['b']
with self.assertRaises(KeyError):
_ = x['c']
with self.assertRaises(KeyError):
_ = x['b', 'e']
self.assertSetEqual(set(x.shallow_keys()), set(['a', 'b']))
self.assertIn('a', x)
self.assertIn(('a',), x)
self.assertIn('b', x)
self.assertIn(('b', 'c'), x)
self.assertNotIn(('b', 'c', 'e'), x)
self.assertNotIn('d', x)
self.assertNotIn(('e',), x)
self.assertSetEqual(set(iter(x)),
set(['a', ('b', 'c'), ('b', 'd')]))
self.assertLen(x, 3)
self.assertDictEqual(x.asdict(), {'a': 100, 'b': {'c': 200, 'd': 300}})
y = x.copy()
y['a'] = 500
y['b', 'c'] = 600
self.assertEqual(x.get('a'), 100)
self.assertEqual(x.get(('b', 'c')), 200)
x['b', 'e'] = 400
del x['b', 'c']
self.assertIn(('b', 'e'), x)
self.assertNotIn(('b', 'c'), x)
del x['b']
self.assertSetEqual(set(x.keys()), set(['a']))
x['z'] = {'a': 1, 'b': 2}
self.assertSetEqual(set(x.keys()), set(['a', ('z', 'a'), ('z', 'b')]))
with self.assertRaises(IndexError):
x['a', 'b'] = 4
def test_nested_dict_nested_index(self):
x = types.NestedDict()
x['a', ('b', 'c')] = 100
self.assertEqual(x['a', 'b', 'c'], 100)
self.assertSetEqual(set(x.keys()), set([('a', 'b', 'c')]))
y = types.NestedDict({('a', ('b', 'c')): 42})
self.assertDictEqual(y.asdict(), {'a': {'b': {'c': 42}}})
def test_set_difference(self):
x = types.NestedDict()
y = types.NestedDict()
x['a', ('b', 'c')] = 100
y['a', ('b', 'c')] = 200
x['d'] = 100
y['d'] = 200
x['e', ('f')] = 112
y['g', ('h')] = 220
self.assertSetEqual(set(x), set(x.keys()))
self.assertSetEqual(set(y), set(y.keys()))
self.assertNotEmpty(set(x).difference(set(y)))
def test_nested_dict_filter(self):
x = types.NestedDict()
x['a'] = 100
x['b', 'c'] = 200
x['b', 'd'] = 300
x['b', 'e'] = 400
x['f', 'g'] = 500
y = x.filter([('a',), ('b', 'c'), 'f'])
self.assertSetEqual(set(y.keys()), set(['a', ('b', 'c'), ('f', 'g')]))
y['a'] = 200
self.assertEqual(x['a'], 100)
z = x.filter(
types.NestedDict([('a', 42), (('b', 'd'), 4), (('b', 'e'), 3)]))
self.assertDictEqual(z.asdict(), {'a': 100, 'b': {'d': 300, 'e': 400}})
def test_nested_dict_init(self):
d1 = types.NestedDict(
[(('a', 'b'), 42), ('b', 3), (('a', 'c'), 4), (('d',), 5)])
self.assertDictEqual(d1.asdict(), {'b': 3, 'a': {'b': 42, 'c': 4}, 'd': 5})
d2 = types.NestedDict((('a', 3), (('b', 'c'), 4)))
self.assertDictEqual(d2.asdict(), {'a': 3, 'b': {'c': 4}})
d3 = types.NestedDict({'a': 3, 'b': {'c': 4, 'd': 5}})
self.assertDictEqual(d3.asdict(), {'a': 3, 'b': {'c': 4, 'd': 5}})
d4 = types.NestedDict({'a': 3, 'b': types.NestedDict((('c', 4), ('d', 5)))})
self.assertDictEqual(d4.asdict(), {'a': 3, 'b': {'c': 4, 'd': 5}})
d5 = types.NestedDict({'a': 3, ('b', 'c'): 4, ('b', 'd'): 5, ('e',): 6})
self.assertDictEqual(d5.asdict(), {'a': 3, 'b': {'c': 4, 'd': 5}, 'e': 6})
d6 = types.NestedDict(types.NestedDict({'a': 3, 'b': {'c': 4, 'd': 5}}))
self.assertDictEqual(d6.asdict(), {'a': 3, 'b': {'c': 4, 'd': 5}})
def test_spec_dict(self):
spec = types.SpecDict({
'a': specs.Array((), np.bool_),
('b', 'c'): specs.Array((4,), np.float32),
('b', 'd'): specs.BoundedArray((), np.int32, 0, 5)})
data1 = types.NestedDict({
'a': np.zeros((), np.bool_),
('b', 'c'): np.ones((4,), np.float32),
('b', 'd'): np.array(1, np.int32)})
spec.validate(data1)
data2 = types.NestedDict({
'a': np.zeros((), np.bool_),
('b', 'c'): np.ones((4,), np.float32),
('b', 'd'): np.ones((), np.int32),
('b', 'e'): np.ones((), np.int32),
'f': np.zeros((42, 3), np.float32)})
spec.validate(data2)
data3 = types.NestedDict({
'a': np.zeros((), np.int32),
('b', 'c'): np.ones((4,), np.float32),
('b', 'd'): np.ones((), np.int32)})
with self.assertRaises(ValueError):
# Wrong dtype
spec.validate(data3)
data4 = types.NestedDict({
'a': np.zeros((), np.bool_),
('b', 'd'): np.ones((), np.int32)})
with self.assertRaises(ValueError):
# Missing data
spec.validate(data4)
data5 = types.NestedDict({
'a': np.zeros((), np.bool_),
('b', 'c'): np.ones((4,), np.float32),
('b', 'd'): np.array(10, np.int32)})
with self.assertRaises(ValueError):
# Out of bounds
spec.validate(data5)
spec['b', 'd'].validate(np.array((10,)))
data6 = types.NestedDict({
'a': np.zeros((), np.bool_),
('b', 'c'): np.ones((4,), np.float32),
('b', 'd'): np.ones((), np.int32),
'f': np.zeros((42, 3), np.float32)})
with self.assertRaises(ValueError):
# Extra data
spec.validate(data6, exact_match=True)
data7 = types.NestedDict({
'a': np.zeros((4,), np.bool_),
('b', 'c'): np.ones((4, 4), np.float32),
('b', 'd'): np.ones((4,), np.int32)})
spec.validate(data7, num_leading_dims_to_ignore=1)
data8 = types.NestedDict({
'a': np.zeros((4,), np.bool_),
('b', 'c'): np.ones((4, 4), np.float32),
('b', 'd'): np.ones((5,), np.int32)})
with self.assertRaises(ValueError):
# Leading dimensions not matching
spec.validate(data8, num_leading_dims_to_ignore=1)
data9 = types.NestedDict({
'a': jnp.zeros((), jnp.bool_),
('b', 'c'): jnp.ones((4,), jnp.float32),
('b', 'd'): jnp.array(1, jnp.int32)})
spec.validate(data9)
def test_nested_dict_flatted(self):
x = types.NestedDict({'a': 4, ('b', 'c'): 5, ('b', 'd'): 6})
leaf_values, treedef = jax.tree_flatten(x)
self.assertDictEqual(x.asdict(),
jax.tree_unflatten(treedef, leaf_values).asdict())
self.assertSetEqual(set(jax.tree_leaves(x)), set([4, 5, 6]))
self.assertDictEqual(jax.tree_map(lambda y: y+1, x).asdict(),
{'a': 5, 'b': {'c': 6, 'd': 7}})
def test_tree_and_chex(self):
x = types.NestedDict({
'a': jnp.zeros((50,)),
'b': {'c': jnp.zeros((50, 11)), 'd': jnp.zeros((50, 11, 134))}
})
chex.assert_tree_shape_prefix(x, (50,))
x_plus_one = types.NestedDict({
'a': jnp.ones((50,)),
'b': {'c': jnp.ones((50, 11)), 'd': jnp.ones((50, 11, 134))}
})
chex.assert_trees_all_equal(tree.map_structure(lambda y: y+1, x),
x_plus_one)
chex.assert_trees_all_equal(tree.map_structure(lambda y: y+1, x).asdict(),
x_plus_one.asdict())
def test_copy(self):
x = types.NestedDict({'a': {'b': {'c': 1}}})
y1 = x.get(('a',)).copy()
y1['c'] = 2
self.assertEqual(x['a', 'b', 'c'], 1)
y2 = copy.copy(x.get(('a',)))
y2['c'] = 2
self.assertEqual(x['a', 'b', 'c'], 1)
y3 = copy.deepcopy(x.get(('a',)))
y3['b', 'c'] = 3
self.assertEqual(x['a', 'b', 'c'], 1)
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/types/types_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstraction for data logged by actors / evaluators."""
import abc
from typing import Any, Dict, Optional, Union
from alphastar import types
from alphastar.commons import log_utils
import dm_env
Number = Union[int, float]
class EpisodeLogger(abc.ABC):
@abc.abstractmethod
def register_step(self,
player: int,
step_type: dm_env.StepType,
prev_reward: float,
observation: types.StreamDict,
agent_output: Optional[types.StreamDict],
log: Optional[log_utils.Log],
static_log: Dict[str, Any]):
"""Registers a new step.
Args:
player: The zero-based index of the player registering this step.
step_type: The step type.
prev_reward: The reward received before the observation by the player.
observation: The observation for the player.
agent_output: The output produced by the agent at this time step, or None
if there is no output (last step, or bot/competitor).
log: What the player needs to log at this step, or None if nothing to log.
Note that because these logs are typically averaged over episodes, they
can only be numeric types.
static_log: Static log for the player. Static log is expected not to
change during the episode, but can be of any type.
"""
| alphastar-main | alphastar/loggers/episode_logger.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/loggers/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger for logging evaluation episodes."""
import collections as py_collections
import time
from typing import Any, Callable, Dict, Optional
from acme.utils import loggers
from alphastar import types
from alphastar.commons import log_utils
from alphastar.commons import metrics
from alphastar.loggers import episode_logger as episode_logger_lib
from alphastar.modules import common as acme_common
import dm_env
import jax
import numpy as np
class EvalEpisodeLogger(episode_logger_lib.EpisodeLogger):
"""A logger for logging episodes during evaluation."""
def __init__(
self,
log_name: str,
log_to_csv: bool,
home_player_idx: int = 0,
agent_scope=None,
logger: Optional[loggers.Logger] = None,
print_fn: Optional[Callable[[str], None]] = None,
):
"""Initializes the evaluation episode logger.
Args:
log_name: Name of the log file.
log_to_csv: Boolean to decide if logging to csv is needed.
home_player_idx: Index for the home player.
agent_scope: Logger used to write agent outputs and observations
beyond what the standard logging does.
logger: An ACME logger object used for logging.
print_fn: The default print function used for in-process logging.
"""
super().__init__()
self._home_player_idx = home_player_idx
self._logger = logger
self._log_name = log_name
self._log_to_csv = log_to_csv
self._print_fn = print_fn or print
if self._logger is None:
self.make_default_logger()
self._new_episode()
self._agent_scope = agent_scope
def make_default_logger(
self, print_fn: Optional[Callable[[str], None]] = None,):
self._logger = acme_common.make_default_logger(
self._log_name, log_to_csv=self._log_to_csv, print_fn=self._print_fn)
def _new_episode(self):
self._start_time = time.time()
self._num_home_frames = 0
self._home_player_logs = py_collections.defaultdict(lambda: [])
def register_step(self,
player: int,
step_type: dm_env.StepType,
prev_reward: float,
observation: types.StreamDict,
agent_output: Optional[types.StreamDict],
log: Optional[log_utils.Log],
static_log: Dict[str, Any]):
if player == self._home_player_idx:
self._num_home_frames += 1
home_log = log or {}
for k, v in metrics.flatten_metrics(home_log).items():
self._home_player_logs[k].append(np.mean(v))
if self._num_home_frames % 50 == 1:
self._print_fn(
'Running episode, frame=%s, game_loop=%s, static_log=%s, log=%s.' %
(self._num_home_frames, observation['game_loop'], static_log, log))
if step_type == dm_env.StepType.LAST:
home_obs = observation
episode_length = np.squeeze(home_obs['game_loop'])
episode_length_minutes = episode_length / 22.4 / 60
avg_home_player_logs = {
k: np.mean(v) for k, v in self._home_player_logs.items()}
eval_time_seconds = time.time() - self._start_time
log_data = dict(
outcome=prev_reward,
num_frames_per_episode=self._num_home_frames,
episode_length_minutes=episode_length_minutes,
apm=self._num_home_frames / episode_length_minutes,
episode_eval_time_seconds=eval_time_seconds,
eval_frames_per_second=self._num_home_frames / eval_time_seconds,
**static_log,
**avg_home_player_logs)
self._logger.write(acme_common.flatten_metrics(log_data))
self._new_episode()
if self._agent_scope and agent_output:
obs_without_zerodimarrays = jax.tree_map(
lambda x: x.item() if x.ndim == 0 else x, observation)
self._agent_scope.write(
output=agent_output, observation=obs_without_zerodimarrays.asdict())
| alphastar-main | alphastar/loggers/eval_episode_logger.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics related functions."""
import functools
from typing import Callable, Dict, List, Optional, Tuple
from alphastar.commons import log_utils
import chex
import jax
import jax.numpy as jnp
def pnanreduce(x: jnp.DeviceArray,
reduce_fn: Callable[..., jnp.DeviceArray],
axis_name: Optional[str] = None,
axis_index_groups: Optional[List[List[int]]] = None
) -> Tuple[jnp.DeviceArray, jnp.DeviceArray]:
"""Gets the number of non-nan elements reduce them with reduce_fn.
Args:
x : Input array
reduce_fn : Function that is used to reduce the inputs.
axis_name: Object used to name a pmapped axis
axis_index_groups: Optional list of lists containing axis indices to do
the p-reduce operations over.
Returns:
Number of non-nan elements, non-nan elements reduced.
"""
valid_mask = jnp.logical_not(jnp.isnan(x))
valid_x = jnp.where(valid_mask, x, jnp.zeros_like(x))
valid_num = jax.lax.psum(
valid_mask, axis_name=axis_name, axis_index_groups=axis_index_groups)
valid_reduced = reduce_fn(
valid_x, axis_name=axis_name, axis_index_groups=axis_index_groups)
return valid_num, valid_reduced
def pnanmean(x: jnp.DeviceArray,
axis_name: Optional[str] = None,
axis_index_groups: Optional[List[List[int]]] = None
) -> jnp.DeviceArray:
"""Takes mean over non-nan elements.
Args:
x : Input array
axis_name: Object used to name a pmapped axis
axis_index_groups: Optional list of lists containing axis indices to do
the p-reduce operations over.
Returns:
Mean of non-nan elements reduced.
"""
valid_num, valid_sum = pnanreduce(
x, jax.lax.psum, axis_name, axis_index_groups)
return jnp.where(valid_num, valid_sum / valid_num, jnp.nan)
def pnantake(x: jnp.DeviceArray,
reduce_fn: Callable[..., jnp.DeviceArray],
axis_name: Optional[str] = None,
axis_index_groups: Optional[List[List[int]]] = None
) -> jnp.DeviceArray:
"""Takes a reduction over non-nan elements.
Args:
x : Input array
reduce_fn : Reduction operation to be applied over elements.
axis_name: Object used to name a pmapped axis
axis_index_groups: Optional list of lists containing axis indices to do
the p-reduce operations over.
Returns:
Result of reduction operation on non-nan elements.
"""
valid_num, valid_y = pnanreduce(x, reduce_fn, axis_name, axis_index_groups)
return jnp.where(valid_num, valid_y, jnp.nan)
P_REDUCE_FUNCTIONS = {
log_utils.ReduceType.MEAN: pnanmean,
log_utils.ReduceType.MIN: functools.partial(
pnantake, reduce_fn=jax.lax.pmin),
log_utils.ReduceType.MAX: functools.partial(
pnantake, reduce_fn=jax.lax.pmax),
log_utils.ReduceType.NUM: functools.partial(
pnantake, reduce_fn=jax.lax.psum),
log_utils.ReduceType.SUM: functools.partial(
pnantake, reduce_fn=jax.lax.psum),
log_utils.ReduceType.NON_REDUCED: lambda x: x}
def reduce_metrics(
metrics: log_utils.Log,
axis_name: Optional[str] = None,
axis_index_groups: Optional[List[List[int]]] = None,
local: bool = False,
) -> log_utils.Log:
"""Reduce metrics across devices.
Args:
metrics : A log object that contains metrics collected.
axis_name: Object used to name a pmapped axis
axis_index_groups: Optional list of lists containing axis indices to do
the p-reduce operations over.
local: Boolean that says whether metrics are run on a local device or a
p-mapped set of devices.
Returns:
A log object with reduced metrics.
"""
if axis_name is not None and local:
raise ValueError('Cannot specify both axis_name and local.')
elif axis_name is None and axis_index_groups is not None:
raise ValueError('Must provide axis name when using axis index groups.')
if local:
reduce_fns = log_utils.REDUCE_FUNCTIONS
else:
reduce_fns = {
k: functools.partial(
fn, axis_name=axis_name, axis_index_groups=axis_index_groups)
for k, fn in P_REDUCE_FUNCTIONS.items()}
return log_utils.reduce_logs(metrics, reduce_fns)
def flatten_metrics(metrics: log_utils.Log) -> Dict[str, chex.Array]:
"""Flattens metrics to a single level of nesting."""
output = {}
for k, v in metrics.items():
if isinstance(v, dict):
for k2, v2 in flatten_metrics(v).items():
output[f'{k}_{k2}'] = v2
else:
output[k] = v
return output
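# Illustrative sketch (editor's addition, not part of the original module):
# pnanmean averages only the non-nan entries across pmapped devices. This also
# runs on a single host, where the pmapped axis simply has size
# jax.local_device_count().
if __name__ == "__main__":
  num_devices = jax.local_device_count()
  values = jnp.arange(num_devices, dtype=jnp.float32)
  if num_devices > 1:
    values = values.at[0].set(jnp.nan)  # NaN entries are ignored by pnanmean.
  result = jax.pmap(
      lambda v: pnanmean(v, axis_name="i"), axis_name="i")(values)
  print(result)  # Every device holds the mean over the non-nan entries.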
| alphastar-main | alphastar/commons/metrics.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging utility functions."""
import enum
from typing import Callable, Dict, Mapping, Optional
from alphastar.collections import Struct
import chex
import jax
import jax.numpy as jnp
class ReduceType(str, enum.Enum):
MEAN = "mean"
MIN = "min"
MAX = "max"
NUM = "num"
SUM = "sum"
NON_REDUCED = "non_reduced"
# TODO(b/208619590): move Log to types
Log = Dict[str, Dict[ReduceType, chex.Array]]
ReduceFn = Callable[[chex.Array], chex.Array]
REDUCE_FUNCTIONS = Struct({
ReduceType.MEAN: jnp.nanmean,
ReduceType.MIN: jnp.nanmin,
ReduceType.MAX: jnp.nanmax,
ReduceType.NUM: jnp.nansum,
ReduceType.SUM: jnp.nansum,
ReduceType.NON_REDUCED: lambda x: jnp.reshape(x, -1)[0],
})
def reduce_logs(logs: Log,
reduce_fns: Mapping[ReduceType, ReduceFn] = REDUCE_FUNCTIONS
) -> Log:
"""Reduce the logs using the provided set of reduce functions."""
check_logs_rank(logs)
reduced_log = {}
for name, stats in logs.items():
reduced_log[name] = {}
for stat, value in stats.items():
reduced_log[name][stat] = reduce_fns[stat](value)
# If possible, recompute the mean as total_sum/total_num,
# it's less biased and has fewer NaNs (because we use masking).
if ReduceType.NUM in stats and ReduceType.SUM in stats:
stat_num = reduced_log[name][ReduceType.NUM]
stat_sum = reduced_log[name][ReduceType.SUM]
reduced_log[name][ReduceType.MEAN] = jnp.where(
stat_num, stat_sum / stat_num, jnp.nan)
check_logs_rank(reduced_log, None)
return reduced_log
def check_logs_rank(logs: Log, expected_rank: Optional[int] = None) -> None:
"""Checks that the logs have the correct rank.
Logs are expected to be nested dicts representing module/log/metric.
Args:
logs: The log nested dictionary.
expected_rank: The expected rank of each log element. Use None if unknown.
"""
if logs:
flat_logs = jax.tree_leaves(logs)
chex.assert_equal_shape(flat_logs)
if expected_rank is not None:
chex.assert_rank(flat_logs, expected_rank)
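# Illustrative sketch (editor's addition, not part of the original module): a
# Log maps a metric name to per-ReduceType arrays. reduce_logs collapses each
# entry with the matching reduce function and, when both SUM and NUM are
# present, recomputes MEAN as total_sum / total_num.
if __name__ == "__main__":
  example_log = {
      "loss": {
          ReduceType.SUM: jnp.array([1.0, 3.0]),
          ReduceType.NUM: jnp.array([1.0, 2.0]),
      },
  }
  reduced = reduce_logs(example_log)
  print(reduced["loss"][ReduceType.MEAN])  # 4.0 / 3.0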
| alphastar-main | alphastar/commons/log_utils.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initializes common modules shared across training and evaluation."""
| alphastar-main | alphastar/commons/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from alphastar.commons import jax_utils
import jax
import jax.numpy as jnp
class JaxUtilsTest(absltest.TestCase):
def test_no_compilation_allowed(self):
@jax.jit
def model1(x):
return jnp.zeros_like(x)
@jax.jit
def model2(x):
return jnp.ones_like(x)
self.assertIsNotNone(model1(jnp.array([1, 2, 3])))
with jax_utils.no_jax_compilation_allowed():
self.assertIsNotNone(model1(jnp.array([1, 2, 3])))
with self.assertRaisesRegex(RuntimeError,
'compilation is not allowed in this scope'):
model1(jnp.array([1]))
with self.assertRaisesRegex(RuntimeError,
'compilation is not allowed in this scope'):
model2(jnp.array([1, 2, 3]))
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/commons/jax_utils_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility code related to JAX."""
import contextlib
import jax
_PREV_JAX_CONFIG = None
def disable_jax_optimizations():
global _PREV_JAX_CONFIG
_PREV_JAX_CONFIG = jax.config.values.copy()
jax.config.update('jax_disable_most_optimizations', True)
def restore_jax_config():
if _PREV_JAX_CONFIG:
jax.config.values.update(**_PREV_JAX_CONFIG)
def _disabled_backend_compile(*args, **kwargs):
raise RuntimeError('Attempt to compile a JAX program to XLA, but '
'compilation is not allowed in this scope. Typically '
'this is due to changes in input shapes or types, e.g. '
'specs used to generate dummy data not agreeing with the '
'actual data. Other cases could be if the program '
'contains modules that are always compiled at every call '
'if they are not jitted -- for example hk.scan')
# pylint: disable=protected-access
# pylint: disable=attribute-error
def jax_compilation_is_disabled():
return jax._src.dispatch.backend_compile is _disabled_backend_compile
@contextlib.contextmanager
def no_jax_compilation_allowed():
"""Prevents JAX compilation in the scope of the context."""
previous_backend_compile = jax._src.dispatch.backend_compile
jax._src.dispatch.backend_compile = _disabled_backend_compile
try:
yield
finally:
# Make sure nobody else has patched the same thing in the mean time.
assert jax_compilation_is_disabled()
jax._src.dispatch.backend_compile = previous_backend_compile
# pylint: enable=protected-access
# pylint: enable=attribute-error
| alphastar-main | alphastar/commons/jax_utils.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for sampling actions from logits proposed by a model."""
from typing import Callable, Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
SampleFn = Callable[[chex.Array], chex.Array]
# TODO(b/207777292) : Add NaN checks to logits.
def mask_logits(logits: chex.Array,
mask: chex.Array,
unavailable_logits_bias: float = 1e10) -> chex.Array:
"""Mask logits with a tiny bias on elements to be masked."""
chex.assert_equal_shape([logits, mask])
chex.assert_type([logits, mask], [jnp.float32, jnp.bool_])
assert len(logits.shape) == len(mask.shape)
return jnp.where(mask, logits, -unavailable_logits_bias)
def apply_temperature_to_logits(logits: chex.Array,
temperature: Optional[float] = None
) -> chex.Array:
"""Apply a temperature (scale up the logits) to logits."""
chex.assert_type(logits, jnp.float32)
if temperature is not None:
logits /= temperature
return logits
def finalize_logits(logits: chex.Array,
mask: chex.Array,
temperature: Optional[float] = None) -> chex.Array:
"""Apply a temperature and mask logits."""
logits = apply_temperature_to_logits(logits, temperature)
return mask_logits(logits, mask)
def sample(logits: chex.Array,
temperature: Optional[float] = None) -> chex.Array:
"""Sample from logits, given a temperature."""
logits = apply_temperature_to_logits(logits, temperature)
logits = logits.astype(jnp.float32)
x = jax.random.categorical(hk.next_rng_key(), logits)
return x.astype(jnp.int32)
def nucleus_sample(
logits: chex.Array,
top_p: float = 1.0
) -> chex.Array:
"""Performs nucleus sampling on logits."""
sorted_logits = jax.lax.sort(logits, is_stable=False)
sorted_probs = jax.nn.softmax(sorted_logits)
threshold_idx = jnp.argmax(
jnp.cumsum(sorted_probs, -1) >= 1 - top_p, axis=-1)
threshold_largest_logits = jnp.take_along_axis(
sorted_logits, threshold_idx[..., jnp.newaxis], axis=-1)
assert threshold_largest_logits.shape == logits.shape[:-1] + (1,)
mask = logits >= threshold_largest_logits
logits = mask_logits(logits, mask) # Set unused logits to -inf.
return sample(logits)
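# Illustrative sketch (editor's addition, not part of the original module):
# sampling uses hk.next_rng_key(), so it has to run inside a Haiku transform.
# With top_p=0.5 and the probabilities below, only the two most likely actions
# survive the nucleus mask.
if __name__ == "__main__":
  example_logits = jnp.log(jnp.array([0.05, 0.05, 0.4, 0.5]))
  sampler = hk.transform(lambda logits: nucleus_sample(logits, top_p=0.5))
  rng = jax.random.PRNGKey(0)
  params = sampler.init(rng, example_logits)
  action = sampler.apply(params, rng, example_logits)
  print(action)  # Either 2 or 3; actions 0 and 1 are masked out.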
| alphastar-main | alphastar/commons/sample.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Repository of Competitors and associated utilities."""
import enum
import itertools
from typing import Optional, Sequence
import numpy as np
from s2clientprotocol import common_pb2
from s2clientprotocol import sc2api_pb2
class CompetitorType(enum.Enum):
SHELL = 1, # A competitor built with agent shell with its own trained model.
PROTO = 2, # A competitor built from a proto file of agent properties.
BOT = 3, # A competitor that is a built in bot.
BINARY = 4 # A competitor binary which can join hosted games.
class BuiltInAI:
"""The built-in AI of a particular difficulty level."""
def __init__(self,
difficulty: str,
home_races: Sequence[str] = ("protoss", "terran", "zerg",
"random"),
competitor_race: Optional[str] = None,
sample_race: bool = True):
"""Initializes a built-in bot.
Args:
difficulty: The level of difficulty of the bot.
home_races: Set of home races that bot can play with.
competitor_race: Race for the competitor.
sample_race: Boolean to decide if we need to sample the race for the
competitor.
Raises:
ValueError if competitor_race is not set and sample_race is False.
"""
self._difficulty = difficulty
self._home_races = home_races
if competitor_race:
self._competitor_race = competitor_race
elif not sample_race and len(home_races) > 1:
raise ValueError("Race is not sampled, but competitor_race "
"for BuiltInAI is not set!")
else:
self._competitor_race = np.random.choice(home_races)
@property
def type(self):
return CompetitorType.BOT
@property
def difficulty(self):
return self._difficulty
def intersect_home_races(self, opponent_away_races, ignore_if_not_compatible):
home_races = [x for x in self._home_races if x in opponent_away_races]
if home_races or not ignore_if_not_compatible:
self._home_races = home_races
def set_competitor_race(self, competitor_race):
self._competitor_race = competitor_race
@property
def home_races(self):
return self._home_races
@property
def away_races(self):
return ["protoss", "terran", "zerg", "random"]
@property
def competitor_race(self):
return self._competitor_race
_BUILT_IN_BOTS = [
"very_easy", "easy", "medium", "medium_hard", "hard", "harder",
"very_hard", "cheat_vision", "cheat_money", "cheat_insane"
]
_BOT_RACES = ["terran", "zerg", "protoss"]
BUILT_IN_COMPETITORS = {k: BuiltInAI(k) for k in _BUILT_IN_BOTS}
BUILT_IN_COMPETITORS.update({
"{}_{}".format(bot, race): BuiltInAI(bot, competitor_race=race)
for (bot, race) in itertools.product(_BUILT_IN_BOTS, _BOT_RACES)})
def race_string_to_enum(race):
return common_pb2.Race.Value(race.capitalize())
def difficulty_string_to_enum(difficulty):
return sc2api_pb2.Difficulty.Value(
"".join(s.capitalize() for s in difficulty.split("_")))
def is_built_in_bot(difficulty):
return difficulty in BUILT_IN_COMPETITORS
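# Illustrative sketch (editor's addition, not part of the original module):
# looking up a built-in bot and converting its difficulty and race strings to
# the SC2 API enum values used when hosting a game.
if __name__ == "__main__":
  bot = BUILT_IN_COMPETITORS["very_easy_terran"]
  print(bot.type, bot.difficulty, bot.competitor_race)
  print(race_string_to_enum(bot.competitor_race))  # common_pb2 race value.
  print(difficulty_string_to_enum(bot.difficulty))  # sc2api_pb2 difficulty.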
| alphastar-main | alphastar/commons/competitors.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for agents."""
import math
from alphastar import types
def get_world_size(action_spec: types.ActionSpec) -> int:
"""Gets number of horizontal and vertical pixels of the (square) world map."""
world_size = int(math.sqrt(action_spec["world"].maximum + 1))
assert world_size * world_size == action_spec["world"].maximum + 1
return world_size
class Argument:
"""List of starcraft action argument names."""
FUNCTION = "function"
DELAY = "delay"
QUEUED = "queued"
REPEAT = "repeat"
UNIT_TAGS = "unit_tags"
TARGET_UNIT_TAG = "target_unit_tag"
WORLD = "world"
| alphastar-main | alphastar/architectures/util.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/architectures/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Alphastar architectures."""
import functools
import itertools
import types
from alphastar.architectures import modular
from alphastar.architectures.dummy import dummy
from alphastar.architectures.standard import standard
from alphastar.architectures.standard.configs import full as full_config
from alphastar.architectures.standard.configs import lite as lite_config
ARCHITECTURES = types.MappingProxyType(
dict(
alphastar=dict(
dummy=dict(getter=dummy.get_alphastar_dummy, config=None),
lite=dict(
getter=standard.get_alphastar_standard,
config=lite_config.get_config()),
full=dict(
getter=standard.get_alphastar_standard,
config=full_config.get_config()),
)))
ARCHITECTURE_NAMES = tuple(
itertools.chain.from_iterable(
[[f'{k}.{name}' for name in v] for k, v in ARCHITECTURES.items()]))
def _check_architecture_supported(architecture_name: str):
"""Checks if architecture name is valid."""
if architecture_name not in ARCHITECTURE_NAMES:
raise ValueError(f'Unknown architecture {architecture_name}. Architecture '
f'name must be in {ARCHITECTURE_NAMES}.')
def get_architecture(architecture_name: str) -> modular.ArchitectureBuilder:
"""Gets an architecture to build based on the architecture name."""
_check_architecture_supported(architecture_name)
base, name = architecture_name.split('.', maxsplit=1)
getter, config = ARCHITECTURES[base][name]['getter'], ARCHITECTURES[base][
name]['config']
if config:
getter = functools.partial(getter, config=config)
return getter
def is_transformer_arch(arch_str):
"""Checks if an architecture has a Transformer module."""
return 'transformer' in arch_str
def get_default_config(architecture_name: str):
"""Gets the default architecture for standard architecture configs."""
_check_architecture_supported(architecture_name)
base, name = architecture_name.split('.', maxsplit=1)
return ARCHITECTURES[base][name]['config']
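# Illustrative sketch (editor's addition, not part of the original module):
# architecture names are '<family>.<variant>' strings. The returned builder is
# a callable that is later invoked with an input spec, an action spec and a
# boolean flag to construct the network component.
if __name__ == "__main__":
  print(ARCHITECTURE_NAMES)  # ('alphastar.dummy', 'alphastar.lite', ...)
  builder = get_architecture('alphastar.dummy')
  lite_config = get_default_config('alphastar.lite')
  print(builder, lite_config)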
| alphastar-main | alphastar/architectures/architectures.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract classes for the AlphaStar components.
Components are the basic building blocks of the alphastar architecture.
They are typically assembled in a sequence, using `SequentialComponent`.
Components implement an `unroll` function, which takes inputs from the current
rollout, and prev_state from the previous rollout. It returns outputs for the
current rollout, and an optional state to be passed to the next rollout.
Inputs and outputs are types.StreamDict objects (nested dictionaries with
chex.Array as leaves).
The `unroll` function takes two arguments:
`inputs`: All the tensors corresponding to the current rollout. In a
sequential component, this contains the observations for this rollout, and
the outputs of all the previous components in the sequence. Since each input
is a rollout, all tensors have a first dimension with size `unroll_len`.
`prev_state`: The set of `state` tensors coming from the previous rollout
(eg. LSTM state). They only contain the state of the timestep immediately
before the first timestep of the current rollout (if there is no overlap
between rollouts, that's the last step of the previous rollout), so unlike
inputs, they do not have a first dimension with size `unroll_len`.
Note that this contains the states produced by all the components in the
previous rollout, including components which appear later in the sequence.
And the `unroll` function returns three arguments:
`outputs`: The set of tensors this component computes for this rollout, they
will be passed to the next components in the sequence and returned by the
SequentialComponent `unroll` function.
`next_state`: The set of tensors to pass to the next rollout components. All
the states passed to the next rollouts are aggregated into a single
types.StreamDict object before being passed to the next rollout.
`log`: A log_utils.Log object (dictionary of dictionary) containing logs. The
first level contains the name of the logs, the second contains the type of
reduce function to apply to the logs (see log_utils).
To implement this function, a subclass must implement `_unroll`, since the
`unroll` function performs additional types checks.
By default, the `prev_state` of the very first rollout contains only zeros. It
can be changed by overriding the `_initial_state` function.
Components have input and output static typing. This is enforced through 4
properties which must be implemented for each module, returning a types.SpecDict
object (nested dictionaries with specs.Array as leaves):
`input_spec`: The spec of the `inputs` argument of the `unroll` function. This
must be a subset of what is passed as `inputs`, and the shapes and data
types must match. Only inputs specified in the `input_spec` will be visible
in the `_unroll` function.
`prev_state_spec`: The spec of the `prev_state` argument of the `unroll`
function. This must be a subset of what is passed as `prev_state`, and the
shapes and data types must match. Only inputs specified in the
`prev_state_spec` will be visible in the `_unroll` function.
`output_spec`: The spec of the `output` returned by the `_unroll` function.
The leaves must match exactly, and the shapes and types must match as well.
`next_state_spec`: The spec of the `next_state` returned by the `_unroll`
function. The leaves must match exactly, and the shapes and types must match
as well. Note that since `prev_state` contains the states passed by all the
components from the previous rollout, `next_state_spec` has no reason to
match `prev_state_spec`. For instance, one component could use the state
passed by another component, or pass a state without using it (so that other
components use it).
For convenience, a `BatchedComponent` class can be used instead of the base
`Component`. Instead of `_unroll`, the subclasses must implement a `_forward`
function. The difference is that the `_forward` function operates on a single
timestep instead of a rollout. This has two consequences:
* These components do not use `prev_state` and `next_state`.
* The tensors in the `inputs` argument (and returned in `outputs`) do not have
the first dimension with size `rollout_len`.
"""
import abc
from typing import Callable, Optional, Tuple
from alphastar import types
from alphastar.commons import log_utils
import chex
import haiku as hk
import jax
import jax.numpy as jnp
ForwardOutputType = Tuple[types.StreamDict, log_utils.Log]
UnrollOutputType = Tuple[types.StreamDict, types.StreamDict, log_utils.Log]
class Component(abc.ABC):
"""Basic AlphaStar component (see module docstring)."""
def __init__(self, name: Optional[str] = None):
self._name = name or "Component"
@property
def name(self) -> str:
return self._name
@property
@abc.abstractmethod
def input_spec(self) -> types.SpecDict:
"""Returns the spec of the input of this module."""
@property
@abc.abstractmethod
def prev_state_spec(self) -> types.SpecDict:
"""Returns the spec of the prev_state of this module."""
@property
@abc.abstractmethod
def output_spec(self) -> types.SpecDict:
"""Returns the spec of the output of this module."""
@property
@abc.abstractmethod
def next_state_spec(self) -> types.SpecDict:
"""Returns the spec of the next_state of this module."""
def _initial_state(self) -> types.StreamDict:
"""Initial state of the component.
If this component returns a next_state in its unroll function, then
this function provides the initial state. By default, we use zeros,
but it can be overridden for custom initial state.
Subclasses should override this function instead of `initial_state`, which
adds additional checks.
Returns:
A Dict containing the state before the first step.
"""
return jax.tree_map(
lambda spec: jnp.zeros(shape=spec.shape, dtype=spec.dtype),
self.next_state_spec)
def initial_state(self) -> types.StreamDict:
"""Initial state of the component.
If this component returns a next_state in its unroll function, then
this function provides the initial state. By default, we use zeros,
but it can be overridden for custom initial state.
Returns:
A Dict containing the state before the first step.
"""
initial_state = self._initial_state()
self.next_state_spec.validate(
initial_state, error_prefix=f"{self.name} initial_state")
return initial_state
@abc.abstractmethod
def _unroll(self,
inputs: types.StreamDict,
prev_state: types.StreamDict) -> UnrollOutputType:
"""Computes the output of the module over unroll_len timesteps.
Call with a unroll_len=1 for a single step.
Subclasses should override this function instead of `unroll`, which
adds additional checks.
Args:
inputs: A StreamDict containing [unroll_len, ...] tensors.
prev_state: A StreamDict containing [...] tensors, containing the
next_state of the last timestep of the previous unroll.
Returns:
outputs: A StreamDict containing [unroll_len, ...] tensors.
next_state: A dict containing [...] tensors representing the
state to be passed as the first state of the next rollout.
If overlap_len is 0, this is the last state of this rollout.
More generally, this is the (unroll_len - overlap_len)-th state.
logs: A dict containing [unroll_len] tensors to be logged.
"""
def unroll(self,
inputs: types.StreamDict,
prev_state: types.StreamDict) -> UnrollOutputType:
"""Computes the output of the module over unroll_len timesteps.
Call with a unroll_len=1 for a single step.
Args:
inputs: A StreamDict containing [unroll_len, ...] tensors.
prev_state: A StreamDict containing [...] tensors, containing the
next_state of the last timestep of the previous unroll.
Returns:
outputs: A StreamDict containing [unroll_len, ...] tensors.
next_state: A dict containing [...] tensors representing the
state to be passed as the first state of the next rollout.
If overlap_len is 0, this is the last state of this rollout.
More generally, this is the (unroll_len - overlap_len)-th state.
logs: A dict containing [unroll_len] tensors to be logged.
"""
if inputs:
try:
chex.assert_equal_shape(jax.tree_leaves(inputs), dims=0)
except AssertionError as e:
raise AssertionError(f"{self.name}: {e}") from e
self.input_spec.validate(inputs,
num_leading_dims_to_ignore=1,
error_prefix=f"{self.name} inputs")
self.prev_state_spec.validate(prev_state,
error_prefix=f"{self.name} prev_state")
# We hide inputs not specified in input_spec to prevent accidental use.
inputs = inputs.filter(self.input_spec)
prev_state = prev_state.filter(self.prev_state_spec)
with hk.experimental.name_scope(self.name):
outputs, next_state, logs = self._unroll(inputs, prev_state)
self.output_spec.validate(outputs,
num_leading_dims_to_ignore=1,
error_prefix=f"{self.name} outputs")
self.next_state_spec.validate(next_state,
error_prefix=f"{self.name} next_state")
return outputs, next_state, logs
ArchitectureBuilder = Callable[[types.SpecDict, types.ActionSpec, bool],
Component]
class BatchedComponent(Component):
"""A Component which is not using the unroll dimension.
This is a helper module to write simpler components.
Such a component computes a function _forward such that
unroll(x)[t] = _forward(x[t]) where t=0..unroll_len-1.
Such a module must be stateless.
"""
@property
def prev_state_spec(self) -> types.SpecDict:
return types.SpecDict()
@property
def next_state_spec(self) -> types.SpecDict:
return types.SpecDict()
def _unroll(self,
inputs: types.StreamDict,
prev_state: types.StreamDict) -> UnrollOutputType:
del prev_state
outputs, logs = jax.vmap(self._forward)(inputs)
return outputs, types.StreamDict(), logs
@abc.abstractmethod
def _forward(self, inputs: types.StreamDict) -> ForwardOutputType:
"""Computes the output of this module for each timestep.
Args:
inputs: A StreamDict containing [...] tensors.
Returns:
outputs: A StreamDict containing [...] tensors.
logs: A dict containing [...] tensors to be logged.
"""
class SequentialComponent(Component):
"""A component made of a sequence of components.
Components are added with the `append` method and form a sequence, such
that the inputs of the i-th component contain of the union of the inputs
of the SequentialComponent, and the output of the first to the (i-1)-th
components (if two inputs have the same name, the most recent hides the older
ones).
On the other hand, the states of the previous rollout (`prev_state`) are
agnostic to the order of the components since the states of all of the
components are aggregated before being passed from the previous rollout (and
two next states cannot have the same name).
For instance, given two components `component_a` and `component_b`, and inputs
`rollout1` and `rollout2`:
```
sequence = SequentialComponent()
sequence.append(component_a)
sequence.append(component_b)
state0 = sequence.initial_state()
outputs1, state1, log1 = sequence.unroll(rollout1, state0)
outputs2, state2, log2 = sequence.unroll(rollout2, state1)
```
is equivalent to:
```
def merge(a, b):
merged = a.copy()
for k, v in b.items():
merged[k] = v
return merged
state0a = component_a.initial_state()
state0b = component_b.initial_state()
state0 = merge(state0a, state0b)
outputs1a, state1a, log1a = component_a.unroll(rollout1, state0)
inputs1b = merge(rollout1, outputs1a)
outputs1b, state1b, log1b = component_b.unroll(inputs1b, state0)
outputs1 = merge(outputs1a, outputs1b)
state1 = merge(state1a, state1b)
log1 = merge(log1a, log1b)
outputs2a, state2a, log2a = component_a.unroll(rollout2, state1)
inputs2b = merge(rollout2, outputs2a)
outputs2b, state2b, log2b = component_b.unroll(inputs2b, state1)
outputs2 = merge(outputs2a, outputs2b)
state2 = merge(state2a, state2b)
log2 = merge(log2a, log2b)
```
"""
def __init__(self,
name: Optional[str] = None):
super().__init__(name=name)
self._components = []
self._input_spec = types.SpecDict()
self._output_spec = types.SpecDict()
self._prev_state_spec = types.SpecDict()
self._next_state_spec = types.SpecDict()
self._input_spec_src = types.StrDict()
self._output_spec_src = types.StrDict()
self._prev_state_spec_src = types.StrDict()
self._next_state_spec_src = types.StrDict()
def append(self, component: Component) -> None:
"""Adds a component to the sequence.
    It performs a spec check on the inputs and outputs:
* For each input spec `x` in `component.input_spec`, we use the following
process:
1) If the name of `x` is in `self.input_spec` or in the output spec of
any previously appended component, then we check that the shape and
data type match. If not, a ValueError is raised.
      2) If the name of `x` is not in the output spec of any previously
added component, then this input must be specified as an input of
the `SequentialComponent`, so we add it to `self.input_spec`.
* For each output spec `x` in `component.output_spec`, we use the
following process:
1) If the name appears in `self.input_spec` or in the output spec of
any previously added component, then we check that the shape and
data type match. If not, a ValueError is raised.
2) `x` is added to `self.output_spec`.
    * For each previous state input `x` in `component.prev_state_spec`, we
      check that the spec matches any previous state with the same name
      specified by a previously added component. If they do not match, a
      ValueError is raised. We then add `x` to `self.prev_state_spec`.
    * For each next state output `x` in `component.next_state_spec`, we check
      that no previously added component specified a next state with the same
      name. If one did, a ValueError is raised. We then add `x` to
      `self.next_state_spec`.
Args:
component: The `Component` to append at the end of the sequence of
components contained by this module.
Raises:
      ValueError: If any of the spec checks specified above fails.
"""
self._components.append(component)
# input spec:
for spec_name, spec in component.input_spec.items():
if spec_name in self._output_spec:
if spec != self._output_spec[spec_name]:
raise ValueError(
f"Input {spec_name} matches the name of an output of a "
f"previously added component ({self._output_spec_src[spec_name]})"
", but their specs do not match."
f"Input spec: {spec}. "
f"Previous output spec: {self._output_spec[spec_name]}.")
elif spec_name in self._input_spec:
if spec != self._input_spec[spec_name]:
raise ValueError(
f"Input {spec_name} matches the name of an input of a "
f"previously added component ({self._input_spec_src[spec_name]})"
", but their specs do not match."
f"Input spec: {spec}. "
f"Previous input spec: {self._input_spec[spec_name]}.")
else:
self._input_spec_src[spec_name] = component.name
self._input_spec[spec_name] = spec
# prev_state spec:
for spec_name, spec in component.prev_state_spec.items():
if spec_name in self._prev_state_spec:
if spec != self._prev_state_spec[spec_name]:
raise ValueError(
f"Previous state {spec_name} matches the name of a previous "
"state used by a previously added component "
f"({self._prev_state_spec_src[spec_name]})"
", but their specs do not match."
f"prev_state spec: {spec}. "
f"Previous prev_state spec: {self._prev_state_spec[spec_name]}.")
else:
self._prev_state_spec_src[spec_name] = component.name
self._prev_state_spec[spec_name] = spec
# output spec
for spec_name, spec in component.output_spec.items():
if spec_name in self._output_spec:
if spec != self._output_spec[spec_name]:
raise ValueError(
f"Output {spec_name} matches the name of an output of a "
f"previously added component ({self._output_spec_src[spec_name]})"
", but their specs do not match."
f"Output spec: {spec}. "
f"Previous output spec: {self._output_spec[spec_name]}.")
elif spec_name in self._input_spec:
if spec != self._input_spec[spec_name]:
raise ValueError(
f"Output {spec_name} matches the name of an input of a "
f"previously added component ({self._input_spec_src[spec_name]})"
", but their specs do not match."
f"Output spec: {spec}. "
f"Previous input spec: {self._input_spec[spec_name]}.")
self._output_spec_src[spec_name] = component.name
self._output_spec[spec_name] = spec
# next_state spec:
for spec_name, spec in component.next_state_spec.items():
if spec_name in self._next_state_spec:
raise ValueError(
f"Next state {spec_name} is used by two components: "
f"{self._next_state_spec_src[spec_name]} and {component.name}.")
else:
self._next_state_spec_src[spec_name] = component.name
self._next_state_spec[spec_name] = spec
@property
def input_spec(self) -> types.SpecDict:
return self._input_spec
@property
def prev_state_spec(self) -> types.SpecDict:
return self._prev_state_spec
@property
def output_spec(self) -> types.SpecDict:
return self._output_spec
@property
def next_state_spec(self) -> types.SpecDict:
return self._next_state_spec
def _initial_state(self) -> types.StreamDict:
initial_state = types.StreamDict()
initial_state_src = types.NestedDict[str]()
for component in self._components:
for state_name, state in component.initial_state().items():
if state_name in initial_state:
# This should never happen, since we already check next_state.
raise ValueError(
f"Initial state {state_name} is defined by {component.name} but "
f"was already defined by {initial_state_src[state_name]}.")
else:
initial_state_src[state_name] = component.name
initial_state[state_name] = state
# Make sure that initial_state matches next_state_spec:
self.next_state_spec.validate(initial_state,
exact_match=True,
error_prefix=f"{self.name} Initial state")
return initial_state
def _unroll(self,
inputs: types.StreamDict,
prev_state: types.StreamDict) -> UnrollOutputType:
inputs = inputs.copy()
outputs, next_state, logs = types.StreamDict(), types.StreamDict(), {}
for component in self._components:
comp_outputs, comp_next_state, comp_logs = component.unroll(
inputs, prev_state)
inputs.update(comp_outputs)
outputs.update(comp_outputs)
next_state.update(comp_next_state)
for log_name, log in comp_logs.items():
logs[f"[{component.name}] {log_name}"] = log
return outputs, next_state, logs
| alphastar-main | alphastar/architectures/modular.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/architectures/dummy/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dummy."""
from absl.testing import absltest
from absl.testing import parameterized
from alphastar.architectures.dummy import dummy
from dm_env import specs
import haiku as hk
import jax
import jax.numpy as jnp
class DummyTest(parameterized.TestCase):
"""Basic tests for the dummy architecture."""
@parameterized.parameters(True, False)
def test_forward(self, is_training: bool):
"""Test that the forward pass does not crash, and has correct shapes."""
batch_size = 2
unroll_len = 3 if is_training else 1
max_num_selected_units = 4
obs_spec = {'player': specs.Array((7,), jnp.int32)}
action_spec = {
'function': specs.BoundedArray((), jnp.int32, minimum=0, maximum=10),
'delay': specs.BoundedArray((), jnp.int32, minimum=0, maximum=6),
'queued': specs.BoundedArray((), jnp.int32, minimum=0, maximum=2),
'repeat': specs.BoundedArray((), jnp.int32, minimum=0, maximum=4),
'unit_tags': specs.BoundedArray(
(max_num_selected_units,), jnp.int32, minimum=0, maximum=10),
'target_unit_tag': specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=10),
'world': specs.BoundedArray((), jnp.int32, minimum=0, maximum=25)}
input_spec = {
'observation': obs_spec,
'step_type': specs.BoundedArray((), jnp.int32, minimum=0, maximum=2)}
if is_training:
input_spec['behaviour_features'] = {'action': action_spec}
alphastar = dummy.get_alphastar_dummy(
input_spec=input_spec,
action_spec=action_spec,
is_training=is_training,
overlap_len=0,
burnin_len=0)
def make_input(spec):
return jnp.zeros((batch_size, unroll_len) + spec.shape, spec.dtype)
inputs = jax.tree_map(make_input, alphastar.input_spec)
rng_key = jax.random.PRNGKey(42)
initial_state_init, initial_state_apply = hk.transform(
jax.vmap(alphastar.initial_state, axis_size=batch_size))
initial_state = initial_state_apply(initial_state_init(rng_key), rng_key)
forward_init, forward_apply = hk.transform(
jax.vmap(alphastar.unroll))
params = forward_init(rng_key, inputs, initial_state)
outputs, next_state, _ = forward_apply(
params, rng_key, inputs, initial_state)
for v in outputs.values():
self.assertEqual(v.shape[:2], (batch_size, unroll_len))
alphastar.output_spec.validate(outputs, num_leading_dims_to_ignore=2)
for v in next_state.values():
self.assertEqual(v.shape[0], batch_size)
alphastar.next_state_spec.validate(next_state, num_leading_dims_to_ignore=1)
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/architectures/dummy/dummy_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AlphaStar dummy (v3) architecture.
Minimal architecture. It is not expected to train well (if at all), but the
interface is correct.
"""
from typing import Mapping, Optional
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures import util
from alphastar.architectures.components import common
from alphastar.architectures.components import units
from alphastar.architectures.components import vector
from alphastar.commons import sample
import jax.numpy as jnp
def _get_vector_head(argument_name: types.ArgumentName,
action_spec: types.ActionSpec,
vector_stream_size: int,
is_training: bool,
sample_fn: sample.SampleFn
) -> modular.Component:
"""Produces logits and action for a vector argument."""
num_logits = action_spec[argument_name].maximum + 1
component = modular.SequentialComponent(name=f"{argument_name}_head")
component.append(vector.Logits(
name=f"{argument_name}_logits",
logits_output_name=("logits", argument_name),
mask_output_name=("masks", argument_name),
num_logits=num_logits,
input_size=vector_stream_size,
input_name="vector_stream",
num_linear_layers=1))
if is_training:
component.append(common.ActionFromBehaviourFeatures(
name=f"{argument_name}_action",
argument_name=argument_name))
else:
component.append(common.Sample(
name=f"{argument_name}_sample",
argument_name=argument_name,
num_logits=num_logits,
sample_fn=sample_fn))
if argument_name == util.Argument.FUNCTION:
component.append(common.ArgumentMasks(
name="argument_masks",
action_spec=action_spec))
return component
def _get_unit_tags_head(action_spec: types.ActionSpec,
vector_stream_size: int,
is_training: bool,
sample_fn: sample.SampleFn) -> modular.Component:
"""Produces logits and action for unit_tag argument."""
num_logits = action_spec["unit_tags"].maximum + 1
max_num_selected_units = action_spec["unit_tags"].shape[0]
max_num_observed_units = int(action_spec["unit_tags"].maximum)
inner_component = modular.SequentialComponent(
name="unit_tags_inner_component")
inner_component.append(vector.Logits(
name="unit_tags_logits",
logits_output_name=("logits", util.Argument.UNIT_TAGS),
mask_output_name=("masks", util.Argument.UNIT_TAGS),
num_logits=num_logits,
input_size=vector_stream_size,
input_name="vector_stream",
num_linear_layers=1))
if is_training:
inner_component.append(common.ActionFromBehaviourFeatures(
name="unit_tags_action",
argument_name=util.Argument.UNIT_TAGS))
else:
inner_component.append(common.Sample(
name="unit_tags_sample",
argument_name=util.Argument.UNIT_TAGS,
num_logits=num_logits,
sample_fn=sample_fn))
unit_tags_head_per_step_inputs = []
if is_training:
unit_tags_head_per_step_inputs.append(
("behaviour_features", "action", util.Argument.UNIT_TAGS))
return units.UnitTagsHead(
name="unit_tags_head",
inner_component=inner_component,
constant_inputs=["vector_stream"],
carries=[],
per_step_inputs=unit_tags_head_per_step_inputs,
per_step_outputs=[("logits", util.Argument.UNIT_TAGS),
("masks", util.Argument.UNIT_TAGS),
("action", util.Argument.UNIT_TAGS)],
max_num_selected_units=max_num_selected_units,
max_num_observed_units=max_num_observed_units,
action_output=("action", util.Argument.UNIT_TAGS))
def get_alphastar_dummy(
input_spec: types.SpecDict,
action_spec: types.ActionSpec,
is_training: bool,
overlap_len: int,
burnin_len: int,
sample_fns: Optional[Mapping[types.ArgumentName, sample.SampleFn]] = None,
name: str = "alpha_star_dummy",
**unused_kwargs
) -> modular.Component:
"""Returns the alphastar dummy architecture."""
del overlap_len
del burnin_len
obs_spec = input_spec.get("observation")
vector_stream_size = 1024
if sample_fns is None:
sample_fns = {k: sample.sample for k in action_spec}
component = modular.SequentialComponent(name=name)
# Encoders:
component.append(vector.VectorEncoder(
name="player_encoder",
input_name=("observation", "player"),
output_name="vector_stream",
num_features=obs_spec["player"].shape[0],
output_size=vector_stream_size,
fun=jnp.log1p))
# Heads:
for arg in [util.Argument.FUNCTION,
util.Argument.DELAY,
util.Argument.QUEUED,
util.Argument.REPEAT,
util.Argument.TARGET_UNIT_TAG,
util.Argument.WORLD]:
component.append(_get_vector_head(
argument_name=arg,
action_spec=action_spec,
vector_stream_size=vector_stream_size,
is_training=is_training,
sample_fn=sample_fns[arg]))
component.append(_get_unit_tags_head(
action_spec=action_spec,
vector_stream_size=vector_stream_size,
is_training=is_training,
sample_fn=sample_fns[util.Argument.UNIT_TAGS]))
return component
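# Example usage (sketch, mirroring dummy_test.py; `input_spec`, `action_spec`,
# `inputs`, `initial_state` and `rng_key` are built as in that test, and
# haiku/jax are imported there):
#
#   architecture = get_alphastar_dummy(
#       input_spec=input_spec, action_spec=action_spec,
#       is_training=False, overlap_len=0, burnin_len=0)
#   forward_init, forward_apply = hk.transform(jax.vmap(architecture.unroll))
#   params = forward_init(rng_key, inputs, initial_state)
#   outputs, next_state, logs = forward_apply(
#       params, rng_key, inputs, initial_state)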
| alphastar-main | alphastar/architectures/dummy/dummy.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Torso blocks."""
from typing import Sequence
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures import util
from alphastar.architectures.components import merge
from alphastar.architectures.components import units
from alphastar.architectures.components import vector
from alphastar.architectures.components import visual
import ml_collections
def get_torso(obs_spec: types.ObsSpec,
action_spec: types.ActionSpec,
vector_stream_size: int,
units_stream_size: int,
visual_stream_sizes: Sequence[int],
config: ml_collections.ConfigDict,
) -> modular.Component:
"""Gets the agent torso, where streams are combined and processed."""
max_num_observed_units = obs_spec["raw_units"].shape[0]
num_raw_unit_features = obs_spec["raw_units"].shape[1]
spatial_size = obs_spec["minimap_height_map"].shape[0]
first_downscale_factor = config.visual_downscale.downscale_factor
component = modular.SequentialComponent(name="torso")
component.append(vector.Resnet(
name="vector_resnet_1",
input_size=vector_stream_size,
input_name="vector_stream",
output_name="vector_stream",
**config.vector_resnet_1))
component.append(units.Transformer(
name="units_transformer",
max_num_observed_units=obs_spec["raw_units"].shape[0],
units_stream_size=units_stream_size,
input_name="units_stream",
output_name="units_stream",
**config.units_transformer))
component.append(units.ToVisualScatter(
name="scatter",
input_name="units_stream",
output_name=f"visual_stream_ds{first_downscale_factor}_from_units",
max_num_observed_units=max_num_observed_units,
num_raw_unit_features=num_raw_unit_features,
units_stream_size=units_stream_size,
units_world_dim=util.get_world_size(action_spec),
output_spatial_size=spatial_size // first_downscale_factor,
output_features_size=visual_stream_sizes[0],
**config.scatter))
component.append(merge.SumMerge(
name="scatter_merge",
input_names=[f"visual_stream_ds{first_downscale_factor}",
f"visual_stream_ds{first_downscale_factor}_from_units"],
output_name=f"visual_stream_ds{first_downscale_factor}",
stream_shape=(spatial_size // first_downscale_factor,
spatial_size // first_downscale_factor,
visual_stream_sizes[0])))
for i in range(len(visual_stream_sizes) - 1):
downscale_factor = config.visual_downscale.downscale_factor
input_downscale_factor = int(downscale_factor**(i + 1))
output_downscale_factor = int(downscale_factor**(i + 2))
component.append(visual.Downscale(
name=f"visual_downscale_ds{output_downscale_factor}",
input_name=f"visual_stream_ds{input_downscale_factor}",
output_name=f"visual_stream_ds{output_downscale_factor}",
input_spatial_size=spatial_size // input_downscale_factor,
input_features_size=visual_stream_sizes[i],
output_features_size=visual_stream_sizes[i + 1],
**config.visual_downscale))
component.append(visual.Resnet(
name="visual_resnet",
input_name=f"visual_stream_ds{output_downscale_factor}",
output_name=f"visual_stream_ds{output_downscale_factor}",
input_spatial_size=spatial_size // output_downscale_factor,
input_features_size=visual_stream_sizes[2],
**config.visual_resnet))
component.append(visual.ToVector(
name="visual_to_vector",
input_name=f"visual_stream_ds{output_downscale_factor}",
output_name="vector_stream_from_visual",
input_spatial_size=spatial_size // output_downscale_factor,
input_features_size=visual_stream_sizes[2],
vector_stream_size=vector_stream_size,
**config.visual_to_vector))
component.append(units.ToVector(
name="torso_units_to_vector",
input_name="units_stream",
output_name="vector_stream_from_units",
max_num_observed_units=max_num_observed_units,
units_stream_size=units_stream_size,
units_hidden_sizes=(units_stream_size * 2,),
vector_stream_size=vector_stream_size))
component.append(merge.VectorMerge(
name="vector_merge",
input_sizes={"vector_stream": vector_stream_size,
"vector_stream_from_units": vector_stream_size,
"vector_stream_from_visual": vector_stream_size},
output_name="vector_stream",
output_size=vector_stream_size,
**config.vector_merge))
component.append(vector.Resnet(
name="vector_resnet_2",
input_size=vector_stream_size,
input_name="vector_stream",
output_name="vector_stream",
**config.vector_resnet_2))
return component
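# Data flow summary of the torso assembled above: the vector stream is first
# processed by a resnet and the units stream by a transformer; units are then
# scattered into the highest-resolution visual stream, which is repeatedly
# downscaled and processed by a visual resnet; finally the visual and units
# streams are projected to vectors and merged back into vector_stream (via
# merge.VectorMerge) before a second vector resnet.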
| alphastar-main | alphastar/architectures/standard/torso.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AlphaStar standard v3 architecture."""
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures import util
from alphastar.architectures.standard import encoders
from alphastar.architectures.standard import heads
from alphastar.architectures.standard import torso
import ml_collections
def get_alphastar_standard(
input_spec: types.SpecDict,
action_spec: types.ActionSpec,
is_training: bool,
overlap_len: int,
burnin_len: int,
config: ml_collections.ConfigDict,
name: str = 'alpha_star'
) -> modular.Component:
"""Returns the alphastar lite architecture."""
del burnin_len
obs_spec = input_spec.get('observation')
component = modular.SequentialComponent(name=name)
# Encoders:
component.append(encoders.get_vector_encoder(
obs_spec=obs_spec,
action_spec=action_spec,
is_training=is_training,
vector_stream_size=config.vector_stream_size,
config=config.encoders.vector))
component.append(encoders.get_units_encoder(
obs_spec=obs_spec,
action_spec=action_spec,
units_stream_size=config.units_stream_size,
config=config.encoders.units))
component.append(encoders.get_visual_encoder(
obs_spec=obs_spec,
action_spec=action_spec,
visual_features_size=config.visual_stream_sizes[0],
config=config.encoders.visual))
# Torso:
component.append(torso.get_torso(
obs_spec=obs_spec,
action_spec=action_spec,
vector_stream_size=config.vector_stream_size,
units_stream_size=config.units_stream_size,
visual_stream_sizes=config.visual_stream_sizes,
config=config.torso))
# Heads:
for arg in [util.Argument.FUNCTION,
util.Argument.DELAY,
util.Argument.QUEUED,
util.Argument.REPEAT]:
component.append(heads.get_vector_head(
argument_name=arg,
action_spec=action_spec,
vector_stream_size=config.vector_stream_size,
is_training=is_training,
overlap_len=overlap_len,
config=config.heads[arg]))
component.append(heads.get_unit_tags_head(
obs_spec=obs_spec,
action_spec=action_spec,
vector_stream_size=config.vector_stream_size,
units_stream_size=config.units_stream_size,
is_training=is_training,
config=config.heads.unit_tags))
component.append(heads.get_target_unit_tag_head(
obs_spec=obs_spec,
action_spec=action_spec,
vector_stream_size=config.vector_stream_size,
units_stream_size=config.units_stream_size,
is_training=is_training,
config=config.heads.target_unit_tag))
component.append(heads.get_world_head(
obs_spec=obs_spec,
action_spec=action_spec,
vector_stream_size=config.vector_stream_size,
visual_stream_sizes=config.visual_stream_sizes,
is_training=is_training,
config=config.heads.world))
return component
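# Example (sketch, following standard_test.py; `input_spec` and `action_spec`
# are built there via test_utils.get_test_specs): the architecture is built
# from a ConfigDict, e.g. the lite config:
#
#   from alphastar.architectures.standard.configs import lite as config_lite
#   architecture = get_alphastar_standard(
#       input_spec=input_spec, action_spec=action_spec, is_training=True,
#       overlap_len=1, burnin_len=0, config=config_lite.get_config())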
| alphastar-main | alphastar/architectures/standard/standard.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/architectures/standard/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoder blocks."""
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures import util
from alphastar.architectures.components import common
from alphastar.architectures.components import merge
from alphastar.architectures.components import units
from alphastar.architectures.components import vector
from alphastar.architectures.components import visual
import jax.numpy as jnp
import ml_collections
def _get_prev_action_encoder(action_spec: types.ActionSpec,
argument_name: types.ArgumentName,
is_training: bool,
vector_stream_size: int,
) -> modular.Component:
"""Gets encoder for a single previous (scalar) action."""
component = modular.SequentialComponent(
name=f"prev_{argument_name}_encoder")
component.append(common.FeatureFromPrevState(
name=f"{argument_name}_from_prev_state",
input_name=("action", argument_name),
output_name=("prev_action", argument_name),
is_training=is_training,
stream_shape=(),
stream_dtype=jnp.int32))
if argument_name == util.Argument.FUNCTION:
component.append(common.ArgumentMasks(
name="argument_masks",
input_name=("prev_action", util.Argument.FUNCTION),
output_name="prev_argument_masks",
action_spec=action_spec))
component.append(vector.Embedding(
name=f"prev_{argument_name}_embedding",
input_name=("prev_action", argument_name),
output_name=("prev_action_embeddings", argument_name),
num_classes=action_spec[argument_name].maximum + 1,
output_size=vector_stream_size,
mask_name=("prev_argument_masks", argument_name)))
return component
def get_vector_encoder(obs_spec: types.ObsSpec,
action_spec: types.ActionSpec,
is_training: bool,
vector_stream_size: int,
config: ml_collections.ConfigDict
) -> modular.Component:
"""The full vector encoder, encodes the vector inputs into vector_stream."""
component = modular.SequentialComponent(name="vector_encoder")
component.append(vector.ClockFeatureEncoder(
name="game_loop",
input_name=("observation", "game_loop"),
output_name="game_loop_embedding",
output_size=vector_stream_size,
**config.game_loop))
component.append(vector.VectorEncoder(
name="unit_counts_bow",
input_name=("observation", "unit_counts_bow"),
output_name="unit_counts_bow_embedding",
num_features=obs_spec["unit_counts_bow"].shape[0],
output_size=vector_stream_size,
**config.unit_counts_bow))
component.append(vector.VectorEncoder(
name="player",
input_name=("observation", "player"),
output_name="player_embedding",
num_features=obs_spec["player"].shape[0],
output_size=vector_stream_size,
**config.player))
component.append(vector.Embedding(
name="mmr",
input_name=("observation", "mmr"),
output_name="mmr_embedding",
output_size=vector_stream_size,
**config.mmr))
component.append(vector.Embedding(
name="home_race_requested",
input_name=("observation", "home_race_requested"),
output_name="home_race_req_embedding",
num_classes=5,
output_size=vector_stream_size))
component.append(vector.Embedding(
name="away_race_requested",
input_name=("observation", "away_race_requested"),
output_name="away_race_req_embedding",
num_classes=5,
output_size=vector_stream_size))
component.append(vector.Embedding(
name="away_race_observed",
input_name=("observation", "away_race_observed"),
output_name="away_race_obs_embedding",
num_classes=5,
output_size=vector_stream_size))
component.append(vector.FixedLengthToMask(
name="upgrades_to_mask_encoder",
input_name=("observation", "upgrades_fixed_length"),
output_name="upgrades_boolean_mask",
input_size=obs_spec["upgrades_fixed_length"].shape[0],
num_classes=obs_spec["upgrades_fixed_length"].maximum + 1))
component.append(vector.BinaryVectorEmbedding(
name="upgrades_encoder",
input_name="upgrades_boolean_mask",
output_name="upgrades_embedding",
input_size=obs_spec["upgrades_fixed_length"].maximum + 1,
output_size=vector_stream_size))
prev_actions_embeddings = []
for arg in [util.Argument.FUNCTION,
util.Argument.DELAY,
util.Argument.QUEUED,
util.Argument.REPEAT]:
component.append(_get_prev_action_encoder(
action_spec=action_spec,
argument_name=arg,
is_training=is_training,
vector_stream_size=vector_stream_size))
prev_actions_embeddings.append(("prev_action_embeddings", arg))
component.append(merge.SumMerge(
name="vector_encoder_merge",
input_names=["game_loop_embedding", "unit_counts_bow_embedding",
"player_embedding", "mmr_embedding",
"home_race_req_embedding", "away_race_req_embedding",
"away_race_obs_embedding",
"upgrades_embedding"] + prev_actions_embeddings,
output_name="vector_stream",
stream_shape=(vector_stream_size,)))
return component
def get_units_encoder(obs_spec: types.ObsSpec,
action_spec: types.ActionSpec,
units_stream_size: int,
config: ml_collections.ConfigDict
) -> modular.Component:
"""The full units encoder, encodes the units inputs into units_stream."""
component = modular.SequentialComponent(name="units_encoder")
component.append(units.UnitsEncoder(
name="raw_units",
output_name="units_stream",
max_num_observed_units=obs_spec["raw_units"].shape[0],
num_raw_unit_features=obs_spec["raw_units"].shape[1],
units_stream_size=units_stream_size,
action_spec=action_spec,
**config.raw_units))
return component
def get_visual_encoder(obs_spec: types.ObsSpec,
action_spec: types.ActionSpec,
visual_features_size: int,
config: ml_collections.ConfigDict
) -> modular.Component:
"""The full visual encoder, encodes the visual inputs into visual_stream."""
world_size = util.get_world_size(action_spec)
minimap_features = [
"minimap_height_map",
"minimap_visibility_map",
"minimap_creep",
"minimap_player_relative",
"minimap_alerts",
"minimap_pathable",
"minimap_buildable"]
spatial_size = obs_spec[minimap_features[0]].shape[0]
# We assume all visual features have the same spatial size:
for feature_name in minimap_features:
assert obs_spec[feature_name].shape[0] == spatial_size
assert obs_spec[feature_name].shape[1] == spatial_size
component = modular.SequentialComponent(name="visual_encoder")
streams_to_merge = []
for feature_name in minimap_features:
if feature_name == "minimap_height_map":
component.append(visual.FeatureEncoder(
name="minimap_height_map",
input_name=("observation", feature_name),
output_name=f"{feature_name}_embedding",
input_spatial_size=spatial_size,
input_feature_size=None,
downscale_factor=config.downscale_factor,
output_features_size=visual_features_size,
**config.minimap_height_map))
else:
component.append(visual.Embedding(
name=feature_name,
input_name=("observation", feature_name),
output_name=f"{feature_name}_embedding",
input_spatial_size=spatial_size,
downscale_factor=config.downscale_factor,
num_classes=obs_spec[feature_name].maximum + 1,
output_features_size=visual_features_size,
**config[feature_name]))
streams_to_merge.append(f"{feature_name}_embedding")
component.append(visual.CameraEncoder(
name="camera_encoder",
output_name="camera_embedding",
input_spatial_size=world_size,
downscale_factor=config.downscale_factor * world_size // spatial_size,
output_features_size=visual_features_size))
streams_to_merge.append("camera_embedding")
component.append(merge.SumMerge(
name="visual_encoder_merge",
input_names=streams_to_merge,
output_name=f"visual_stream_ds{config.downscale_factor}",
stream_shape=(spatial_size // config.downscale_factor,
spatial_size // config.downscale_factor,
visual_features_size)))
return component
| alphastar-main | alphastar/architectures/standard/encoders.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dummy."""
from absl.testing import absltest
from absl.testing import parameterized
from alphastar.architectures.components import test_utils
from alphastar.architectures.standard import standard
from alphastar.architectures.standard.configs import lite as config_lite
class LiteTest(test_utils.ComponentTest):
"""Basic tests for the standard architecture."""
@parameterized.parameters(True, False)
def test_forward(self, is_training: bool):
"""Test that the forward pass does not crash, and has correct shapes."""
batch_size = 2
unroll_len = 3 if is_training else 1
overlap_len = 1 if is_training else 0
burnin_len = 0
input_spec, action_spec = test_utils.get_test_specs(is_training)
if is_training:
input_spec['behaviour_features'] = {'action': action_spec}
alphastar = standard.get_alphastar_standard(
input_spec=input_spec,
action_spec=action_spec,
is_training=is_training,
overlap_len=overlap_len,
burnin_len=burnin_len,
config=config_lite.get_config())
self._test_component(
alphastar, batch_size=batch_size, unroll_len=unroll_len)
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/architectures/standard/standard_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Head (logits and sampling) blocks."""
from typing import Sequence
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures import util
from alphastar.architectures.components import common
from alphastar.architectures.components import merge
from alphastar.architectures.components import units
from alphastar.architectures.components import vector
from alphastar.architectures.components import visual
import jax.numpy as jnp
import ml_collections
def _get_sampling(argument_name: types.ArgumentName,
num_logits: int,
is_training: bool,
config: ml_collections.ConfigDict) -> modular.Component:
"""Sampling module during inference, or teacher forcing during training."""
if is_training:
return common.ActionFromBehaviourFeatures(
name="action_from_behaviour_features",
argument_name=argument_name,
max_action_value=num_logits - 1)
else:
return common.Sample(
name="sample",
argument_name=argument_name,
num_logits=num_logits,
**config.sample)
def get_vector_head(argument_name: types.ArgumentName,
action_spec: types.ActionSpec,
vector_stream_size: int,
is_training: bool,
overlap_len: int,
config: ml_collections.ConfigDict
) -> modular.Component:
"""Produce logits and action for an argument and embed it to vector_stream."""
num_logits = action_spec[argument_name].maximum + 1
component = modular.SequentialComponent(name=f"{argument_name}_head")
component.append(vector.Resnet(
name="resnet",
input_size=vector_stream_size,
input_name="vector_stream",
output_name=f"{argument_name}_logits_vector_stream",
**config.resnet))
component.append(vector.Logits(
name="logits",
logits_output_name=("logits", argument_name),
mask_output_name=("masks", argument_name),
num_logits=num_logits,
input_size=vector_stream_size,
input_name=f"{argument_name}_logits_vector_stream",
**config.logits))
component.append(_get_sampling(
argument_name=argument_name,
num_logits=num_logits,
is_training=is_training,
config=config.sampling))
if argument_name == util.Argument.FUNCTION:
component.append(common.ArgumentMasks(
name="argument_masks",
action_spec=action_spec))
component.append(common.FeatureToNextState(
name="action_to_next_state",
input_name=("action", argument_name),
output_name=("action", argument_name),
stream_shape=(),
stream_dtype=jnp.int32,
overlap_len=overlap_len))
component.append(vector.Embedding(
name="action_embedding",
input_name=("action", argument_name),
output_name=f"{argument_name}_embedding",
mask_name=("argument_masks", argument_name),
num_classes=num_logits,
output_size=vector_stream_size))
component.append(merge.VectorMerge(
name="embedding_merge",
input_sizes={"vector_stream": vector_stream_size,
f"{argument_name}_embedding": vector_stream_size},
output_name="vector_stream",
output_size=vector_stream_size,
**config.embedding_merge))
return component
def get_unit_tags_head(obs_spec: types.ObsSpec,
action_spec: types.ActionSpec,
vector_stream_size: int,
units_stream_size: int,
is_training: bool,
config: ml_collections.ConfigDict) -> modular.Component:
"""Produce logits and action for unit_tags and embed it to vector_stream."""
num_logits = action_spec["unit_tags"].maximum + 1
max_num_selected_units = action_spec["unit_tags"].shape[0]
max_num_observed_units = obs_spec["raw_units"].shape[0]
assert num_logits == max_num_observed_units + 1
inner_component = modular.SequentialComponent(name="inner_component")
inner_component.append(units.PointerLogits(
name="logits",
max_num_observed_units=max_num_observed_units,
num_raw_unit_features=obs_spec["raw_units"].shape[1],
logits_output_name=("pre_logits", util.Argument.UNIT_TAGS),
mask_output_name=("pre_masks", util.Argument.UNIT_TAGS),
query_input_size=vector_stream_size,
keys_input_size=config.keys_mlp.layer_sizes[-1],
key_size=config.keys_mlp.layer_sizes[-1],
unit_tags_masking=units.UnitTagsMasking.SELECTABLE,
query_input_name="unit_tags_query",
keys_input_name="unit_tags_keys",
**config.inner_component.logits))
inner_component.append(units.FinalizeUnitTagsLogits(
name="finalize_unit_tags_logits",
input_logits_name=("pre_logits", util.Argument.UNIT_TAGS),
input_mask_name=("pre_masks", util.Argument.UNIT_TAGS),
output_logits_name=("logits", util.Argument.UNIT_TAGS),
output_mask_name=("masks", util.Argument.UNIT_TAGS),
max_num_observed_units=max_num_observed_units,
vector_input_name="unit_tags_query",
vector_input_size=vector_stream_size))
inner_component.append(_get_sampling(
argument_name=util.Argument.UNIT_TAGS,
num_logits=num_logits,
is_training=is_training,
config=config.inner_component.sampling))
inner_component.append(units.PointerEmbedding(
name="embedding",
num_embeddings=max_num_observed_units,
embeddings_size=config.small_embeddings_mlp.layer_sizes[-1],
index_input_name=("action", util.Argument.UNIT_TAGS),
embeddings_input_name="small_unit_tag_embeddings",
output_name="unit_tags_embedding"))
inner_component.append(merge.VectorMerge(
name="embedding_merge",
input_sizes={
"unit_tags_query": vector_stream_size,
"unit_tags_embedding": config.small_embeddings_mlp.layer_sizes[-1]},
output_name="unit_tags_query",
output_size=vector_stream_size,
**config.inner_component.embedding_merge))
component = modular.SequentialComponent(name="unit_tag_head")
component.append(vector.Resnet(
name="query_resnet",
input_size=vector_stream_size,
input_name="vector_stream",
output_name="unit_tags_query",
**config.query_resnet))
component.append(units.MLP(
name="keys_mlp",
max_num_observed_units=max_num_observed_units,
units_stream_size=units_stream_size,
input_name="units_stream",
output_name="unit_tags_keys",
**config.keys_mlp))
component.append(units.MLP(
name="small_embeddings_mlp",
max_num_observed_units=max_num_observed_units,
units_stream_size=units_stream_size,
input_name="units_stream",
output_name="small_unit_tag_embeddings",
**config.small_embeddings_mlp))
component.append(units.MLP(
name="large_embeddings_mlp",
max_num_observed_units=max_num_observed_units,
units_stream_size=units_stream_size,
input_name="units_stream",
output_name="large_unit_tag_embeddings",
**config.large_embeddings_mlp))
unit_tags_head_per_step_inputs = []
if is_training:
unit_tags_head_per_step_inputs.append(
("behaviour_features", "action", util.Argument.UNIT_TAGS))
component.append(units.UnitTagsHead(
name="recurrent_unit_tags_head",
inner_component=inner_component,
constant_inputs=["unit_tags_keys",
"small_unit_tag_embeddings",
("observation", "raw_units")],
carries=["unit_tags_query"],
per_step_inputs=unit_tags_head_per_step_inputs,
per_step_outputs=[("logits", util.Argument.UNIT_TAGS),
("masks", util.Argument.UNIT_TAGS),
("action", util.Argument.UNIT_TAGS)],
max_num_selected_units=max_num_selected_units,
max_num_observed_units=max_num_observed_units,
action_output=("action", util.Argument.UNIT_TAGS)))
component.append(units.BinaryVectorPointerEmbedding(
name="embedding",
num_embeddings=max_num_observed_units,
embeddings_size=config.large_embeddings_mlp.layer_sizes[-1],
mask_input_name="selected_unit_tags",
embeddings_input_name="large_unit_tag_embeddings",
output_name="unit_tags_embedding"))
component.append(merge.VectorMerge(
name="embedding_merge",
input_sizes={
"vector_stream": vector_stream_size,
"unit_tags_embedding": config.large_embeddings_mlp.layer_sizes[-1]},
output_name="vector_stream",
output_size=vector_stream_size,
**config.embedding_merge))
component.append(merge.UnitsMerge(
name="selected_units_merge",
input_sizes={"units_stream": units_stream_size,
"selected_unit_tags": None},
input_dtypes={"selected_unit_tags": jnp.bool_},
max_num_observed_units=max_num_observed_units,
output_name="units_stream_after_embedding",
output_size=units_stream_size,
**config.selected_units_merge))
return component
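# Note on the unit_tags head above (descriptive, based on the arguments passed
# to units.UnitTagsHead): the inner component is presumably applied once per
# selection step (up to max_num_selected_units), with "unit_tags_query" carried
# between steps and the keys/small embeddings held constant; the resulting
# selection ("selected_unit_tags") is then embedded back into vector_stream and
# merged into the units stream.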
def get_target_unit_tag_head(obs_spec: types.ObsSpec,
action_spec: types.ActionSpec,
vector_stream_size: int,
units_stream_size: int,
is_training: bool,
config: ml_collections.ConfigDict
) -> modular.Component:
"""Produce logits and action for target_unit_tag."""
num_logits = action_spec["target_unit_tag"].maximum + 1
assert num_logits == obs_spec["raw_units"].shape[0]
component = modular.SequentialComponent(name="target_unit_tag_head")
component.append(vector.Resnet(
name="resnet",
input_size=vector_stream_size,
input_name="vector_stream",
output_name="target_unit_tag_query",
**config.resnet))
component.append(units.PointerLogits(
name="logits",
max_num_observed_units=obs_spec["raw_units"].shape[0],
num_raw_unit_features=obs_spec["raw_units"].shape[1],
logits_output_name=("logits", util.Argument.TARGET_UNIT_TAG),
mask_output_name=("masks", util.Argument.TARGET_UNIT_TAG),
query_input_size=vector_stream_size,
keys_input_size=units_stream_size,
unit_tags_masking=units.UnitTagsMasking.TARGETABLE_WITH_CAMERA,
query_input_name="target_unit_tag_query",
keys_input_name="units_stream_after_embedding",
**config.logits))
component.append(_get_sampling(
argument_name=util.Argument.TARGET_UNIT_TAG,
num_logits=num_logits,
is_training=is_training,
config=config.sampling))
return component
def get_world_head(obs_spec: types.ObsSpec,
action_spec: types.ActionSpec,
vector_stream_size: int,
visual_stream_sizes: Sequence[int],
is_training: bool,
config: ml_collections.ConfigDict
) -> modular.Component:
"""Produce logits and action for an argument and embed it to vector stream."""
num_logits = action_spec["world"].maximum + 1
world_size = util.get_world_size(action_spec)
spatial_size = obs_spec["minimap_height_map"].shape[0]
component = modular.SequentialComponent(name="world_head")
full_downscale_factor = (
config.visual_upscale.upscale_factor ** len(visual_stream_sizes))
component.append(vector.ToVisual(
name="vector_to_visual",
input_name="vector_stream",
output_name=f"visual_stream_ds{full_downscale_factor}_from_vector",
input_size=vector_stream_size,
output_spatial_size=spatial_size // full_downscale_factor,
output_features_size=visual_stream_sizes[-1],
**config.vector_to_visual))
component.append(merge.SumMerge(
name="vector_to_visual_merge",
input_names=[
f"visual_stream_ds{full_downscale_factor}_from_vector",
f"visual_stream_ds{full_downscale_factor}"],
output_name=f"visual_stream_ds{full_downscale_factor}",
stream_shape=(spatial_size // full_downscale_factor,
spatial_size // full_downscale_factor,
visual_stream_sizes[-1]),
))
component.append(visual.Resnet(
name="resnet",
input_name=f"visual_stream_ds{full_downscale_factor}",
output_name=f"visual_stream_ds{full_downscale_factor}",
input_spatial_size=spatial_size // full_downscale_factor,
input_features_size=visual_stream_sizes[-1],
**config.resnet))
for i in range(len(visual_stream_sizes) - 1, 0, -1):
us = config.visual_upscale.upscale_factor
ids = int(us**(i + 1)) # input downsampling factor
ods = int(us**i) # output downsampling factor
component.append(visual.Upscale(
name=f"downscale_ds{ods}",
input_name=f"visual_stream_ds{ids}",
output_name=f"visual_stream_ds{ods}_upsample",
input_spatial_size=spatial_size // ids,
input_features_size=visual_stream_sizes[i],
output_features_size=visual_stream_sizes[i - 1],
**config.visual_upscale))
component.append(merge.SumMerge(
name=f"visual_head_merge_ds{ods}",
input_names=[
f"visual_stream_ds{ods}", f"visual_stream_ds{ods}_upsample"],
output_name=f"visual_stream_ds{ods}",
stream_shape=(spatial_size // ods,
spatial_size // ods,
visual_stream_sizes[i - 1]),
))
component.append(visual.Upscale(
name="downscale_ds1",
input_name=f"visual_stream_ds{ods}",
output_name="visual_stream_ds1",
input_spatial_size=spatial_size // ods,
input_features_size=visual_stream_sizes[0],
output_features_size=config.visual_feature_size_ds1,
**config.visual_upscale))
component.append(visual.Logits(
name="logits",
input_name="visual_stream_ds1",
input_spatial_size=spatial_size,
input_features_size=config.visual_feature_size_ds1,
logits_output_name=("logits", util.Argument.WORLD),
mask_output_name=("masks", util.Argument.WORLD),
upscale_factor=world_size // spatial_size,
kernel_size=world_size // spatial_size,
**config.logits))
component.append(_get_sampling(
argument_name=util.Argument.WORLD,
num_logits=num_logits,
is_training=is_training,
config=config.sampling))
return component
| alphastar-main | alphastar/architectures/standard/heads.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/architectures/standard/configs/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for the full version of the standard v3 architecture."""
from alphastar.architectures.components import merge
from alphastar.commons import sample
import jax.numpy as jnp
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""The config for the standard full architecture."""
config = ml_collections.ConfigDict()
config.encoders = ml_collections.ConfigDict()
config.encoders.vector = ml_collections.ConfigDict()
config.encoders.vector.game_loop = ml_collections.ConfigDict()
config.encoders.vector.unit_counts_bow = ml_collections.ConfigDict()
config.encoders.vector.player = ml_collections.ConfigDict()
config.encoders.vector.mmr = ml_collections.ConfigDict()
config.encoders.units = ml_collections.ConfigDict()
config.encoders.units.raw_units = ml_collections.ConfigDict()
config.encoders.visual = ml_collections.ConfigDict()
config.encoders.visual.minimap_height_map = ml_collections.ConfigDict()
config.encoders.visual.minimap_visibility_map = ml_collections.ConfigDict()
config.encoders.visual.minimap_creep = ml_collections.ConfigDict()
config.encoders.visual.minimap_player_relative = ml_collections.ConfigDict()
config.encoders.visual.minimap_alerts = ml_collections.ConfigDict()
config.encoders.visual.minimap_pathable = ml_collections.ConfigDict()
config.encoders.visual.minimap_buildable = ml_collections.ConfigDict()
config.torso = ml_collections.ConfigDict()
config.torso.vector_resnet_1 = ml_collections.ConfigDict()
config.torso.units_transformer = ml_collections.ConfigDict()
config.torso.scatter = ml_collections.ConfigDict()
config.torso.visual_downscale = ml_collections.ConfigDict()
config.torso.visual_resnet = ml_collections.ConfigDict()
config.torso.visual_to_vector = ml_collections.ConfigDict()
config.torso.units_to_vector = ml_collections.ConfigDict()
config.torso.vector_merge = ml_collections.ConfigDict()
config.torso.vector_resnet_2 = ml_collections.ConfigDict()
config.heads = ml_collections.ConfigDict()
config.heads.function = ml_collections.ConfigDict()
config.heads.function.resnet = ml_collections.ConfigDict()
config.heads.function.logits = ml_collections.ConfigDict()
config.heads.function.sampling = ml_collections.ConfigDict()
config.heads.function.sampling.sample = ml_collections.ConfigDict()
config.heads.function.embedding_merge = ml_collections.ConfigDict()
config.heads.delay = ml_collections.ConfigDict()
config.heads.delay.resnet = ml_collections.ConfigDict()
config.heads.delay.logits = ml_collections.ConfigDict()
config.heads.delay.sampling = ml_collections.ConfigDict()
config.heads.delay.sampling.sample = ml_collections.ConfigDict()
config.heads.delay.embedding_merge = ml_collections.ConfigDict()
config.heads.queued = ml_collections.ConfigDict()
config.heads.queued.resnet = ml_collections.ConfigDict()
config.heads.queued.logits = ml_collections.ConfigDict()
config.heads.queued.sampling = ml_collections.ConfigDict()
config.heads.queued.sampling.sample = ml_collections.ConfigDict()
config.heads.queued.embedding_merge = ml_collections.ConfigDict()
config.heads.repeat = ml_collections.ConfigDict()
config.heads.repeat.resnet = ml_collections.ConfigDict()
config.heads.repeat.logits = ml_collections.ConfigDict()
config.heads.repeat.sampling = ml_collections.ConfigDict()
config.heads.repeat.sampling.sample = ml_collections.ConfigDict()
config.heads.repeat.embedding_merge = ml_collections.ConfigDict()
config.heads.unit_tags = ml_collections.ConfigDict()
config.heads.unit_tags.query_resnet = ml_collections.ConfigDict()
config.heads.unit_tags.keys_mlp = ml_collections.ConfigDict()
config.heads.unit_tags.small_embeddings_mlp = ml_collections.ConfigDict()
config.heads.unit_tags.large_embeddings_mlp = ml_collections.ConfigDict()
config.heads.unit_tags.inner_component = ml_collections.ConfigDict()
config.heads.unit_tags.inner_component.logits = ml_collections.ConfigDict()
config.heads.unit_tags.inner_component.sampling = ml_collections.ConfigDict()
config.heads.unit_tags.inner_component.sampling.sample = (
ml_collections.ConfigDict())
config.heads.unit_tags.inner_component.embedding_merge = (
ml_collections.ConfigDict())
config.heads.unit_tags.embedding_merge = ml_collections.ConfigDict()
config.heads.unit_tags.selected_units_merge = ml_collections.ConfigDict()
config.heads.target_unit_tag = ml_collections.ConfigDict()
config.heads.target_unit_tag.resnet = ml_collections.ConfigDict()
config.heads.target_unit_tag.logits = ml_collections.ConfigDict()
config.heads.target_unit_tag.sampling = ml_collections.ConfigDict()
config.heads.target_unit_tag.sampling.sample = ml_collections.ConfigDict()
config.heads.world = ml_collections.ConfigDict()
config.heads.world.vector_to_visual = ml_collections.ConfigDict()
config.heads.world.resnet = ml_collections.ConfigDict()
config.heads.world.visual_upscale = ml_collections.ConfigDict()
config.heads.world.logits = ml_collections.ConfigDict()
config.heads.world.sampling = ml_collections.ConfigDict()
config.heads.world.sampling.sample = ml_collections.ConfigDict()
# Streams:
config.vector_stream_size = 1024
config.units_stream_size = 256
config.visual_stream_sizes = [64, 128, 128]
# Encoders:
config.encoders.vector.game_loop.encoding_size = 512
config.encoders.vector.game_loop.t_min = 1
config.encoders.vector.game_loop.t_max = 100_000
config.encoders.vector.unit_counts_bow.fun = jnp.sqrt
config.encoders.vector.player.fun = jnp.log1p
config.encoders.vector.mmr.num_classes = 7
config.encoders.vector.mmr.fun = lambda x: x * 0.001
# Note: these numbers are environment dependent:
config.encoders.units.raw_units.num_unit_types = 256
config.encoders.units.raw_units.num_buff_types = 46
config.encoders.visual.downscale_factor = 2
config.encoders.visual.minimap_height_map.fun = lambda x: x / 255.
config.encoders.visual.minimap_height_map.kernel_size = 2
config.encoders.visual.minimap_visibility_map.kernel_size = 2
config.encoders.visual.minimap_creep.kernel_size = 2
config.encoders.visual.minimap_player_relative.kernel_size = 2
config.encoders.visual.minimap_alerts.kernel_size = 2
config.encoders.visual.minimap_pathable.kernel_size = 2
config.encoders.visual.minimap_buildable.kernel_size = 2
# Torso:
config.torso.vector_resnet_1.num_resblocks = 4
config.torso.vector_resnet_1.use_layer_norm = True
config.torso.units_transformer.transformer_num_layers = 3
config.torso.units_transformer.transformer_num_heads = 2
config.torso.units_transformer.transformer_key_size = 128
config.torso.units_transformer.transformer_value_size = 128
config.torso.units_transformer.resblocks_num_before = 2
config.torso.units_transformer.resblocks_num_after = 2
config.torso.units_transformer.resblocks_hidden_size = 128
config.torso.units_transformer.use_layer_norm = True
config.torso.scatter.units_hidden_sizes = [64]
config.torso.scatter.kernel_size = 3
config.torso.scatter.use_layer_norm = True
config.torso.visual_downscale.downscale_factor = 2
config.torso.visual_downscale.kernel_size = 4
config.torso.visual_resnet.num_resblocks = 4
config.torso.visual_resnet.kernel_size = 3
config.torso.visual_resnet.use_layer_norm = True
config.torso.visual_resnet.num_hidden_feature_planes = None
config.torso.visual_to_vector.hidden_feature_sizes = [128]
config.torso.visual_to_vector.downscale_factor = 2
config.torso.visual_to_vector.use_layer_norm = True
config.torso.visual_to_vector.kernel_size = 4
config.torso.units_to_vector.units_hidden_sizes = [256, 1024]
config.torso.units_to_vector.use_layer_norm = True
config.torso.vector_merge.gating_type = merge.GatingType.POINTWISE
config.torso.vector_resnet_2.num_resblocks = 8
config.torso.vector_resnet_2.use_layer_norm = True
# Heads:
config.heads.function.resnet.num_resblocks = 16
config.heads.function.resnet.use_layer_norm = True
config.heads.function.logits.num_linear_layers = 1
config.heads.function.logits.use_layer_norm = True
config.heads.function.sampling.sample.sample_fn = sample.sample
config.heads.function.embedding_merge.gating_type = merge.GatingType.POINTWISE
config.heads.delay.resnet.num_resblocks = 8
config.heads.delay.resnet.use_layer_norm = True
config.heads.delay.logits.num_linear_layers = 1
config.heads.delay.logits.use_layer_norm = True
config.heads.delay.sampling.sample.sample_fn = sample.sample
config.heads.delay.embedding_merge.gating_type = merge.GatingType.POINTWISE
config.heads.queued.resnet.num_resblocks = 4
config.heads.queued.resnet.use_layer_norm = True
config.heads.queued.logits.num_linear_layers = 1
config.heads.queued.logits.use_layer_norm = True
config.heads.queued.sampling.sample.sample_fn = sample.sample
config.heads.queued.embedding_merge.gating_type = merge.GatingType.POINTWISE
config.heads.repeat.resnet.num_resblocks = 4
config.heads.repeat.resnet.use_layer_norm = True
config.heads.repeat.logits.num_linear_layers = 1
config.heads.repeat.logits.use_layer_norm = True
config.heads.repeat.sampling.sample.sample_fn = sample.sample
config.heads.repeat.embedding_merge.gating_type = merge.GatingType.POINTWISE
config.heads.unit_tags.query_resnet.num_resblocks = 8
config.heads.unit_tags.query_resnet.use_layer_norm = True
config.heads.unit_tags.keys_mlp.layer_sizes = [256, 64]
config.heads.unit_tags.small_embeddings_mlp.layer_sizes = [128, 32]
config.heads.unit_tags.large_embeddings_mlp.layer_sizes = [256, 256]
config.heads.unit_tags.inner_component.logits.num_layers_query = 1
config.heads.unit_tags.inner_component.logits.num_layers_keys = 0
config.heads.unit_tags.inner_component.logits.use_layer_norm = True
config.heads.unit_tags.inner_component.sampling.sample.sample_fn = sample.sample
config.heads.unit_tags.inner_component.embedding_merge.gating_type = merge.GatingType.NONE
config.heads.unit_tags.embedding_merge.gating_type = merge.GatingType.GLOBAL
config.heads.unit_tags.selected_units_merge.gating_type = merge.GatingType.GLOBAL
config.heads.target_unit_tag.resnet.num_resblocks = 8
config.heads.target_unit_tag.resnet.use_layer_norm = True
config.heads.target_unit_tag.logits.num_layers_query = 1
config.heads.target_unit_tag.logits.num_layers_keys = 3
config.heads.target_unit_tag.logits.key_size = 64
config.heads.target_unit_tag.logits.use_layer_norm = True
config.heads.target_unit_tag.sampling.sample.sample_fn = sample.sample
config.heads.world.visual_feature_size_ds1 = 8
config.heads.world.vector_to_visual.hidden_feature_sizes = [128, 128]
config.heads.world.vector_to_visual.upscale_factor = 2
config.heads.world.vector_to_visual.use_layer_norm = True
config.heads.world.vector_to_visual.kernel_size = 4
config.heads.world.resnet.num_resblocks = 4
config.heads.world.resnet.kernel_size = 3
config.heads.world.resnet.use_layer_norm = True
config.heads.world.resnet.num_hidden_feature_planes = None
config.heads.world.visual_upscale.upscale_factor = 2
config.heads.world.visual_upscale.kernel_size = 4
config.heads.world.logits.use_layer_norm = True
config.heads.world.logits.use_depth_to_space = True
config.heads.world.sampling.sample.sample_fn = sample.sample
return config
| alphastar-main | alphastar/architectures/standard/configs/full.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for a "lite" version of the standard v3 architecture."""
from alphastar.architectures.components import merge
from alphastar.commons import sample
import jax.numpy as jnp
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""The config for the standard lite architecture."""
config = ml_collections.ConfigDict()
config.encoders = ml_collections.ConfigDict()
config.encoders.vector = ml_collections.ConfigDict()
config.encoders.vector.game_loop = ml_collections.ConfigDict()
config.encoders.vector.unit_counts_bow = ml_collections.ConfigDict()
config.encoders.vector.player = ml_collections.ConfigDict()
config.encoders.vector.mmr = ml_collections.ConfigDict()
config.encoders.units = ml_collections.ConfigDict()
config.encoders.units.raw_units = ml_collections.ConfigDict()
config.encoders.visual = ml_collections.ConfigDict()
config.encoders.visual.minimap_height_map = ml_collections.ConfigDict()
config.encoders.visual.minimap_visibility_map = ml_collections.ConfigDict()
config.encoders.visual.minimap_creep = ml_collections.ConfigDict()
config.encoders.visual.minimap_player_relative = ml_collections.ConfigDict()
config.encoders.visual.minimap_alerts = ml_collections.ConfigDict()
config.encoders.visual.minimap_pathable = ml_collections.ConfigDict()
config.encoders.visual.minimap_buildable = ml_collections.ConfigDict()
config.torso = ml_collections.ConfigDict()
config.torso.vector_resnet_1 = ml_collections.ConfigDict()
config.torso.units_transformer = ml_collections.ConfigDict()
config.torso.scatter = ml_collections.ConfigDict()
config.torso.visual_downscale = ml_collections.ConfigDict()
config.torso.visual_resnet = ml_collections.ConfigDict()
config.torso.visual_to_vector = ml_collections.ConfigDict()
config.torso.units_to_vector = ml_collections.ConfigDict()
config.torso.vector_merge = ml_collections.ConfigDict()
config.torso.vector_resnet_2 = ml_collections.ConfigDict()
config.heads = ml_collections.ConfigDict()
config.heads.function = ml_collections.ConfigDict()
config.heads.function.resnet = ml_collections.ConfigDict()
config.heads.function.logits = ml_collections.ConfigDict()
config.heads.function.sampling = ml_collections.ConfigDict()
config.heads.function.sampling.sample = ml_collections.ConfigDict()
config.heads.function.embedding_merge = ml_collections.ConfigDict()
config.heads.delay = ml_collections.ConfigDict()
config.heads.delay.resnet = ml_collections.ConfigDict()
config.heads.delay.logits = ml_collections.ConfigDict()
config.heads.delay.sampling = ml_collections.ConfigDict()
config.heads.delay.sampling.sample = ml_collections.ConfigDict()
config.heads.delay.embedding_merge = ml_collections.ConfigDict()
config.heads.queued = ml_collections.ConfigDict()
config.heads.queued.resnet = ml_collections.ConfigDict()
config.heads.queued.logits = ml_collections.ConfigDict()
config.heads.queued.sampling = ml_collections.ConfigDict()
config.heads.queued.sampling.sample = ml_collections.ConfigDict()
config.heads.queued.embedding_merge = ml_collections.ConfigDict()
config.heads.repeat = ml_collections.ConfigDict()
config.heads.repeat.resnet = ml_collections.ConfigDict()
config.heads.repeat.logits = ml_collections.ConfigDict()
config.heads.repeat.sampling = ml_collections.ConfigDict()
config.heads.repeat.sampling.sample = ml_collections.ConfigDict()
config.heads.repeat.embedding_merge = ml_collections.ConfigDict()
config.heads.unit_tags = ml_collections.ConfigDict()
config.heads.unit_tags.query_resnet = ml_collections.ConfigDict()
config.heads.unit_tags.keys_mlp = ml_collections.ConfigDict()
config.heads.unit_tags.small_embeddings_mlp = ml_collections.ConfigDict()
config.heads.unit_tags.large_embeddings_mlp = ml_collections.ConfigDict()
config.heads.unit_tags.inner_component = ml_collections.ConfigDict()
config.heads.unit_tags.inner_component.logits = ml_collections.ConfigDict()
config.heads.unit_tags.inner_component.sampling = ml_collections.ConfigDict()
config.heads.unit_tags.inner_component.sampling.sample = (
ml_collections.ConfigDict())
config.heads.unit_tags.inner_component.embedding_merge = (
ml_collections.ConfigDict())
config.heads.unit_tags.embedding_merge = ml_collections.ConfigDict()
config.heads.unit_tags.selected_units_merge = ml_collections.ConfigDict()
config.heads.target_unit_tag = ml_collections.ConfigDict()
config.heads.target_unit_tag.resnet = ml_collections.ConfigDict()
config.heads.target_unit_tag.logits = ml_collections.ConfigDict()
config.heads.target_unit_tag.sampling = ml_collections.ConfigDict()
config.heads.target_unit_tag.sampling.sample = ml_collections.ConfigDict()
config.heads.world = ml_collections.ConfigDict()
config.heads.world.vector_to_visual = ml_collections.ConfigDict()
config.heads.world.resnet = ml_collections.ConfigDict()
config.heads.world.visual_upscale = ml_collections.ConfigDict()
config.heads.world.logits = ml_collections.ConfigDict()
config.heads.world.sampling = ml_collections.ConfigDict()
config.heads.world.sampling.sample = ml_collections.ConfigDict()
# Streams:
config.vector_stream_size = 128
config.units_stream_size = 32
config.visual_stream_sizes = [4, 8, 16]
# Encoders:
config.encoders.vector.game_loop.encoding_size = 512
config.encoders.vector.game_loop.t_min = 1
config.encoders.vector.game_loop.t_max = 100_000
config.encoders.vector.unit_counts_bow.fun = jnp.sqrt
config.encoders.vector.player.fun = jnp.log1p
config.encoders.vector.mmr.num_classes = 7
config.encoders.vector.mmr.fun = lambda x: x * 0.001
# Note: these numbers are environment dependent:
config.encoders.units.raw_units.num_unit_types = 256
config.encoders.units.raw_units.num_buff_types = 46
config.encoders.visual.downscale_factor = 2
config.encoders.visual.minimap_height_map.fun = lambda x: x / 255.
config.encoders.visual.minimap_height_map.kernel_size = 2
config.encoders.visual.minimap_visibility_map.kernel_size = 2
config.encoders.visual.minimap_creep.kernel_size = 2
config.encoders.visual.minimap_player_relative.kernel_size = 2
config.encoders.visual.minimap_alerts.kernel_size = 2
config.encoders.visual.minimap_pathable.kernel_size = 2
config.encoders.visual.minimap_buildable.kernel_size = 2
# Torso:
config.torso.vector_resnet_1.num_resblocks = 2
config.torso.vector_resnet_1.use_layer_norm = True
config.torso.units_transformer.transformer_num_layers = 1
config.torso.units_transformer.transformer_num_heads = 2
config.torso.units_transformer.transformer_key_size = 32
config.torso.units_transformer.transformer_value_size = 16
config.torso.units_transformer.resblocks_num_before = 1
config.torso.units_transformer.resblocks_num_after = 1
config.torso.units_transformer.resblocks_hidden_size = None
config.torso.units_transformer.use_layer_norm = True
config.torso.scatter.units_hidden_sizes = [16]
config.torso.scatter.kernel_size = 3
config.torso.scatter.use_layer_norm = True
config.torso.visual_downscale.downscale_factor = 2
config.torso.visual_downscale.kernel_size = 2
config.torso.visual_resnet.num_resblocks = 2
config.torso.visual_resnet.kernel_size = 3
config.torso.visual_resnet.use_layer_norm = True
config.torso.visual_resnet.num_hidden_feature_planes = None
config.torso.visual_to_vector.hidden_feature_sizes = [32, 64]
config.torso.visual_to_vector.downscale_factor = 2
config.torso.visual_to_vector.use_layer_norm = True
config.torso.visual_to_vector.kernel_size = 2
config.torso.units_to_vector.units_hidden_sizes = [64]
config.torso.units_to_vector.use_layer_norm = True
config.torso.vector_merge.gating_type = merge.GatingType.POINTWISE
config.torso.vector_resnet_2.num_resblocks = 2
config.torso.vector_resnet_2.use_layer_norm = True
# Heads:
config.heads.function.resnet.num_resblocks = 4
config.heads.function.resnet.use_layer_norm = True
config.heads.function.logits.num_linear_layers = 1
config.heads.function.logits.use_layer_norm = True
config.heads.function.sampling.sample.sample_fn = sample.sample
config.heads.function.embedding_merge.gating_type = merge.GatingType.GLOBAL
config.heads.delay.resnet.num_resblocks = 2
config.heads.delay.resnet.use_layer_norm = True
config.heads.delay.logits.num_linear_layers = 1
config.heads.delay.logits.use_layer_norm = True
config.heads.delay.sampling.sample.sample_fn = sample.sample
config.heads.delay.embedding_merge.gating_type = merge.GatingType.GLOBAL
config.heads.queued.resnet.num_resblocks = 0
config.heads.queued.resnet.use_layer_norm = True
config.heads.queued.logits.num_linear_layers = 1
config.heads.queued.logits.use_layer_norm = True
config.heads.queued.sampling.sample.sample_fn = sample.sample
config.heads.queued.embedding_merge.gating_type = merge.GatingType.GLOBAL
config.heads.repeat.resnet.num_resblocks = 0
config.heads.repeat.resnet.use_layer_norm = True
config.heads.repeat.logits.num_linear_layers = 1
config.heads.repeat.logits.use_layer_norm = True
config.heads.repeat.sampling.sample.sample_fn = sample.sample
config.heads.repeat.embedding_merge.gating_type = merge.GatingType.GLOBAL
config.heads.unit_tags.query_resnet.num_resblocks = 1
config.heads.unit_tags.query_resnet.use_layer_norm = True
config.heads.unit_tags.keys_mlp.layer_sizes = [64, 16]
config.heads.unit_tags.small_embeddings_mlp.layer_sizes = [32, 8]
config.heads.unit_tags.large_embeddings_mlp.layer_sizes = [64, 64]
config.heads.unit_tags.inner_component.logits.num_layers_query = 1
config.heads.unit_tags.inner_component.logits.num_layers_keys = 0
config.heads.unit_tags.inner_component.logits.use_layer_norm = True
config.heads.unit_tags.inner_component.sampling.sample.sample_fn = sample.sample
config.heads.unit_tags.inner_component.embedding_merge.gating_type = merge.GatingType.NONE
config.heads.unit_tags.embedding_merge.gating_type = merge.GatingType.GLOBAL
config.heads.unit_tags.selected_units_merge.gating_type = merge.GatingType.GLOBAL
config.heads.target_unit_tag.resnet.num_resblocks = 1
config.heads.target_unit_tag.resnet.use_layer_norm = True
config.heads.target_unit_tag.logits.num_layers_query = 1
config.heads.target_unit_tag.logits.num_layers_keys = 1
config.heads.target_unit_tag.logits.key_size = 16
config.heads.target_unit_tag.logits.use_layer_norm = True
config.heads.target_unit_tag.sampling.sample.sample_fn = sample.sample
config.heads.world.visual_feature_size_ds1 = 2
config.heads.world.vector_to_visual.hidden_feature_sizes = [32, 32]
config.heads.world.vector_to_visual.upscale_factor = 2
config.heads.world.vector_to_visual.use_layer_norm = True
config.heads.world.vector_to_visual.kernel_size = 2
config.heads.world.resnet.num_resblocks = 2
config.heads.world.resnet.kernel_size = 3
config.heads.world.resnet.use_layer_norm = True
config.heads.world.resnet.num_hidden_feature_planes = None
config.heads.world.visual_upscale.upscale_factor = 2
config.heads.world.visual_upscale.kernel_size = 2
config.heads.world.logits.use_layer_norm = True
config.heads.world.logits.use_depth_to_space = False
config.heads.world.sampling.sample.sample_fn = sample.sample
return config
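# Example usage (illustrative sketch): the object returned by `get_config` is a
# plain ml_collections.ConfigDict, so callers can load it and override
# individual fields before building the architecture. The override values below
# are arbitrary examples.
#
#   config = get_config()
#   config.vector_stream_size = 256
#   config.torso.vector_resnet_1.num_resblocks = 4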
| alphastar-main | alphastar/architectures/standard/configs/lite.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for merge."""
from typing import Sequence
from absl.testing import absltest
from absl.testing import parameterized
from alphastar import types
from alphastar.architectures.components import merge
from alphastar.architectures.components import test_utils
import jax.numpy as jnp
class MergeTest(test_utils.ComponentTest):
"""Basic tests for the merge components."""
@parameterized.product(
is_training=[True, False],
input_names=[['stream'], ['stream1', 'stream2']],
stream_shape=[(1,), (3, 4), (2, 1, 4)])
def test_SumMerge(self,
is_training: bool,
input_names: Sequence[types.StreamType],
stream_shape: Sequence[int]):
component = merge.SumMerge(
input_names=input_names,
output_name='stream',
stream_shape=stream_shape)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_sizes_and_dtypes=[
({'stream': 4}, None),
({'stream': 4}, {'stream': jnp.int32}),
({'stream1': 3, 'stream2': None}, None),
({'stream1': 3, 'stream2': 4, 'stream3': 1}, None),
({'stream1': None, 'stream2': None, 'stream3': 3},
{'stream1': jnp.bool_, 'stream2': jnp.int32}),],
output_size=[1, 3],
gating_type=list(merge.GatingType),
use_layer_norm=[True, False])
def test_VectorMerge(
self,
is_training: bool,
input_sizes_and_dtypes,
output_size: int,
gating_type: merge.GatingType,
use_layer_norm: bool = True):
component = merge.VectorMerge(
input_sizes=input_sizes_and_dtypes[0],
output_name='stream',
output_size=output_size,
gating_type=gating_type,
use_layer_norm=use_layer_norm,
input_dtypes=input_sizes_and_dtypes[1])
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.product(
is_training=[True, False],
input_sizes_and_dtypes=[
({'stream': 4}, None),
({'stream': 4}, {'stream': jnp.int32}),
({'stream1': 3, 'stream2': None}, None),
({'stream1': 3, 'stream2': 4, 'stream3': 1}, None),
({'stream1': None, 'stream2': None, 'stream3': 3},
{'stream1': jnp.bool_, 'stream2': jnp.int32}),],
output_size=[1, 3],
gating_type=list(merge.GatingType),
use_layer_norm=[True, False])
def test_UnitsMerge(
self,
is_training: bool,
input_sizes_and_dtypes,
output_size: int,
gating_type: merge.GatingType,
use_layer_norm: bool = True):
input_spec, _ = test_utils.get_test_specs(is_training)
component = merge.UnitsMerge(
max_num_observed_units=input_spec['observation', 'raw_units'].shape[0],
input_sizes=input_sizes_and_dtypes[0],
output_name='stream',
output_size=output_size,
gating_type=gating_type,
use_layer_norm=use_layer_norm,
input_dtypes=input_sizes_and_dtypes[1])
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/architectures/components/merge_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to test components."""
from typing import Tuple
from absl.testing import parameterized
from alphastar import types
from alphastar.architectures import modular
from alphastar.unplugged.data import util as data_util
from dm_env import specs
import haiku as hk
import jax
import jax.numpy as jnp
def get_test_specs(is_training: bool
) -> Tuple[types.SpecDict, types.ActionSpec]:
"""Return some input_spec and action_spec for testing."""
max_num_selected_units = 4
obs_spec = types.SpecDict()
obs_spec['away_race_observed'] = specs.Array((), jnp.int32)
obs_spec['away_race_requested'] = specs.Array((), jnp.int32)
obs_spec['camera'] = specs.Array((256, 256), jnp.int32)
obs_spec['camera_position'] = specs.Array((2,), jnp.int32)
obs_spec['camera_size'] = specs.Array((2,), jnp.int32)
obs_spec['game_loop'] = specs.Array((), jnp.int32)
obs_spec['home_race_requested'] = specs.Array((), jnp.int32)
obs_spec['minimap_alerts'] = specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=5)
obs_spec['minimap_buildable'] = specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=1)
obs_spec['minimap_creep'] = specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=2)
obs_spec['minimap_height_map'] = specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=255)
obs_spec['minimap_pathable'] = specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=1)
obs_spec['minimap_player_relative'] = specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=3)
obs_spec['minimap_visibility_map'] = specs.BoundedArray(
(128, 128), jnp.int32, minimum=0, maximum=2)
obs_spec['mmr'] = specs.Array((), jnp.int32)
obs_spec['player'] = specs.Array((7,), jnp.int32)
obs_spec['raw_units'] = specs.Array((10, 47), jnp.int32)
obs_spec['unit_counts_bow'] = specs.Array((5,), jnp.int32)
obs_spec['upgrades_fixed_length'] = specs.BoundedArray(
(8,), jnp.int32, minimum=0, maximum=13)
action_spec = types.SpecDict()
action_spec['function'] = specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=10)
action_spec['delay'] = specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=6)
action_spec['queued'] = specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=2)
action_spec['repeat'] = specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=4)
action_spec['unit_tags'] = specs.BoundedArray(
(max_num_selected_units,), jnp.int32, minimum=0, maximum=10)
action_spec['target_unit_tag'] = specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=9)
action_spec['world'] = specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=256**2 - 1)
input_spec = types.SpecDict()
input_spec['observation'] = obs_spec
input_spec['step_type'] = specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=2)
if is_training:
input_spec['behaviour_features', 'action'] = action_spec.copy()
# Delay behaviour feature input is unbounded:
input_spec['behaviour_features', 'action', 'delay'] = specs.BoundedArray(
(), jnp.int32, minimum=0, maximum=1_000_000_000)
return input_spec, action_spec
class ComponentTest(parameterized.TestCase):
"""Basic class to test component input/output consistency."""
def _test_component(self,
component: modular.Component,
batch_size: int = 2,
unroll_len: int = 3):
"""Test that the forward pass does not crash, and has correct shapes."""
inputs = data_util.get_dummy_observation(
component.input_spec, batch_size=batch_size, unroll_len=unroll_len)
prev_state = data_util.get_dummy_observation(
component.prev_state_spec, batch_size=batch_size, unroll_len=None)
rng_key = jax.random.PRNGKey(42)
initial_state_init, initial_state_apply = hk.transform(
jax.vmap(component.initial_state, axis_size=batch_size))
initial_state = initial_state_apply(initial_state_init(rng_key), rng_key)
component.next_state_spec.validate(
initial_state, num_leading_dims_to_ignore=1)
forward_init, forward_apply = hk.transform_with_state(
jax.vmap(component.unroll))
params, hk_state = forward_init(rng_key, inputs, prev_state)
(outputs, next_state, _), _ = forward_apply(
params, hk_state, rng_key, inputs, prev_state)
for v in outputs.values():
self.assertEqual(v.shape[:2], (batch_size, unroll_len))
component.output_spec.validate(outputs, num_leading_dims_to_ignore=2)
for v in next_state.values():
self.assertEqual(v.shape[0], batch_size)
component.next_state_spec.validate(next_state, num_leading_dims_to_ignore=1)
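# Example (illustrative sketch): concrete tests subclass ComponentTest and call
# `_test_component` on a constructed component, as merge_test.py does. The
# component below is hypothetical.
#
#   class MyComponentTest(test_utils.ComponentTest):
#
#     def test_my_component(self):
#       component = my_module.MyComponent(...)  # hypothetical component
#       self._test_component(component, batch_size=2, unroll_len=3)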
| alphastar-main | alphastar/architectures/components/test_utils.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Components to merge streams."""
import enum
from typing import List, Mapping, Optional, Sequence
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures.components import util
from dm_env import specs
import haiku as hk
import jax
import jax.numpy as jnp
class GatingType(enum.Enum):
"""Defines how the tensors are gated and aggregated in modules."""
NONE = 'none'
GLOBAL = 'global'
POINTWISE = 'pointwise'
class SumMerge(modular.BatchedComponent):
"""Merge streams using a simple sum (faster than Merge, for large stream).
Streams must have the same size.
This module can merge any type of stream (vector, units or visual).
"""
def __init__(self,
input_names: Sequence[types.StreamType],
output_name: types.StreamType,
stream_shape: Sequence[int],
name: Optional[str] = None):
"""Initializes SumMerge module.
Args:
input_names: The name of the inputs to sum. They must all have shape
`stream_shape` and dtype float32.
output_name: The name to give to the output of this module, of shape
`stream_shape` and dtype float32.
stream_shape: The shape of the inputs and outputs.
name: The name of this component.
"""
super().__init__(name=name)
if not input_names:
raise ValueError('input_names cannot be empty')
self._input_names = input_names
self._output_name = output_name
self._stream_spec = specs.Array(shape=stream_shape, dtype=jnp.float32)
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
name: self._stream_spec for name in self._input_names})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({self._output_name: self._stream_spec})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
outputs = types.StreamDict({
self._output_name: sum([inputs[name] for name in self._input_names])})
return outputs, {}
class VectorMerge(modular.BatchedComponent):
"""Merge vector streams.
Streams are first transformed through layer normalization, relu and linear
layers, then summed, so they don't need to have the same size.
  Gating can also be used before the sum. If `gating_type` is not NONE, the
  sum is weighted, using a softmax computed from the intermediate activations
  (after layer normalization and relu).
"""
def __init__(self,
input_sizes: Mapping[types.StreamType, Optional[int]],
output_name: types.StreamType,
output_size: int,
gating_type: GatingType = GatingType.NONE,
use_layer_norm: bool = True,
input_dtypes: Optional[Mapping[types.StreamType,
jnp.dtype]] = None,
name: Optional[str] = None):
"""Initializes VectorMerge module.
Args:
input_sizes: A dictionary mapping input names to their size (a single
integer for 1d inputs, or None for 0d inputs).
If an input size is None, we assume it's ().
output_name: The name to give to the output of this module, of shape
[output_size] and dtype float32.
output_size: The size of the output vector.
gating_type: The type of gating mechanism to use.
use_layer_norm: Whether to use layer normalization.
input_dtypes: An optional dictionary with the dtypes of the inputs. If an
input is missing from this dictionary, its dtype is assumed to be
float32.
name: The name of this component.
"""
super().__init__(name=name)
if not input_sizes:
raise ValueError('input_names cannot be empty')
self._input_sizes = input_sizes
self._output_name = output_name
self._output_size = output_size
self._gating_type = gating_type
self._use_layer_norm = use_layer_norm
self._input_dtypes = dict(input_dtypes or {})
missing_inputs = set(self._input_dtypes).difference(set(self._input_sizes))
if missing_inputs:
raise ValueError(f'Inputs {missing_inputs} are in input_dtypes but not '
'in input_sizes')
for name in self._input_sizes:
if name not in self._input_dtypes:
self._input_dtypes[name] = jnp.float32
@property
def input_spec(self) -> types.SpecDict:
spec = types.SpecDict()
for name, size in self._input_sizes.items():
if size is not None:
spec[name] = specs.Array((size,), self._input_dtypes[name])
else:
spec[name] = specs.Array((), self._input_dtypes[name])
return spec
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict(
{self._output_name: specs.Array((self._output_size,), jnp.float32)})
def _compute_gate(
self,
inputs_to_gate: List[types.StreamDict],
init_gate: List[types.StreamDict]
):
w_init = hk.initializers.RandomNormal(stddev=0.005)
b_init = hk.initializers.Constant(0.)
if self._gating_type is GatingType.GLOBAL:
gate_size = 1
elif self._gating_type is GatingType.POINTWISE:
gate_size = self._output_size
else:
raise ValueError(f'Gating type {self._gating_type} is not supported')
if len(inputs_to_gate) == 2:
# more efficient than the general version below
gate = [hk.Linear(gate_size, w_init=w_init, b_init=b_init)(y)
for y in init_gate]
gate = sum(gate)
sigmoid = jax.nn.sigmoid(gate)
gate = [sigmoid, 1. - sigmoid]
else:
gate = [
hk.Linear(
len(inputs_to_gate) * gate_size, w_init=w_init, b_init=b_init)(y)
for y in init_gate
]
gate = sum(gate)
gate = jnp.reshape(gate, [len(inputs_to_gate), gate_size])
gate = jax.nn.softmax(gate, axis=0)
gate = [gate[i] for i in range(gate.shape[0])]
return gate
def _encode(self, inputs: types.StreamDict):
gate, outputs = [], []
for name, size in self._input_sizes.items():
feature = inputs[name]
if size is None:
feature = feature[jnp.newaxis]
feature = util.astype(feature, jnp.float32)
if self._use_layer_norm:
feature = util.vector_layer_norm(feature)
feature = jax.nn.relu(feature)
gate.append(feature)
outputs.append(hk.Linear(self._output_size)(feature))
return gate, outputs
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
gate, outputs = self._encode(inputs)
if len(outputs) == 1:
      # Special case of a single input; no gating needed.
output = outputs[0]
elif self._gating_type is GatingType.NONE:
output = sum(outputs)
else:
gate = self._compute_gate(outputs, gate)
data = [g * d for g, d in zip(gate, outputs)]
output = sum(data)
outputs = types.StreamDict({self._output_name: output})
return outputs, {}
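# Example usage (illustrative sketch): merging two vector streams of different
# sizes into a single 128-dimensional stream with pointwise gating. The stream
# names and sizes are arbitrary; see test_utils.ComponentTest for how such a
# component is transformed and applied with haiku/jax.
#
#   merger = VectorMerge(
#       input_sizes={'vector_stream': 64, 'action_embedding': 32},
#       output_name='vector_stream',
#       output_size=128,
#       gating_type=GatingType.POINTWISE)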
class UnitsMerge(VectorMerge):
"""Merge units streams. Applies VectorMerge unit-wise."""
def __init__(
self,
input_sizes: Mapping[types.StreamType, Optional[int]],
max_num_observed_units: int,
output_name: types.StreamType,
output_size: int,
gating_type: GatingType = GatingType.NONE,
use_layer_norm: bool = True,
input_dtypes: Optional[Mapping[types.StreamType, jnp.dtype]] = None,
name: Optional[str] = None):
"""Initializes UnitsMerge module.
Args:
input_sizes: A dictionary mapping input names to their number of feature
per unit (a single integer n for inputs of shape
[max_num_observed_units, n], or None for inputs of shape
[max_num_observed_units]).
      max_num_observed_units: The maximum number of observed units,
ie. obs_spec["raw_units"].shape[0].
output_name: The name to give to the output of this module, of shape
[output_size] and dtype float32.
output_size: The size of the output vector.
gating_type: The type of gating mechanism to use.
use_layer_norm: Whether to use layer normalization.
input_dtypes: An optional dictionary with the dtypes of the inputs. If an
input is missing from this dictionary, its dtype is assumed to be
float32.
name: The name of this component.
"""
super().__init__(input_sizes=input_sizes,
output_name=output_name,
output_size=output_size,
gating_type=gating_type,
use_layer_norm=use_layer_norm,
input_dtypes=input_dtypes,
name=name)
self._max_num_observed_units = max_num_observed_units
@property
def input_spec(self) -> types.SpecDict:
spec = types.SpecDict()
for name, size in self._input_sizes.items():
dtype = self._input_dtypes[name]
if size is not None:
spec[name] = specs.Array((self._max_num_observed_units, size,), dtype)
else:
spec[name] = specs.Array((self._max_num_observed_units,), dtype)
return spec
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array(
(self._max_num_observed_units, self._output_size,), jnp.float32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
return jax.vmap(super()._forward)(inputs)
# Note that we have not implemented VisualMerge because we use SumMerge for
# visual streams, to optimize speed.
| alphastar-main | alphastar/architectures/components/merge.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vector-based components, acting on 1d vectors."""
from typing import Callable, Optional, Sequence
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures.components import util
import chex
from dm_env import specs
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class VectorEncoder(modular.BatchedComponent):
"""Encodes a vector of features by applying a function and a linear layer."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
num_features: int,
output_size: int,
fun: Optional[Callable[[chex.Array], chex.Array]] = None,
name: Optional[str] = None):
"""Initializes VectorEncoder module.
Args:
input_name: The name of the input to use, of shape
[num_features] and dtype int32.
output_name: The name to give to the output, of shape
[output_size] and dtype float32.
num_features: The size of the input to encode.
output_size: The size of the output vector.
fun: An optional function to apply to the input before applying the linear
transformation.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._num_features = num_features
self._output_size = output_size
self._fun = fun
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array((self._num_features,), np.int32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._output_size,), np.float32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
x = util.astype(x, jnp.float32)
if self._fun is not None:
x = self._fun(x)
x = hk.Linear(self._output_size)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
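# Example usage (illustrative sketch): the `player` observation (a vector of 7
# int32 features) is encoded with `fun=jnp.log1p` in the standard configs; the
# stream names and output size below are arbitrary.
#
#   player_encoder = VectorEncoder(
#       input_name='player',
#       output_name='player_embedding',
#       num_features=7,
#       output_size=64,
#       fun=jnp.log1p)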
class Embedding(modular.BatchedComponent):
"""Embeds a single feature (a int32) into a float32 vector (hk.Embed)."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
num_classes: int,
output_size: int,
mask_name: Optional[types.StreamType] = None,
fun: Optional[Callable[[chex.Array], chex.Array]] = None,
name: Optional[str] = None):
"""Initializes Embedding module.
Args:
input_name: The name of the input to use, of shape [] and dtype int32.
output_name: The name to give to the output, of shape
[output_size] and dtype float32.
      num_classes: The number of values the input can take, ie. max(input)+1.
        For safety, the input is clipped to stay within [0, num_classes-1], but
        it probably should never be larger than num_classes-1. If using `fun`,
        num_classes refers to the values obtained after applying `fun`.
output_size: The size of the output vector.
mask_name: If specified, this determines a mask input, of shape [] and
dtype bool, to mask the output. The output will be 0 if the mask is
set to False, otherwise it will be unaffected.
fun: An optional function to apply to the input before embedding.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._num_classes = num_classes
self._output_size = output_size
self._mask_name = mask_name
self._fun = fun
@property
def input_spec(self) -> types.SpecDict:
spec = types.SpecDict({self._input_name: specs.Array((), np.int32)})
if self._mask_name is not None:
spec[self._mask_name] = specs.Array((), np.bool_)
return spec
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._output_size,), np.float32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
if self._fun is not None:
x = self._fun(x)
x = jnp.minimum(util.astype(x, jnp.int32), self._num_classes - 1)
x = hk.Embed(vocab_size=self._num_classes, embed_dim=self._output_size)(x)
if self._mask_name:
x = jnp.where(inputs[self._mask_name], x, 0)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
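# Example usage (illustrative sketch): the `mmr` observation is embedded after
# rescaling, matching the standard configs (num_classes=7, fun=x * 0.001). The
# stream names and output size below are arbitrary.
#
#   mmr_embedding = Embedding(
#       input_name='mmr',
#       output_name='mmr_embedding',
#       num_classes=7,
#       output_size=64,
#       fun=lambda x: x * 0.001)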
class FixedLengthToMask(modular.BatchedComponent):
"""Converts a fixed length list of integer into a boolean mask."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
input_size: int,
num_classes: int,
name: Optional[str] = None):
"""Initializes FixedLengthToMask module.
Args:
input_name: The name of the input to use, of shape [input_size] and
dtype int32.
output_name: The name to give to the output, of shape
[num_classes] and dtype bool.
input_size: The size of the input. The output will contain `input_size`
        ones, or fewer if the same value appears several times in the input.
      num_classes: The number of values the inputs can take, ie. max(input)+1.
For safety, the input is clipped to stay within [0, num_classes-1], but
it probably should never be larger than num_classes-1. This is also the
size of the output.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._input_size = input_size
self._num_classes = num_classes
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array((self._input_size,), np.int32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._num_classes,), np.bool_)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
indices = inputs[self._input_name]
indices = jnp.minimum(indices, self._num_classes - 1)
mask = jnp.matmul(jnp.ones_like(indices),
indices[:, jnp.newaxis] == jnp.arange(self._num_classes))
mask = util.astype(mask, jnp.bool_)
outputs = types.StreamDict({self._output_name: mask})
return outputs, {}
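# Worked example (sketch): with input_size=3 and num_classes=5, an input of
# [1, 3, 3] produces the mask [False, True, False, True, False]; duplicated
# values simply map to the same output position.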
class BinaryVectorEmbedding(modular.BatchedComponent):
"""Encodes a boolean mask."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
input_size: int,
output_size: int,
mask_name: Optional[types.StreamType] = None,
name: Optional[str] = None):
"""Initializes BinaryVectorEmbedding module.
Args:
input_name: The name of the input to use, of shape [input_size] and
dtype bool.
output_name: The name to give to the output, of shape
        [output_size] and dtype float32.
input_size: The size of the input vector.
output_size: The size of the output vector.
mask_name: If specified, this determines a mask input, of shape [] and
dtype bool, to mask the output. The output will be 0 if the mask is
set to False, otherwise it will be unaffected.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._input_size = input_size
self._output_size = output_size
self._mask_name = mask_name
@property
def input_spec(self) -> types.SpecDict:
spec = types.SpecDict({
self._input_name: specs.Array((self._input_size,), np.bool_)})
if self._mask_name is not None:
spec[self._mask_name] = specs.Array((), np.bool_)
return spec
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._output_size,), np.float32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
x = hk.Linear(output_size=self._output_size)(util.astype(x, jnp.float32))
if self._mask_name:
x = jnp.where(inputs[self._mask_name], x, 0)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
class ClockFeatureEncoder(modular.BatchedComponent):
"""Embedding for the game clock value position."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
encoding_size: int,
output_size: int,
t_min: int = 1,
t_max: int = 100_000,
name: Optional[str] = None):
"""Initializes ClockFeatureEncoder module.
Args:
input_name: The name of the input to use, of shape [] and dtype int32.
output_name: The name to give to the output, of shape [output_size] and
dtype float32.
encoding_size: The size of the sine-wave encoding. It must be even.
output_size: The size of the output after the linear layer is applied
to the sine-wave encoding.
t_min: The minimum expected clock value.
t_max: The maximum expected clock value.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
if encoding_size % 2:
raise ValueError(f"Encoding size (set to {encoding_size}) must be even.")
self._encoding_size = encoding_size
self._output_size = output_size
self._t_min = float(t_min)
self._t_max = float(t_max)
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({self._input_name: specs.Array((), np.int32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._output_size,), np.float32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
num_timescales = float(self._encoding_size // 2)
log_timescale_step = np.log(self._t_max / self._t_min) / (num_timescales-1)
timescales = np.arange(num_timescales, dtype=np.float32)
inv_timescales = np.exp(timescales * -log_timescale_step) / self._t_min
inv_timescales = jnp.asarray(inv_timescales)
x = util.astype(inputs[self._input_name], jnp.float32)
rescaled_time = x[jnp.newaxis] * inv_timescales
x = jnp.concatenate(
[jnp.sin(rescaled_time), jnp.cos(rescaled_time)], axis=-1)
x = hk.Linear(self._output_size)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
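# Worked example (sketch): with encoding_size=4, t_min=1 and t_max=100_000,
# there are 2 timescales with inverse values 1 and 1e-5, so a game loop value t
# is encoded as [sin(t), sin(1e-5 * t), cos(t), cos(1e-5 * t)] before the final
# linear projection to output_size.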
class Resnet(modular.BatchedComponent):
"""A fully-connected resnet."""
def __init__(self,
input_size: int,
num_resblocks: int,
use_layer_norm: bool = True,
input_name: types.StreamType = "vector_stream",
output_name: types.StreamType = "vector_stream",
name: Optional[str] = None):
"""Initializes Resnet module.
Args:
input_size: The size of the input vector.
num_resblocks: The number of residual blocks.
use_layer_norm: Whether to use layer normalization.
input_name: The name of the input to use, of shape [input_size] and
dtype float32.
output_name: The name to give to the output, of shape [input_size] and
dtype float32.
name: Name of the component.
"""
super().__init__(name=name)
self._input_size = input_size
self._num_resblocks = num_resblocks
self._use_layer_norm = use_layer_norm
self._input_name = input_name
self._output_name = output_name
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array((self._input_size,), jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._input_size,), jnp.float32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
for _ in range(self._num_resblocks):
x = util.VectorResblock(use_layer_norm=self._use_layer_norm)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
class ToVisual(modular.BatchedComponent):
"""Vector to visual using linear layer + strided convolutions."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
input_size: int,
output_spatial_size: int,
output_features_size: int,
hidden_feature_sizes: Sequence[int],
upscale_factor: int = 2,
use_layer_norm: bool = True,
kernel_size: int = 4,
name: Optional[str] = None):
"""Initializes ToVisual module.
Args:
input_name: The name of the input to use, of shape [input_size] and
dtype float32.
output_name: The name to give to the output, of shape
[output_spatial_size, output_spatial_size, output_features_size] and
dtype float32.
input_size: The size of the input vector.
output_spatial_size: The spatial size of the output (2d feature maps).
output_features_size: The number of the feature planes in the output (2d
feature maps).
      hidden_feature_sizes: The number of feature planes in each hidden layer
        before the output. Each convolution is strided, ie. it increases the
        spatial resolution.
upscale_factor: The upscale factor of each strided convolution.
use_layer_norm: Whether to use layer normalization.
kernel_size: The size of the convolution kernel to use. Note that with
        upsampling, a `kernel_size` that is not a multiple of the
        `upscale_factor` will result in a checkerboard pattern, so it is not
        recommended.
name: Name of the component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._output_spatial_size = output_spatial_size
self._input_size = input_size
self._feature_sizes = list(hidden_feature_sizes) + [output_features_size]
if output_spatial_size % (upscale_factor ** (len(self._feature_sizes) - 1)):
raise ValueError(f"The spatial output size (set to {output_spatial_size})"
" must be a multiple of the upscale factor "
f"({upscale_factor}) to the power of the number of "
f"convolutions ({len(self._feature_sizes)}).")
self._use_layer_norm = use_layer_norm
self._upscale_factor = upscale_factor
if upscale_factor < 1:
raise ValueError(f"upscale_factor (set to {upscale_factor}) must be > 0.")
self._kernel_size = kernel_size
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array((self._input_size,), jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._output_spatial_size,
self._output_spatial_size,
self._feature_sizes[-1]),
jnp.float32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
res = (self._output_spatial_size //
(self._upscale_factor ** (len(self._feature_sizes) - 1)))
if self._use_layer_norm:
x = util.vector_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Linear(output_size=self._feature_sizes[0] * res * res)(x)
x = jnp.reshape(x, [res, res, self._feature_sizes[0]])
for num_features in self._feature_sizes[1:]:
if self._use_layer_norm:
x = util.visual_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Conv2DTranspose(
output_channels=num_features,
kernel_shape=self._kernel_size,
stride=self._upscale_factor)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
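# Worked example (sketch): with output_spatial_size=32, upscale_factor=2 and
# hidden_feature_sizes=[32, 32], there are three feature sizes in total
# (including the output), so the initial resolution is 32 // 2**2 = 8 and the
# two strided transposed convolutions upscale 8 -> 16 -> 32.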
class Logits(modular.BatchedComponent):
"""Logits for scalar heads (function, delay, queued, repeat)."""
def __init__(
self,
num_logits: int,
input_size: int,
logits_output_name: types.StreamType,
mask_output_name: types.StreamType,
input_name: types.StreamType = "vector_stream",
num_linear_layers: int = 2,
use_layer_norm: bool = True,
name: Optional[str] = None):
"""Initializes Logits module.
Args:
num_logits: The number of logits to produce.
input_size: The size of the input vector.
logits_output_name: The name to give to the output for the logits, of
shape [num_logits] and dtype float32.
      mask_output_name: The name to give to the output for the mask, of
        shape [num_logits] and dtype bool. Logits from vector heads are not
        masked, so the mask only contains ones.
input_name: The name of the input to use, of shape [input_size] and
dtype float32.
num_linear_layers: Number of linear layers to use to compute the logits
(ie. the depth of the MLP).
use_layer_norm: Whether to use layer normalization.
name: Name of the component.
"""
super().__init__(name)
self._num_logits = num_logits
self._input_size = input_size
self._logits_output_name = logits_output_name
self._mask_output_name = mask_output_name
self._input_name = input_name
if num_linear_layers < 1:
raise ValueError(
f"num_linear_layers (set to {num_linear_layers}) must be > 0.")
self._num_linear_layers = num_linear_layers
self._use_layer_norm = use_layer_norm
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array((self._input_size,), np.float32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._logits_output_name: specs.Array((self._num_logits,), np.float32),
self._mask_output_name: specs.Array((self._num_logits,), np.bool_)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
for i in range(self._num_linear_layers):
if i == self._num_linear_layers - 1:
output_size = self._num_logits
else:
output_size = x.shape[-1]
if self._use_layer_norm:
x = util.vector_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Linear(output_size=output_size)(x)
# Vector heads do not use masks (in this implementation).
mask = jnp.ones((self._num_logits,), dtype=jnp.bool_)
# Note: if mask is used, add `logits = common.mask_logits(logits, mask)`
outputs = types.StreamDict({self._logits_output_name: x,
self._mask_output_name: mask})
return outputs, {}
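# Example usage (illustrative sketch): a `queued` head with 2 possible values,
# reading a 128-dimensional vector stream. The outputs are [2] float32 logits
# and a [2] bool mask of ones. The stream names and sizes are arbitrary.
#
#   queued_logits = Logits(
#       num_logits=2,
#       input_size=128,
#       logits_output_name='queued_logits',
#       mask_output_name='queued_mask',
#       input_name='vector_stream')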
| alphastar-main | alphastar/architectures/components/vector.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Units-based components, acting on lists of 1d vectors."""
import enum
import math
from typing import Mapping, Optional, Sequence, Tuple
from alphastar import types
from alphastar.architectures import modular
from alphastar.architectures.components import util
from alphastar.architectures.components.static_data import camera_masks
from alphastar.architectures.components.static_data import unit_encoder_data
from alphastar.commons import sample
import chex
from dm_env import specs
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from pysc2.env.converter.cc.game_data.python import uint8_lookup
from pysc2.lib.features import FeatureUnit
from s2clientprotocol import raw_pb2 as sc_raw
class UnitTagsMasking(enum.Enum):
NONE = "none"
NON_EMPTY = "non_empty"
TARGETABLE = "targetable"
TARGETABLE_WITH_CAMERA = "targetable_with_camera"
SELECTABLE = "selectable"
def get_unit_tags_mask(raw_units: chex.Array,
mode: UnitTagsMasking) -> chex.Array:
"""Get a mask over unit tags.
Args:
raw_units: The raw_units observation from the environment, of shape
[..., num_raw_unit_features] and dtype int32.
mode: The masking mode, a UnitTagsMasking.
Returns:
A boolean mask of size [...] specifying which units are masked given the
mode.
"""
if mode is UnitTagsMasking.NONE:
return jnp.ones(shape=raw_units.shape[0], dtype=jnp.bool_)
if mode is UnitTagsMasking.NON_EMPTY:
return jnp.not_equal(
raw_units[..., int(FeatureUnit.alliance)], 0)
elif mode is UnitTagsMasking.TARGETABLE:
return raw_units[..., int(FeatureUnit.tag)] > 0
elif mode is UnitTagsMasking.SELECTABLE:
my_units = jnp.equal(
raw_units[..., int(FeatureUnit.alliance)], int(sc_raw.Alliance.Self))
# In theory, units in cargo could be selected, but currently it is not
# supported by the transforms and actions.
not_in_cargo = jnp.equal(
raw_units[..., int(FeatureUnit.is_in_cargo)], 0)
return jnp.logical_and(
get_unit_tags_mask(raw_units, UnitTagsMasking.TARGETABLE),
jnp.logical_and(my_units, not_in_cargo))
else:
raise ValueError(f"Unsupported mode: {mode}.")
def get_unit_tags_camera_mask(raw_units: chex.Array,
function_arg: chex.Array) -> chex.Array:
"""Get which units can be targeted considering the camera.
For some function arguments of the action, we are not allowed to target
enemy units outside the camera. This function returns a mask over the units
which specifies which units can be targeted given the function argument.
Args:
raw_units: The raw_units observation from the environment,
of shape [max_num_observed_units, num_raw_unit_features] and dtype int32.
function_arg: The `function` argument of the current action, of shape []
and dtype int32.
Returns:
A boolean mask of size [max_num_observed_units] specifying which units are
masked.
"""
chex.assert_rank(raw_units, 2)
chex.assert_type(raw_units, jnp.int32)
chex.assert_rank(function_arg, 0)
chex.assert_type(function_arg, jnp.int32)
# 1) Is the unit on the camera?
is_on_camera = raw_units[:, int(FeatureUnit.is_on_screen)]
mask = jnp.equal(is_on_camera, 1)
  # 2) Is the unit owned by us? We can target our own units outside the camera.
unit_owner = raw_units[:, int(FeatureUnit.alliance)]
my_units = jnp.equal(unit_owner, int(sc_raw.Alliance.Self))
mask = jnp.logical_or(mask, my_units)
# 3) Can this function argument target units outside the camera?
all_camera_only_functions = camera_masks.get_on_camera_only_functions_unit()
all_not_camera_only_functions = jnp.asarray(
np.logical_not(all_camera_only_functions))
is_not_camera_only = all_not_camera_only_functions[function_arg]
is_not_camera_only = jnp.broadcast_to(
is_not_camera_only[jnp.newaxis], [raw_units.shape[0]])
mask = jnp.logical_or(mask, is_not_camera_only)
# 4) Is it targetable? (this should remove effects)
targetable_units = get_unit_tags_mask(raw_units, UnitTagsMasking.TARGETABLE)
mask = jnp.logical_and(mask, targetable_units)
return mask
_UnitEncoderOneHot = Tuple[chex.Array, int]
class UnitsEncoder(modular.BatchedComponent):
"""Encode the units input."""
def __init__(
self,
output_name: types.StreamType,
max_num_observed_units: int,
num_raw_unit_features: int,
units_stream_size: int,
action_spec: types.ActionSpec,
num_unit_types: int,
num_buff_types: int,
name: Optional[str] = None
):
"""Initializes UnitsEncoder module.
Args:
output_name: The name to give to the output of this module, of shape
[max_num_observed_units, units_stream_size] and dtype float32.
      max_num_observed_units: The maximum number of observed units,
ie. obs_spec["raw_units"].shape[0].
num_raw_unit_features: The number of features per unit,
ie. obs_spec["raw_units"].shape[1].
units_stream_size: The size of the output encoding each unit.
action_spec: The action spec.
num_unit_types: The number of different units.
num_buff_types: The number of different buffs.
name: The name of this component.
"""
super().__init__(name)
self._output_name = output_name
self._units_stream_size = units_stream_size
self._max_num_observed_units = max_num_observed_units
self._num_raw_unit_features = num_raw_unit_features
self._world_dim = int(math.sqrt(action_spec["world"].maximum + 1))
self._action_spec = action_spec
self._num_unit_types = num_unit_types
self._num_buff_types = num_buff_types
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
("observation", "raw_units"): specs.Array(
(self._max_num_observed_units, self._num_raw_unit_features),
jnp.int32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array(
(self._max_num_observed_units, self._units_stream_size),
jnp.float32),
"non_empty_units": specs.Array(
(self._max_num_observed_units,), jnp.bool_)})
def _encode_unit(self, raw_unit: chex.Array) -> chex.Array:
chex.assert_rank(raw_unit, 1)
chex.assert_type(raw_unit, jnp.int32)
# Lookup tables:
attributes_lookup = unit_encoder_data.get_attribute_lookup(
self._num_unit_types)
function_list = util.get_function_list(self._action_spec)
function_names = [f.name for f in function_list]
order_id_lookup = unit_encoder_data.get_order_id_lookup(function_names)
build_queue_order_id_lookup = (
unit_encoder_data.get_build_queue_order_id_lookup(function_names))
addon_lookup = unit_encoder_data.get_addon_lookup(self._num_unit_types)
# Embeddings (to feed to hk.Linear modules):
embeddings = [
jax.nn.one_hot(raw_unit[FeatureUnit.unit_type], self._num_unit_types),
jnp.asarray(attributes_lookup)[raw_unit[FeatureUnit.unit_type]],
_binary_scale_embedding(raw_unit[FeatureUnit.x], self._world_dim),
_binary_scale_embedding(raw_unit[FeatureUnit.y], self._world_dim),
_features_embedding(raw_unit, {
FeatureUnit.build_progress: 1. / 100,
FeatureUnit.health_ratio: 1. / 255,
FeatureUnit.shield_ratio: 1. / 255,
FeatureUnit.energy_ratio: 1. / 255,
FeatureUnit.order_progress_0: 1. / 100,
FeatureUnit.order_progress_1: 1. / 100}),
_remap_and_one_hot_embedding(
raw_unit[FeatureUnit.order_id_0], order_id_lookup),
_remap_and_one_hot_embedding(
raw_unit[FeatureUnit.order_id_1], build_queue_order_id_lookup),
_remap_and_one_hot_embedding(
raw_unit[FeatureUnit.order_id_2], build_queue_order_id_lookup),
_remap_and_one_hot_embedding(
raw_unit[FeatureUnit.order_id_3], build_queue_order_id_lookup),
]
# Encoded one-hots (to pass to jax.nn.one_hot then hk.Linear):
one_hot_encoded = [
_encode_one_hot(raw_unit, FeatureUnit.alliance),
_encode_sqrt_one_hot(raw_unit, FeatureUnit.health),
_encode_sqrt_one_hot(raw_unit, FeatureUnit.shield),
_encode_sqrt_one_hot(raw_unit, FeatureUnit.energy),
_encode_capped_one_hot(raw_unit, FeatureUnit.cargo_space_taken),
_encode_one_hot(raw_unit, FeatureUnit.display_type),
_encode_one_hot(raw_unit, FeatureUnit.cloak),
_encode_one_hot(raw_unit, FeatureUnit.is_powered),
_encode_divided_one_hot(raw_unit, FeatureUnit.mineral_contents, 100),
_encode_divided_one_hot(raw_unit, FeatureUnit.vespene_contents, 100),
_encode_mined_resource_one_hot(
raw_unit, FeatureUnit.mineral_contents, self._num_unit_types),
_encode_mined_resource_one_hot(
raw_unit, FeatureUnit.vespene_contents, self._num_unit_types),
_encode_capped_one_hot(raw_unit, FeatureUnit.cargo_space_max),
_encode_capped_one_hot(raw_unit, FeatureUnit.assigned_harvesters),
_encode_capped_one_hot(raw_unit, FeatureUnit.ideal_harvesters),
_encode_capped_one_hot(raw_unit, FeatureUnit.weapon_cooldown),
_encode_capped_one_hot(raw_unit, FeatureUnit.order_length),
_encode_lookup(raw_unit[FeatureUnit.addon_unit_type], addon_lookup),
_encode_one_hot(raw_unit, FeatureUnit.hallucination),
# Since there are up to 2 buffs, we want to sum the one-hots. That's
# done by setting the length of the first one_hot to 0.
(raw_unit[FeatureUnit.buff_id_0], 0),
(raw_unit[FeatureUnit.buff_id_1], self._num_buff_types),
_encode_one_hot(raw_unit, FeatureUnit.active),
_encode_one_hot(raw_unit, FeatureUnit.is_on_screen),
_encode_one_hot(raw_unit, FeatureUnit.is_blip),
_encode_divided_one_hot(raw_unit, FeatureUnit.order_progress_0, 10),
_encode_divided_one_hot(raw_unit, FeatureUnit.order_progress_1, 10),
_encode_one_hot(raw_unit, FeatureUnit.is_in_cargo),
_encode_sqrt_one_hot(raw_unit, FeatureUnit.buff_duration_remain),
_encode_one_hot(raw_unit, FeatureUnit.attack_upgrade_level),
_encode_one_hot(raw_unit, FeatureUnit.armor_upgrade_level),
_encode_one_hot(raw_unit, FeatureUnit.shield_upgrade_level),
# Previous arguments, they are always the last two entries:
_encode_one_hot(raw_unit, -2),
_encode_one_hot(raw_unit, -1),
]
# Put all the encoded one-hots in a single boolean vector:
sum_offsets = np.cumsum([0] + [offset for _, offset in one_hot_encoded])
indices = jnp.stack([idx + offset for (idx, _), offset
in zip(one_hot_encoded, sum_offsets[:-1])])
boolean_code = jnp.matmul(
jnp.ones((len(indices),), jnp.float32),
indices[:, jnp.newaxis] == jnp.arange(sum_offsets[-1]))
embeddings.append(util.astype(boolean_code, jnp.float32))
embedding = sum([hk.Linear(self._units_stream_size)(x) for x in embeddings])
mask = get_unit_tags_mask(raw_unit, UnitTagsMasking.NON_EMPTY)
embedding = jnp.where(mask, embedding, 0)
return embedding, mask
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
raw_units = inputs["observation", "raw_units"]
embedding, mask = jax.vmap(self._encode_unit)(raw_units)
outputs = types.StreamDict({
self._output_name: embedding,
"non_empty_units": mask})
return outputs, {}
def _encode_one_hot(raw_unit: chex.Array,
feature_idx: int) -> _UnitEncoderOneHot:
chex.assert_rank(raw_unit, 1)
chex.assert_type(raw_unit, jnp.int32)
return raw_unit[feature_idx], unit_encoder_data.MAX_VALUES[feature_idx] + 1
def _encode_capped_one_hot(raw_unit: chex.Array,
feature_idx: int) -> _UnitEncoderOneHot:
chex.assert_rank(raw_unit, 1)
chex.assert_type(raw_unit, jnp.int32)
max_value = unit_encoder_data.MAX_VALUES[feature_idx]
return jnp.minimum(raw_unit[feature_idx], max_value), max_value + 1
def _encode_sqrt_one_hot(raw_unit: chex.Array,
feature_idx: int) -> _UnitEncoderOneHot:
chex.assert_rank(raw_unit, 1)
chex.assert_type(raw_unit, jnp.int32)
max_value = unit_encoder_data.MAX_VALUES[feature_idx]
max_sqrt_value = int(math.floor(math.sqrt(max_value)))
x = jnp.floor(jnp.sqrt(util.astype(raw_unit[feature_idx], jnp.float32)))
x = jnp.minimum(util.astype(x, jnp.int32), max_sqrt_value)
return x, max_sqrt_value + 1
def _encode_divided_one_hot(raw_unit: chex.Array,
feature_idx: int,
divisor: int) -> _UnitEncoderOneHot:
chex.assert_rank(raw_unit, 1)
chex.assert_type(raw_unit, jnp.int32)
max_value = unit_encoder_data.MAX_VALUES[feature_idx]
max_divided_value = max_value // divisor
x = jnp.floor_divide(raw_unit[feature_idx], divisor)
x = jnp.minimum(x, max_divided_value)
return x, max_divided_value + 1
def _encode_mined_resource_one_hot(raw_unit: chex.Array,
feature_idx: int,
num_unit_types: int) -> _UnitEncoderOneHot:
"""Encode the amount of mined resource."""
chex.assert_rank(raw_unit, 1)
chex.assert_type(raw_unit, jnp.int32)
unit_type = raw_unit[FeatureUnit.unit_type]
initial_resource_lookup = np.zeros((num_unit_types,), dtype=np.int32)
for unit, resource in unit_encoder_data.INITIAL_RESOURCE_CONTENTS.items():
unit_id = uint8_lookup.PySc2ToUint8(unit)
initial_resource_lookup[unit_id] = resource
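  # initial_resource_lookup maps (remapped) unit type -> initial resource
  # contents; it is 0 for unit types that never contain resources.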
initial_resource = jnp.asarray(initial_resource_lookup)[unit_type]
mined_resource = initial_resource - raw_unit[feature_idx]
max_value = unit_encoder_data.MAX_VALUES[feature_idx]
max_sqrt_value = int(math.floor(math.sqrt(max_value)))
x = jnp.floor(jnp.sqrt(util.astype(mined_resource, jnp.float32)))
x = jnp.clip(util.astype(x, jnp.int32), 0, max_sqrt_value)
return x, max_sqrt_value + 1
def _encode_lookup(to_encode: chex.Array,
lookup_table: np.ndarray) -> chex.Array:
chex.assert_rank(to_encode, 0)
chex.assert_type(to_encode, jnp.int32)
return jnp.asarray(lookup_table)[to_encode], max(lookup_table) + 1
def _features_embedding(raw_unit: chex.Array,
rescales: Mapping[int, float]) -> chex.Array:
"""Select features in `rescales`, rescale and concatenate them."""
chex.assert_rank(raw_unit, 1)
chex.assert_type(raw_unit, jnp.int32)
assert rescales
selected_features = []
feature_indices = sorted(rescales.keys())
i_min = 0
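  # Group runs of consecutive feature indices so that each run can be read
  # with a single slice of raw_unit before rescaling.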
while i_min < len(feature_indices):
i_max = i_min
while ((i_max < len(feature_indices) - 1) and
(feature_indices[i_max + 1] == feature_indices[i_max] + 1)):
i_max += 1
consecutive_features = raw_unit[
feature_indices[i_min]:feature_indices[i_max] + 1]
consecutive_rescales = jnp.asarray(
[rescales[feature_indices[i]] for i in range(i_min, i_max + 1)],
jnp.float32)
i_min = i_max + 1
rescaled_features = jnp.multiply(consecutive_features, consecutive_rescales)
selected_features.append(rescaled_features)
return util.astype(jnp.concatenate(selected_features, axis=0), jnp.float32)
def _binary_scale_embedding(to_encode: chex.Array,
world_dim: int) -> chex.Array:
"""Encode the feature using its binary representation."""
chex.assert_rank(to_encode, 0)
chex.assert_type(to_encode, jnp.int32)
num_bits = (world_dim - 1).bit_length()
bit_mask = 1 << np.arange(num_bits)
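  # Bits are ordered least-significant first; e.g. with world_dim=256
  # (num_bits=8), to_encode=5 encodes to [1, 0, 1, 0, 0, 0, 0, 0].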
pos = jnp.broadcast_to(to_encode[jnp.newaxis], num_bits)
result = jnp.not_equal(jnp.bitwise_and(pos, bit_mask), 0)
return util.astype(result, jnp.float32)
def _remap_and_one_hot_embedding(to_encode: chex.Array,
lookup_table: np.ndarray) -> chex.Array:
remapped, num_classes = _encode_lookup(to_encode, lookup_table)
return jax.nn.one_hot(remapped, num_classes)
class Transformer(modular.BatchedComponent):
"""Apply unit-wise resblocks, and transformer layers, to the units."""
def __init__(
self,
max_num_observed_units: int,
units_stream_size: int,
transformer_num_layers: int,
transformer_num_heads: int,
transformer_key_size: int,
transformer_value_size: int,
resblocks_num_before: int,
resblocks_num_after: int,
resblocks_hidden_size: Optional[int] = None,
use_layer_norm: bool = True,
input_name: types.StreamType = "units_stream",
output_name: types.StreamType = "units_stream",
name: Optional[str] = None):
"""Initializes Transformer module.
Args:
      max_num_observed_units: The maximum number of observed units,
ie. obs_spec["raw_units"].shape[0].
units_stream_size: The size of the output encoding each unit.
transformer_num_layers: Number of consecutive transformer layers.
transformer_num_heads: Number of heads in the transformers.
transformer_key_size: Size of the keys in the transformers.
transformer_value_size: Per-head output size of the transformers.
resblocks_num_before: Number of per-unit fully connected resblocks before
the transformers.
resblocks_num_after: Number of per-unit fully connected resblocks after
the transformers.
resblocks_hidden_size: Number of hidden units in the resblocks.
use_layer_norm: Whether to use layer normalization.
input_name: The name of the input to use, of shape
[max_num_observed_units, units_stream_size] and dtype float32.
output_name: The name to give to the output, of shape
[max_num_observed_units, units_stream_size] and dtype float32.
name: The name of this component.
"""
super().__init__(name=name)
self._max_num_observed_units = max_num_observed_units
self._units_stream_size = units_stream_size
self._transformer_num_layers = transformer_num_layers
self._transformer_num_heads = transformer_num_heads
self._transformer_key_size = transformer_key_size
self._transformer_value_size = transformer_value_size
self._resblocks_num_before = resblocks_num_before
self._resblocks_num_after = resblocks_num_after
self._resblocks_hidden_size = resblocks_hidden_size
self._use_layer_norm = use_layer_norm
self._input_name = input_name
self._output_name = output_name or input_name
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
"non_empty_units": specs.Array(
(self._max_num_observed_units,), jnp.bool_),
self._input_name: specs.Array(
(self._max_num_observed_units, self._units_stream_size),
jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array(
(self._max_num_observed_units, self._units_stream_size),
jnp.float32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
mask = inputs["non_empty_units"]
for _ in range(self._resblocks_num_before):
x = util.UnitsResblock(
hidden_size=self._resblocks_hidden_size,
use_layer_norm=self._use_layer_norm)(x)
for _ in range(self._transformer_num_layers):
x1 = x
if self._use_layer_norm:
x1 = util.units_layer_norm(x1)
x1 = jax.nn.relu(x1)
      # The mask broadcasts against the attention logits, which have shape
      # [num_heads, num_units, num_units]; empty units cannot be attended to:
logits_mask = mask[jnp.newaxis, jnp.newaxis]
x1 = hk.MultiHeadAttention(
num_heads=self._transformer_num_heads,
key_size=self._transformer_key_size,
w_init_scale=1.,
value_size=self._transformer_value_size,
model_size=self._units_stream_size)(
query=x1, key=x1, value=x1, mask=logits_mask)
# Mask here mostly for safety:
x1 = jnp.where(mask[:, jnp.newaxis], x1, 0)
x = x + x1
for _ in range(self._resblocks_num_after):
x = util.UnitsResblock(
hidden_size=self._resblocks_hidden_size,
use_layer_norm=self._use_layer_norm)(x)
x = jnp.where(mask[:, jnp.newaxis], x, 0)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
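# Illustrative construction of the Transformer component (not part of the
# original module; the argument values below are arbitrary examples):
#   transformer = Transformer(
#       max_num_observed_units=512, units_stream_size=256,
#       transformer_num_layers=3, transformer_num_heads=2,
#       transformer_key_size=128, transformer_value_size=128,
#       resblocks_num_before=1, resblocks_num_after=1)
# Its input spec then expects a [512, 256] float32 "units_stream" and a [512]
# bool "non_empty_units" mask; the output keeps the [512, 256] shape.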
class MLP(modular.BatchedComponent):
"""Apply unit-wise linear layers to the units."""
def __init__(
self,
max_num_observed_units: int,
units_stream_size: int,
layer_sizes: Sequence[int],
use_layer_norm: bool = True,
input_name: types.StreamType = "units_stream",
output_name: types.StreamType = "units_stream",
name: Optional[str] = None):
"""Initializes MLP module.
Args:
      max_num_observed_units: The maximum number of observed units,
ie. obs_spec["raw_units"].shape[0].
units_stream_size: The size of the input encoding each unit.
      layer_sizes: The output size of each layer of the MLP.
use_layer_norm: Whether to use layer normalization.
input_name: The name of the input to use, of shape
[max_num_observed_units, units_stream_size] and dtype float32.
output_name: The name to give to the output, of shape
[max_num_observed_units, units_stream_size] and dtype float32.
name: The name of this component.
"""
super().__init__(name=name)
self._max_num_observed_units = max_num_observed_units
self._units_stream_size = units_stream_size
if not layer_sizes:
raise ValueError("layer_sizes must contain at least one element.")
self._layer_sizes = layer_sizes
self._use_layer_norm = use_layer_norm
self._input_name = input_name
self._output_name = output_name or input_name
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array(
(self._max_num_observed_units, self._units_stream_size),
jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array(
(self._max_num_observed_units, self._layer_sizes[-1]), jnp.float32)
})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
for size in self._layer_sizes:
if self._use_layer_norm:
x = util.units_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Linear(size)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
class ToVector(modular.BatchedComponent):
"""Per-unit processing then average over the units dimension."""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
max_num_observed_units: int,
units_stream_size: int,
units_hidden_sizes: Sequence[int],
vector_stream_size: int,
use_layer_norm: bool = True,
name: Optional[str] = None):
"""Initializes ToVector module.
Args:
input_name: The name of the input to use, of shape
[max_num_observed_units, units_stream_size] and dtype float32.
output_name: The name to give to the output, of shape
[vector_stream_size] and dtype float32.
      max_num_observed_units: The maximum number of observed units,
ie. obs_spec["raw_units"].shape[0].
      units_stream_size: The size of the input encoding each unit.
units_hidden_sizes: The list of sizes of the hidden layers processing
each unit independently, before merging the unit representations.
vector_stream_size: The size of the output (1d vector representation).
use_layer_norm: Whether to use layer normalization.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._max_num_observed_units = max_num_observed_units
self._units_stream_size = units_stream_size
self._units_hidden_sizes = units_hidden_sizes
self._vector_stream_size = vector_stream_size
self._use_layer_norm = use_layer_norm
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_name: specs.Array(
(self._max_num_observed_units, self._units_stream_size),
jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._vector_stream_size,), jnp.float32)
})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
x = inputs[self._input_name]
for size in self._units_hidden_sizes:
if self._use_layer_norm:
x = util.units_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Linear(output_size=size)(x)
x = x.mean(axis=0)
if self._use_layer_norm:
x = util.vector_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Linear(output_size=self._vector_stream_size)(x)
outputs = types.StreamDict({self._output_name: x})
return outputs, {}
class ToVisualScatter(modular.BatchedComponent):
"""Scatter the units into their positions in the visual stream.
This means that each element of the units stream will be embedded and placed
in the visual stream, at the location corresponding to its (x, y) coordinate
in the world map.
"""
def __init__(self,
input_name: types.StreamType,
output_name: types.StreamType,
max_num_observed_units: int,
num_raw_unit_features: int,
units_stream_size: int,
units_world_dim: int,
units_hidden_sizes: Sequence[int],
output_spatial_size: int,
output_features_size: int,
kernel_size: int = 3,
use_layer_norm: bool = True,
name: Optional[str] = None):
"""Initializes ToVisualScatter module.
Args:
input_name: The name of the input to use, of shape
[max_num_observed_units, units_stream_size] and dtype float32.
output_name: The name to give to the output, of shape
[output_spatial_size, output_spatial_size, output_features_size] and
dtype float32.
      max_num_observed_units: The maximum number of observed units,
ie. obs_spec["raw_units"].shape[0].
num_raw_unit_features: The number of features per unit,
ie. obs_spec["raw_units"].shape[1].
      units_stream_size: The size of the input encoding each unit.
units_world_dim: The size of the "world" reference frame of the units.
This corresponds to the range of the x and y fields of the raw_units
input (and can be different from the size of the visual stream).
units_hidden_sizes: The list of sizes of the hidden layers processing
each unit independently, before merging the unit representations.
output_spatial_size: The spatial size of the output (2d feature maps).
      output_features_size: The number of feature planes in the output (2d
feature maps).
kernel_size: The size of the convolution kernel to use after scattering.
use_layer_norm: Whether to use layer normalization.
name: The name of this component.
"""
super().__init__(name=name)
self._input_name = input_name
self._output_name = output_name
self._max_num_observed_units = max_num_observed_units
self._num_raw_unit_features = num_raw_unit_features
self._units_stream_size = units_stream_size
if units_world_dim % output_spatial_size != 0:
raise ValueError(f"units_world_dim (set to {units_world_dim}) must be a"
"multiple of output_spatial_size "
f"({output_spatial_size}).")
self._units_world_dim = units_world_dim
self._units_hidden_sizes = tuple(units_hidden_sizes)
self._output_spatial_size = output_spatial_size
self._output_features_size = output_features_size
self._kernel_size = kernel_size
self._use_layer_norm = use_layer_norm
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
("observation", "raw_units"): specs.Array(
(self._max_num_observed_units, self._num_raw_unit_features),
jnp.int32),
"non_empty_units": specs.Array(
(self._max_num_observed_units,), jnp.bool_),
self._input_name: specs.Array(
(self._max_num_observed_units, self._units_stream_size),
jnp.float32)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_name: specs.Array((self._output_spatial_size,
self._output_spatial_size,
self._output_features_size),
jnp.float32)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
raw_units = inputs["observation", "raw_units"]
non_empty_units = inputs["non_empty_units"]
z = inputs[self._input_name]
non_empty_units = non_empty_units[:, jnp.newaxis]
for size in self._units_hidden_sizes:
if self._use_layer_norm:
z = util.units_layer_norm(z)
z = jax.nn.relu(z)
z = hk.Linear(output_size=size)(z)
z = jnp.where(non_empty_units, z, 0)
ratio = self._units_world_dim // self._output_spatial_size
unit_x = raw_units[:, FeatureUnit.x] // ratio
unit_y = raw_units[:, FeatureUnit.y] // ratio
unit_x = jnp.clip(unit_x, 0, self._output_spatial_size - 1)
unit_y = jnp.clip(unit_y, 0, self._output_spatial_size - 1)
one_hot_x = jax.nn.one_hot(unit_x, self._output_spatial_size)
one_hot_y = jax.nn.one_hot(unit_y, self._output_spatial_size)
z = jnp.einsum("uy,uf->uyf", one_hot_y, z)
z = jnp.einsum("ux,uyf->yxf", one_hot_x, z)
if self._use_layer_norm:
z = util.visual_layer_norm(z)
z = jax.nn.relu(z)
z = hk.Conv2D(
output_channels=self._output_features_size,
kernel_shape=self._kernel_size)(z)
outputs = types.StreamDict({self._output_name: z})
return outputs, {}
class PointerEmbedding(modular.BatchedComponent):
"""Embeds a single int32 into a float32 vector, taking embeddings as inputs.
  This is similar to vector.Embedding; the difference is that the embeddings
  are not learned but passed as an input (embeddings).
  Given two inputs, embeddings and index, the output is embeddings[index].
"""
def __init__(self,
num_embeddings: int,
embeddings_size: int,
index_input_name: types.StreamType,
embeddings_input_name: types.StreamType,
output_name: types.StreamType,
name: Optional[str] = None):
"""Initializes PointerEmbedding module.
Args:
num_embeddings: The number of embeddings, ie. the shape[0] of the
embeddings input.
embeddings_size: The size of each embedding, ie. the shape[1] of the
embeddings input.
index_input_name: The name of the index input to use, of shape []
and dtype int32.
embeddings_input_name: The name of the embeddings input, of shape
[num_embeddings, embeddings_size] and dtype float32.
output_name: The name to give to the output, of shape [embeddings_size]
and dtype float32.
name: The name of this component.
"""
super().__init__(name=name)
self._num_embeddings = num_embeddings
self._embeddings_size = embeddings_size
self._index_input_name = index_input_name
self._embeddings_input_name = embeddings_input_name
self._output_name = output_name
@property
def input_spec(self) -> types.SpecDict:
spec = types.SpecDict()
spec[self._index_input_name] = specs.Array((), jnp.int32)
spec[self._embeddings_input_name] = specs.Array(
(self._num_embeddings, self._embeddings_size), jnp.float32)
return spec
@property
def output_spec(self) -> types.SpecDict:
spec = types.SpecDict()
spec[self._output_name] = specs.Array((self._embeddings_size,), jnp.float32)
return spec
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
embeddings = inputs[self._embeddings_input_name]
index = inputs[self._index_input_name]
index = jnp.minimum(index, embeddings.shape[0])
output = types.StreamDict()
output[self._output_name] = embeddings[index]
return output, {}
class BinaryVectorPointerEmbedding(modular.BatchedComponent):
"""Embeds a boolean mask into a float32 vector, taking embeddings as inputs.
  This is similar to vector.BinaryVectorEmbedding; the difference is that the
  embeddings are not learned but passed as an input (embeddings).
Given two inputs, embeddings and mask, the output is the sum of the unmasked
embeddings.
"""
def __init__(self,
num_embeddings: int,
embeddings_size: int,
mask_input_name: types.StreamType,
embeddings_input_name: types.StreamType,
output_name: types.StreamType,
name: Optional[str] = None):
"""Initializes BinaryVectorPointerEmbedding module.
Args:
num_embeddings: The number of embeddings, ie. the shape[0] of the
embeddings input.
embeddings_size: The size of each embedding, ie. the shape[1] of the
embeddings input.
      mask_input_name: The name of the mask input to use, of shape
[num_embeddings] and dtype bool_.
embeddings_input_name: The name of the embeddings input, of shape
[num_embeddings, embeddings_size] and dtype float32.
output_name: The name to give to the output, of shape [embeddings_size]
and dtype float32.
name: The name of this component.
"""
super().__init__(name=name)
self._num_embeddings = num_embeddings
self._embeddings_size = embeddings_size
self._mask_input_name = mask_input_name
self._embeddings_input_name = embeddings_input_name
self._output_name = output_name
@property
def input_spec(self) -> types.SpecDict:
spec = types.SpecDict()
spec[self._mask_input_name] = specs.Array(
(self._num_embeddings,), jnp.bool_)
spec[self._embeddings_input_name] = specs.Array(
(self._num_embeddings, self._embeddings_size), jnp.float32)
return spec
@property
def output_spec(self) -> types.SpecDict:
spec = types.SpecDict()
spec[self._output_name] = specs.Array((self._embeddings_size,), jnp.float32)
return spec
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
embeddings = inputs[self._embeddings_input_name]
indices = util.astype(inputs[self._mask_input_name], jnp.float32)
x = jnp.matmul(indices, embeddings)
output = types.StreamDict()
output[self._output_name] = x
return output, {}
class PointerLogits(modular.BatchedComponent):
"""Produce logits using a pointer network.
This is basically an attention mechanism between keys, coming from the units
  stream, and a single query, coming from the vector stream.
"""
def __init__(
self,
max_num_observed_units: int,
num_raw_unit_features: int,
logits_output_name: types.StreamType,
mask_output_name: types.StreamType,
query_input_size: int,
keys_input_size: int,
unit_tags_masking: UnitTagsMasking,
query_input_name: types.StreamType = "vector_stream",
keys_input_name: types.StreamType = "units_stream",
num_layers_query: int = 2,
num_layers_keys: int = 2,
key_size: int = 64,
use_layer_norm: bool = True,
name: Optional[str] = None):
"""Initializes PointerLogits module.
Args:
      max_num_observed_units: The maximum number of observed units,
ie. obs_spec["raw_units"].shape[0].
num_raw_unit_features: The number of features per unit,
ie. obs_spec["raw_units"].shape[1].
logits_output_name: The name to give to the logits output, of shape
[max_num_observed_units] and dtype float32.
mask_output_name: The name to give to the mask output, of shape
[max_num_observed_units] and dtype bool.
query_input_size: The size of the input used for the query.
keys_input_size: The size of the input used for each key.
unit_tags_masking: The type of masking to use.
query_input_name: The name of the input to use for the query (a 1d
vector, typically the vector stream), of shape [query_input_size] and
dtype float32.
keys_input_name: The name of the input to use for the keys (a list of 1d
vectors, typically the units stream), of shape
[max_num_observed_units, keys_input_size] and dtype float32.
num_layers_query: The number of layers used to process the query before
the attention mechanism.
num_layers_keys: The number of layers used to individually process the
keys before the attention mechanism.
key_size: The size of the keys (and query) in the attention mechanism.
use_layer_norm: Whether to use layer normalization.
name: The name of this component.
"""
super().__init__(name=name)
self._max_num_observed_units = max_num_observed_units
self._num_raw_unit_features = num_raw_unit_features
self._logits_output_name = logits_output_name
self._mask_output_name = mask_output_name
self._query_input_size = query_input_size
self._keys_input_size = keys_input_size
self._unit_tags_masking = unit_tags_masking
self._query_input_name = query_input_name
self._keys_input_name = keys_input_name
self._num_layers_query = num_layers_query
self._num_layers_keys = num_layers_keys
self._key_size = key_size
self._use_layer_norm = use_layer_norm
if num_layers_query == 0 and key_size != query_input_size:
raise ValueError("If num_layers_query is set to 0, key_size must be "
"equal to query_input_size, but they are set to "
f"{key_size} and {query_input_size} respectively.")
if num_layers_keys == 0 and key_size != keys_input_size:
raise ValueError("If num_layers_keys is set to 0, key_size must be "
"equal to keys_input_size, but they are set to "
f"{key_size} and {keys_input_size} respectively.")
@property
def input_spec(self) -> types.SpecDict:
spec = types.SpecDict({
("observation", "raw_units"): specs.Array(
(self._max_num_observed_units, self._num_raw_unit_features),
jnp.int32),
self._keys_input_name: specs.Array(
(self._max_num_observed_units, self._keys_input_size), jnp.float32),
self._query_input_name: specs.Array(
(self._query_input_size,), jnp.float32)})
if self._unit_tags_masking is UnitTagsMasking.TARGETABLE_WITH_CAMERA:
spec["action", "function"] = specs.Array((), jnp.int32)
return spec
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._logits_output_name: specs.Array(
(self._max_num_observed_units,), jnp.float32),
self._mask_output_name: specs.Array(
(self._max_num_observed_units,), jnp.bool_)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
# Query.
query = inputs[self._query_input_name]
for i in range(self._num_layers_query):
if self._use_layer_norm:
query = util.vector_layer_norm(query)
query = jax.nn.relu(query)
if i == self._num_layers_query - 1:
query = hk.Linear(output_size=self._key_size)(query)
else:
query = hk.Linear(output_size=query.shape[-1])(query)
# Keys.
keys = inputs[self._keys_input_name]
for i in range(self._num_layers_keys):
if self._use_layer_norm:
keys = util.units_layer_norm(keys)
keys = jax.nn.relu(keys)
if i == self._num_layers_keys - 1:
keys = hk.Linear(output_size=self._key_size)(keys)
else:
keys = hk.Linear(output_size=keys.shape[-1])(keys)
# Mask
if self._unit_tags_masking is UnitTagsMasking.TARGETABLE_WITH_CAMERA:
mask = get_unit_tags_camera_mask(inputs["observation", "raw_units"],
inputs["action", "function"])
else:
mask = get_unit_tags_mask(inputs["observation", "raw_units"],
mode=self._unit_tags_masking)
# Pointer
logits = jnp.matmul(keys, query) # ij,j->i
logits = sample.mask_logits(logits, mask)
outputs = types.StreamDict({self._logits_output_name: logits,
self._mask_output_name: mask})
return outputs, {}
class FinalizeUnitTagsLogits(modular.BatchedComponent):
"""Compute full mask and add the end of selection bit to logits and mask."""
def __init__(
self,
input_logits_name: types.StreamType,
input_mask_name: types.StreamType,
output_logits_name: types.StreamType,
output_mask_name: types.StreamType,
vector_input_name: types.StreamType,
max_num_observed_units: int,
vector_input_size: int,
name: Optional[str] = None):
"""Initializes FinalizeUnitTagsLogits module.
Args:
input_logits_name: The name of the input to use for the logits, of shape
[max_num_observed_units] and dtype float32.
input_mask_name: The name of the input to use for the mask, of shape
[max_num_observed_units] and dtype bool.
output_logits_name: The name of the output for the logits, of shape
[max_num_observed_units + 1] and dtype float32.
output_mask_name: The name of the output for the mask, of shape
[max_num_observed_units + 1] and dtype bool.
vector_input_name: The name of the input to use to compute the EOS
(End-Of-Selection) logit, of shape [vector_input_size] and dtype
float32.
      max_num_observed_units: The maximum number of observed units,
ie. obs_spec["raw_units"].shape[0].
vector_input_size: The size of the vector stream input.
name: The name of this component.
"""
super().__init__(name=name)
self._input_logits_name = input_logits_name
self._input_mask_name = input_mask_name
self._output_logits_name = output_logits_name
self._output_mask_name = output_mask_name
self._vector_input_name = vector_input_name
self._num_units = max_num_observed_units
self._vector_input_size = vector_input_size
@property
def input_spec(self) -> types.SpecDict:
return types.SpecDict({
self._input_logits_name: specs.Array((self._num_units,), jnp.float32),
self._input_mask_name: specs.Array((self._num_units,), jnp.bool_),
self._vector_input_name: specs.Array(
(self._vector_input_size,), jnp.float32),
"selected_unit_tags": specs.Array((self._num_units + 1,), jnp.bool_)})
@property
def output_spec(self) -> types.SpecDict:
return types.SpecDict({
self._output_logits_name: specs.Array(
(self._num_units + 1,), jnp.float32),
self._output_mask_name: specs.Array((self._num_units + 1,), jnp.bool_)})
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
# Logits:
units_logits = inputs[self._input_logits_name]
eos_logit = hk.Linear(1)(jax.nn.relu(inputs[self._vector_input_name]))
logits = jnp.concatenate([units_logits, eos_logit], axis=0)
# Mask:
units_mask = inputs[self._input_mask_name]
selected_units = inputs["selected_unit_tags"]
# We cannot select the same unit twice:
units_mask = jnp.logical_and(units_mask,
jnp.logical_not(selected_units[:-1]))
# We must select at least 1 unit, so EOS is masked if no unit is selected:
eos_mask = jnp.sum(selected_units, dtype=jnp.bool_)
# Once EOS is selected, we can only select EOS:
is_eos_selected = selected_units[-1:]
units_mask = jnp.logical_and(units_mask, jnp.logical_not(is_eos_selected))
mask = jnp.concatenate([units_mask, eos_mask[jnp.newaxis]], axis=0)
logits = sample.mask_logits(logits, mask)
outputs = types.StreamDict({self._output_logits_name: logits,
self._output_mask_name: mask})
return outputs, {}
class UnitTagsHead(modular.BatchedComponent):
"""Unit tag head.
This applies a `scan` operation to the inner component.
  It also builds a boolean vector `selected_unit_tags` of the unit tags selected
  so far.
For simplicity, we require the inner component to be stateless.
This module is equivalent to the following pseudo-code:
```
constant_inputs = extract constant_inputs from inputs
carries = extract carries from inputs
x = extract per_step_inputs from inputs
y = []
selected_units = [0] * (num_units + 1)
for i in range(max_num_selected_units):
comp_output = inner_component.unroll((x[i], carries, selected_units))
carries = extract carries from comp_output
    y[i] = extract per_step_outputs from comp_output
selected_units[y[i][action]] = 1
return (y, carries, selected_units)```
"""
def __init__(
self,
inner_component: modular.Component,
constant_inputs: Sequence[types.StreamType],
carries: Sequence[types.StreamType],
per_step_inputs: Sequence[types.StreamType],
per_step_outputs: Sequence[types.StreamType],
max_num_selected_units: int,
max_num_observed_units: int,
action_output: types.StreamType,
name: Optional[str] = None):
"""Initializes UnitTagsHead module.
Args:
inner_component: The component to wrap and unroll `max_num_selected_units`
times inside this component. It must at least produce action for the
currently selected unit tag, and in most cases should also produce
logits and mask.
      constant_inputs: The list of input names to pass to every unroll
instance of the `inner_component`. They can have any shape and dtype.
      carries: The list of inputs to pass to the `inner_component` whose values
will be changed by the `inner_component` at each unroll step, before
being passed to the next step. They are also returned as outputs of this
module. They can have any shape and dtype.
per_step_inputs: The list of inputs to pass to the `inner_component`
        per-step. These inputs must have shape [max_num_selected_units, ...].
        At the i-th unroll step, these inputs will be indexed by [i, ...].
        They can have any dtype.
per_step_outputs: The list of outputs of the `inner_component` to return
for every unroll step. Since gathering these outputs can take up much
        memory, they must be specified explicitly. They will have shape
[max_num_selected_units, ...], where the element indexed by [i, ...]
comes from the i-th unroll step. The dtype is specified by the
`inner_component` output.
max_num_selected_units: The maximum number of selected units, ie. the
first dimension of the unit_tags logit vector. This corresponds to the
number of unroll steps performed by this component.
      max_num_observed_units: The maximum number of observed units,
ie. obs_spec["raw_units"].shape[0].
action_output: The name of the output of the `inner_component`
corresponding to the action. It must be of shape [] and dtype int32.
This is used to produce the `selected_unit_tags` carry (of shape
[max_num_observed_units] and dtype bool).
name: The name of this component.
"""
super().__init__(name=name)
self._inner_component = inner_component
self._constant_inputs = constant_inputs
self._carries = carries
self._per_step_inputs = per_step_inputs
self._per_step_outputs = per_step_outputs
self._max_num_selected_units = max_num_selected_units
self._max_num_observed_units = max_num_observed_units
self._action_output = action_output
# For simplicity, we request inner_component to be stateless:
if inner_component.prev_state_spec or inner_component.next_state_spec:
raise ValueError(f"{self.name}: Inner component must be stateless.")
# Check that the input and output sets do not intersect:
if set(constant_inputs).intersection(set(carries)):
raise ValueError("constant_inputs and carries must "
"be disjoint sets but both contain "
f"{set(constant_inputs).intersection(set(carries))}.")
if set(constant_inputs).intersection(set(per_step_inputs)):
raise ValueError(
"constant_inputs and per_step_inputs must be disjoint sets but both "
f"contain {set(constant_inputs).intersection(set(per_step_inputs))}.")
if set(per_step_inputs).intersection(set(carries)):
raise ValueError("per_step_inputs and carries must "
"be disjoint sets but both contain "
f"{set(per_step_inputs).intersection(set(carries))}.")
if set(per_step_outputs).intersection(set(carries)):
raise ValueError("per_step_outputs and carries must "
"be disjoint sets but both contain "
f"{set(per_step_outputs).intersection(set(carries))}.")
# Check that input and output sets are contained in the inner component spec
input_spec_names = set(inner_component.input_spec.keys())
output_spec_names = set(inner_component.output_spec.keys())
if not set(constant_inputs).issubset(input_spec_names):
raise ValueError(
"constant_inputs must be a subset of inner_component.input_spec, but "
f"{set(constant_inputs).difference(input_spec_names)} is not there.")
if not set(carries).issubset(input_spec_names):
raise ValueError(
"carries must be a subset of inner_component.input_spec, but "
f"{set(carries).difference(input_spec_names)} is not there.")
if not set(per_step_inputs).issubset(input_spec_names):
raise ValueError(
"per_step_inputs must be a subset of inner_component.input_spec, but "
f"{set(per_step_inputs).difference(input_spec_names)} is not there.")
if not set(carries).issubset(output_spec_names):
raise ValueError(
"carries must be a subset of inner_component.output_spec, but "
f"{set(carries).difference(output_spec_names)} is not there.")
if not set(per_step_outputs).issubset(output_spec_names):
raise ValueError(
"per_step_outputs must be a subset of inner_component.output_spec, "
f"but {set(per_step_inputs).difference(input_spec_names)} is not "
"there.")
if action_output not in per_step_outputs:
raise ValueError(f"action_output ({action_output}) must be in "
"per_step_outputs.")
# Check that the inner component does not change the spec of carries:
for c in carries:
if inner_component.input_spec[c] != inner_component.output_spec[c]:
raise ValueError(f"{self.name}: Carry {c} changed spec.")
# Check that the input and output sets are not larger than the inner
# component spec:
all_inputs = set(constant_inputs).union(carries).union(per_step_inputs)
missing_inputs = all_inputs.difference(
set(inner_component.input_spec.keys()))
if missing_inputs:
raise ValueError(f"{missing_inputs} are specified as inputs, but are not "
"in inner_component.input_spec.")
all_outputs = set(carries).union(per_step_outputs)
missing_outputs = all_outputs.difference(
set(inner_component.output_spec.keys()))
if missing_outputs:
raise ValueError(f"{missing_outputs} are specified as outputs, but are "
"not in inner_component.output_spec.")
# Check that all the inputs of the inner component are specified in this
# init function arguments:
missing_inputs2 = set(inner_component.input_spec.keys()).difference(
all_inputs.union(set(["selected_unit_tags"])))
if missing_inputs2:
raise ValueError(f"{missing_inputs2} are in inner_component.input_spec, "
"but are not specified as inputs.")
# Check that the action output has the right type:
if inner_component.output_spec[action_output].shape:
raise ValueError(
"The output specified as action_output must have shape (), but has "
f"shape {inner_component.output_spec[action_output].shape}.")
if inner_component.output_spec[action_output].dtype != jnp.int32:
raise ValueError(
"The output specified as action_output must have dtype int32, but "
f"has dtype {inner_component.output_spec[action_output].dtype}.")
def _replicate_spec(self, spec: specs.Array) -> specs.Array:
return spec.replace(shape=(self._max_num_selected_units,) + spec.shape)
@property
def input_spec(self) -> types.SpecDict:
spec = types.SpecDict()
for name in self._constant_inputs:
spec[name] = self._inner_component.input_spec[name]
for name in self._carries:
spec[name] = self._inner_component.input_spec[name]
for name in self._per_step_inputs:
spec[name] = self._replicate_spec(self._inner_component.input_spec[name])
return spec
@property
def output_spec(self) -> types.SpecDict:
spec = types.SpecDict()
for name in self._carries:
spec[name] = self._inner_component.output_spec[name]
for name in self._per_step_outputs:
spec[name] = self._replicate_spec(self._inner_component.output_spec[name])
spec["selected_unit_tags"] = specs.Array(
(self._max_num_observed_units,), jnp.bool_)
return spec
def _forward(self, inputs: types.StreamDict) -> modular.ForwardOutputType:
# We need inner_component to be a hk.Module to use it with hk.scan:
inner_module_unroll = hk.to_module(self._inner_component.unroll)(
name="inner_module")
constant_inputs = inputs.filter(self._constant_inputs)
def iterate(carries, loop_inputs):
comp_inputs = constant_inputs.copy()
# scan replaces empty inputs with None:
comp_inputs.update(carries or types.StreamDict())
comp_inputs.update(loop_inputs or types.StreamDict())
comp_inputs = jax.tree_map(lambda x: x[jnp.newaxis], comp_inputs)
comp_outputs, _, _ = inner_module_unroll(comp_inputs, types.StreamDict())
comp_outputs = jax.tree_map(lambda x: x[0], comp_outputs)
carries.update(comp_outputs.filter(self._carries))
loop_outputs = comp_outputs.filter(self._per_step_outputs)
action = comp_outputs[self._action_output]
action_one_hot = jax.nn.one_hot(
action, self._max_num_observed_units + 1, dtype=jnp.bool_)
carries["selected_unit_tags"] = jnp.logical_or(
carries["selected_unit_tags"], action_one_hot)
return carries, loop_outputs
carries = inputs.filter(self._carries)
carries["selected_unit_tags"] = jnp.zeros(
(self._max_num_observed_units + 1,), jnp.bool_)
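    # The extra (last) slot tracks whether EOS (end of selection) has been
    # chosen; it is stripped from the returned `selected_unit_tags` below.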
loop_inputs = inputs.filter(self._per_step_inputs)
carries, loop_outputs = hk.scan(
iterate, carries, loop_inputs, length=self._max_num_selected_units)
outputs = loop_outputs
outputs.update(carries)
outputs["selected_unit_tags"] = outputs["selected_unit_tags"][:-1]
# Logs are ignored for simplicity
return outputs, {}
| alphastar-main | alphastar/architectures/components/units.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for components."""
from typing import MutableMapping, Optional
from alphastar import types
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from pysc2.lib import actions as sc2_actions
REPEATED_FUNCTION_TYPES = [sc2_actions.raw_cmd]
def astype(x: chex.Array, dtype: jnp.dtype) -> chex.Array:
"""Cast x if necessary."""
if x.dtype != dtype:
return x.astype(dtype)
else:
return x
def get_function_list(action_spec: types.ActionSpec
) -> sc2_actions.Functions:
"""Get the list of Function available given this action_spec."""
num_functions = action_spec['function'].maximum + 1
if num_functions > len(sc2_actions.RAW_FUNCTIONS):
raise ValueError(
        f'action_spec contains {num_functions} functions, which is larger '
        'than the actual number of functions in sc2: '
        f'{len(sc2_actions.RAW_FUNCTIONS)}.')
normal_functions = [
f for f in sc2_actions.RAW_FUNCTIONS if f.id < num_functions]
functions = sc2_actions.Functions(normal_functions)
# Sanity check:
assert len(functions) == num_functions
return functions
def get_full_argument_masks(action_spec: types.ActionSpec
) -> MutableMapping[str, chex.Array]:
"""Get the (static) full argument masks.
  For each argument `arg`, full_argument_masks[arg][function_id] specifies
  whether `arg` is used when that function is selected.
Args:
action_spec: The action specification.
Returns:
    A dict containing a jnp.ndarray of size (num_functions,) for each
    argument, specifying whether that argument is used by each function.
"""
function_list = get_function_list(action_spec)
full_argument_masks = dict(
{k: np.zeros((len(function_list),), dtype=bool) for k in action_spec},
# Function and delay are never masked:
function=np.ones((len(function_list),), dtype=bool),
delay=np.ones((len(function_list),), dtype=bool))
for func in function_list:
for argument in func.args:
full_argument_masks[argument.name][func.id] = True
if func.function_type in REPEATED_FUNCTION_TYPES:
# repeat is used iff the function type is in REPEATED_FUNCTION_TYPES
full_argument_masks['repeat'][func.id] = True
return jax.tree_map(jnp.asarray, full_argument_masks)
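# Illustrative usage (hedged sketch; building `action_spec` is
# environment-specific and not shown here):
#   masks = get_full_argument_masks(action_spec)
#   # masks["world"][f] is True iff function id f takes a "world" argument,
#   # while masks["function"] and masks["delay"] are True for every f.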
def vector_layer_norm(x: chex.Array) -> chex.Array:
if x.ndim < 1:
raise ValueError(
f'Input must have at least one dimension (shape: {x.shape}).')
layer_norm = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)
return astype(layer_norm(astype(x, jnp.float32)), x.dtype)
def units_layer_norm(x: chex.Array) -> chex.Array:
if x.ndim < 2:
raise ValueError(
        f'Input must have at least two dimensions (shape: {x.shape}).')
layer_norm = hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)
return astype(layer_norm(astype(x, jnp.float32)), x.dtype)
def visual_layer_norm(x: chex.Array) -> chex.Array:
if x.ndim < 3:
raise ValueError(
        f'Input must have at least three dimensions (shape: {x.shape}).')
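  # Unlike the vector/units variants above, this normalizes jointly over the
  # height, width and channel axes.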
layer_norm = hk.LayerNorm(
axis=[-3, -2, -1], create_scale=True, create_offset=True)
return astype(layer_norm(astype(x, jnp.float32)), x.dtype)
class VectorResblock(hk.Module):
"""Fully connected residual block."""
def __init__(self,
num_layers: int = 2,
hidden_size: Optional[int] = None,
use_layer_norm: bool = True,
name: Optional[str] = None):
"""Initializes VectorResblock module.
Args:
num_layers: Number of layers in the residual block.
hidden_size: Size of the activation vector in the residual block.
use_layer_norm: Whether to use layer normalization.
name: The name of this component.
"""
super().__init__(name=name)
self._num_layers = num_layers
self._hidden_size = hidden_size
self._use_layer_norm = use_layer_norm
def __call__(self, x: chex.Array) -> chex.Array:
chex.assert_rank(x, 1)
chex.assert_type(x, jnp.float32)
shortcut = x
input_size = x.shape[-1]
for i in range(self._num_layers):
if i < self._num_layers - 1:
output_size = self._hidden_size or input_size
w_init, b_init = None, None
else:
output_size = input_size
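        # The last layer is initialized near zero so the residual branch is
        # approximately zero at initialization and the block starts close to
        # the identity.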
w_init = hk.initializers.RandomNormal(stddev=0.005)
b_init = hk.initializers.Constant(0.)
if self._use_layer_norm:
x = vector_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Linear(output_size=output_size, w_init=w_init, b_init=b_init)(x)
return x + shortcut
class UnitsResblock(VectorResblock):
"""Fully connected residual block, unit-wise."""
def __call__(self, x: chex.Array) -> chex.Array:
chex.assert_rank(x, 2)
chex.assert_type(x, jnp.float32)
return jax.vmap(super().__call__)(x)
class VisualResblock(hk.Module):
"""Convolutional (2d) residual block."""
def __init__(self,
kernel_size: int,
num_layers: int = 2,
hidden_size: Optional[int] = None,
use_layer_norm: bool = True,
name: Optional[str] = None):
"""Initializes VisualResblock module.
Args:
kernel_size: The size of the convolution kernel.
num_layers: Number of layers in the residual block.
hidden_size: Size of the activation vector in the residual block.
use_layer_norm: Whether to use layer normalization.
name: The name of this component.
"""
super().__init__(name=name)
self._kernel_size = kernel_size
self._num_layers = num_layers
self._hidden_size = hidden_size
self._use_layer_norm = use_layer_norm
def __call__(self, x: chex.Array) -> chex.Array:
chex.assert_rank(x, 3)
chex.assert_type(x, jnp.float32)
shortcut = x
input_size = x.shape[-1]
for i in range(self._num_layers):
if i < self._num_layers - 1:
output_size = self._hidden_size or input_size
w_init, b_init = None, None
else:
output_size = input_size
w_init = hk.initializers.RandomNormal(stddev=0.005)
b_init = hk.initializers.Constant(0.)
if self._use_layer_norm:
x = visual_layer_norm(x)
x = jax.nn.relu(x)
x = hk.Conv2D(output_channels=output_size,
kernel_shape=self._kernel_size,
w_init=w_init,
b_init=b_init)(x)
return x + shortcut
| alphastar-main | alphastar/architectures/components/util.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for common."""
from absl.testing import absltest
from absl.testing import parameterized
from alphastar import types
from alphastar.architectures import util as modular_util
from alphastar.architectures.components import common
from alphastar.architectures.components import test_utils
from alphastar.commons import sample
_ALL_ARGUMENT_NAMES = (
modular_util.Argument.FUNCTION,
modular_util.Argument.DELAY,
modular_util.Argument.QUEUED,
modular_util.Argument.REPEAT,
modular_util.Argument.UNIT_TAGS,
modular_util.Argument.TARGET_UNIT_TAG,
modular_util.Argument.WORLD
)
class CommonTest(test_utils.ComponentTest):
"""Basic tests for the common components."""
@parameterized.parameters(*_ALL_ARGUMENT_NAMES)
def test_ActionFromBehaviourFeatures(self,
argument_name: types.ArgumentName):
component = common.ActionFromBehaviourFeatures(argument_name=argument_name)
self._test_component(component, batch_size=2, unroll_len=3)
@parameterized.product(
is_training=[True, False],
argument_name=_ALL_ARGUMENT_NAMES)
def test_Sample(self,
is_training: bool,
argument_name: types.ArgumentName):
_, action_spec = test_utils.get_test_specs(is_training)
component = common.Sample(argument_name=argument_name,
num_logits=action_spec[argument_name].maximum + 1,
sample_fn=sample.sample)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.parameters(True, False)
def test_ArgumentMasks(self, is_training: bool):
_, action_spec = test_utils.get_test_specs(is_training)
component = common.ArgumentMasks(action_spec=action_spec)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.parameters(True, False)
def test_FeatureFromPrevState(self, is_training: bool):
component = common.FeatureFromPrevState(input_name='input_stream',
output_name='output_stream',
is_training=is_training,
stream_shape=(2, 4))
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
@parameterized.parameters((True, 0), (True, 2), (False, 0))
def test_FeatureToNextState(self, is_training: bool, overlap_len: int):
component = common.FeatureToNextState(input_name='input_stream',
output_name='output_stream',
stream_shape=(2, 4),
overlap_len=overlap_len)
self._test_component(
component, batch_size=2, unroll_len=3 if is_training else 1)
def test_FeatureToNextState_error(self):
component = common.FeatureToNextState(input_name='input_stream',
output_name='output_stream',
stream_shape=(2, 4),
overlap_len=2)
with self.assertRaises(ValueError):
self._test_component(component, batch_size=2, unroll_len=1)
if __name__ == '__main__':
absltest.main()
| alphastar-main | alphastar/architectures/components/common_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| alphastar-main | alphastar/architectures/components/__init__.py |