# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test implementations of GAN grad calculator."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from dd_two_player_games import drift_utils
from dd_two_player_games import gan
from dd_two_player_games import gan_grads_calculator
from dd_two_player_games import losses
from dd_two_player_games import regularizer_estimates
class DiracDiscriminator(hk.Linear):
"""DiracGAN discriminator module."""
def __init__(self, init_value=0.1):
super(DiracDiscriminator, self).__init__(
output_size=1, with_bias=False,
w_init=hk.initializers.Constant(init_value),
name='DiracDiscriminator')
def __call__(self, inputs, is_training=False):
del is_training
return super(DiracDiscriminator, self).__call__(inputs)
class DiracGenerator(hk.Module):
"""DiracGAN generator module."""
def __init__(self, init_value=0.1):
super(DiracGenerator, self).__init__(name='DiracGenerator')
self.init_value = init_value
def __call__(self, x, is_training=False):
del x, is_training
w = hk.get_parameter(
'w', [1, 1], jnp.float32,
init=hk.initializers.Constant(self.init_value))
return w
def l_prime(x):
  # Sigmoid: the first derivative of the softplus l(x) = log(1 + exp(x)).
  return 1 / (1 + np.exp(-x))
def l_second(x):
  # Derivative of the sigmoid (the second derivative of the softplus).
  return np.exp(-x) / (1 + np.exp(-x))**2
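# The expected DiracGAN gradients in the tests below are expressed in terms of
# l_prime and l_second. A minimal finite-difference sanity check (hypothetical
# helper, not part of the original test suite):
def _check_l_derivatives(x=0.3, eps=1e-5):
  softplus = lambda v: np.log(1 + np.exp(v))
  np.testing.assert_allclose(
      l_prime(x), (softplus(x + eps) - softplus(x - eps)) / (2 * eps),
      rtol=1e-4)
  np.testing.assert_allclose(
      l_second(x), (l_prime(x + eps) - l_prime(x - eps)) / (2 * eps),
      rtol=1e-4)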
DISC_INIT_VALUES = [-0.5, -0.1, 0.1, 0.5]
GEN_INIT_VALUES = [-0.5, -0.1, 0.1, 0.5]
DEFAULT_TEST_INPUT = list(itertools.product(DISC_INIT_VALUES, GEN_INIT_VALUES))
class GANGradsCalculatorTest(parameterized.TestCase):
@parameterized.parameters(DEFAULT_TEST_INPUT)
def testDiracGANGradsNoExplicitReg(self, disc_init_value, gen_init_value):
"""Tests that DiracGAN gradients are as expected."""
players_hk_tuple = gan.GANTuple(
disc=DiracDiscriminator, gen=DiracGenerator)
losses_tuple = gan.GANTuple(
disc=losses.discriminator_goodfellow_loss,
gen=losses.generator_saturating_loss)
gan_module = gan.GAN(
players_hk=players_hk_tuple, losses=losses_tuple,
penalties=gan.GANTuple(disc=None, gen=None),
player_param_transformers=gan.GANTuple(disc=None, gen=None),
players_kwargs=gan.GANTuple(
disc={'init_value': disc_init_value},
gen={'init_value': gen_init_value}),
num_latents=1)
grad_calculator = gan_grads_calculator.GANGradsCalculator(
gan_module, estimator_fn=regularizer_estimates.biased_estimate_grad_fn,
use_pmean=False)
zero_coeffs = drift_utils.PlayerRegularizationTerms(
self_norm=0, other_norm=0, other_dot_prod=0)
coeffs = gan.GANTuple(disc=zero_coeffs, gen=zero_coeffs)
# Initial rng, not used when creating samples so OK to share between
# parameters and generator.
init_rng = jax.random.PRNGKey(0)
# a toy cyclic iterator, not used.
dataset = itertools.cycle([jnp.array([[0.]])])
init_params, state = gan_module.initial_params(init_rng, next(dataset))
params = init_params
data_batch = next(dataset)
disc_grads_values = grad_calculator.disc_grads(
params, state, data_batch, init_rng, True, coeffs)
    # Check the discriminator gradient values.
disc_param = jax.tree_leaves(params.disc)[0]
gen_param = jax.tree_leaves(params.gen)[0]
expected_disc_grads_values = l_prime(disc_param * gen_param) * gen_param
chex.assert_trees_all_close(
jax.tree_leaves(disc_grads_values)[0],
expected_disc_grads_values,
rtol=1e-3,
atol=6e-2,
ignore_nones=True)
gen_grads_values = grad_calculator.gen_grads(
params, state, data_batch, init_rng, True, coeffs)
expected_gen_grads_values = - l_prime(disc_param * gen_param) * disc_param
chex.assert_trees_all_close(
jax.tree_leaves(gen_grads_values)[0],
expected_gen_grads_values,
rtol=1e-3,
atol=6e-2,
ignore_nones=True)
@parameterized.parameters(DEFAULT_TEST_INPUT)
def testDiracGANGradsExplicitRegSelfNorm(
self, disc_init_value, gen_init_value):
"""Tests that DiracGAN gradients are as expected."""
players_hk_tuple = gan.GANTuple(
disc=DiracDiscriminator, gen=DiracGenerator)
losses_tuple = gan.GANTuple(
disc=losses.discriminator_goodfellow_loss,
gen=losses.generator_saturating_loss)
gan_module = gan.GAN(
players_hk=players_hk_tuple, losses=losses_tuple,
penalties=gan.GANTuple(disc=None, gen=None),
player_param_transformers=gan.GANTuple(disc=None, gen=None),
players_kwargs=gan.GANTuple(
disc={'init_value': disc_init_value},
gen={'init_value': gen_init_value}),
num_latents=1)
grad_calculator = gan_grads_calculator.GANGradsCalculator(
gan_module, estimator_fn=regularizer_estimates.biased_estimate_grad_fn,
use_pmean=False)
self_norm_reg_value = 10.
self_norm = drift_utils.PlayerRegularizationTerms(
self_norm=self_norm_reg_value, other_norm=0, other_dot_prod=0)
coeffs = gan.GANTuple(disc=self_norm, gen=self_norm)
# Initial rng, not used when creating samples so OK to share between
# parameters and generator.
init_rng = jax.random.PRNGKey(0)
# a toy cyclic iterator, not used.
dataset = itertools.cycle([jnp.array([[0.]])])
init_params, state = gan_module.initial_params(init_rng, next(dataset))
params = init_params
data_batch = next(dataset)
disc_grads_values = grad_calculator.disc_grads(
params, state, data_batch, init_rng, True, coeffs)
    # Check the discriminator gradient values.
disc_param = jax.tree_leaves(params.disc)[0]
gen_param = jax.tree_leaves(params.gen)[0]
param_prod = disc_param * gen_param
# loss grad
expected_disc_grads_values = l_prime(param_prod) * gen_param
# explicit regularisation grad
expected_disc_grads_values += (
self_norm_reg_value * gen_param ** 3 * l_prime(
param_prod) * l_second(param_prod))
chex.assert_trees_all_close(
jax.tree_leaves(disc_grads_values)[0],
expected_disc_grads_values,
rtol=1e-3,
atol=6e-2,
ignore_nones=True)
gen_grads_values = grad_calculator.gen_grads(
params, state, data_batch, init_rng, True, coeffs)
expected_gen_grads_values = - l_prime(param_prod) * disc_param
expected_gen_grads_values += (
self_norm_reg_value * disc_param ** 3 * l_prime(
param_prod) * l_second(param_prod))
chex.assert_trees_all_close(
jax.tree_leaves(gen_grads_values)[0],
expected_gen_grads_values,
rtol=1e-3,
atol=6e-2,
ignore_nones=True)
@parameterized.parameters(DEFAULT_TEST_INPUT)
def testDiracGANGradsExplicitRegOtherNorm(
self, disc_init_value, gen_init_value):
"""Tests that DiracGAN gradients are as expected."""
players_hk_tuple = gan.GANTuple(
disc=DiracDiscriminator, gen=DiracGenerator)
losses_tuple = gan.GANTuple(
disc=losses.discriminator_goodfellow_loss,
gen=losses.generator_saturating_loss)
gan_module = gan.GAN(
players_hk=players_hk_tuple, losses=losses_tuple,
penalties=gan.GANTuple(disc=None, gen=None),
player_param_transformers=gan.GANTuple(disc=None, gen=None),
players_kwargs=gan.GANTuple(
disc={'init_value': disc_init_value},
gen={'init_value': gen_init_value}),
num_latents=1)
grad_calculator = gan_grads_calculator.GANGradsCalculator(
gan_module, estimator_fn=regularizer_estimates.biased_estimate_grad_fn,
use_pmean=False)
other_norm_reg_value = 10.
self_norm = drift_utils.PlayerRegularizationTerms(
self_norm=0, other_norm=other_norm_reg_value, other_dot_prod=0)
coeffs = gan.GANTuple(disc=self_norm, gen=self_norm)
# Initial rng, not used when creating samples so OK to share between
# parameters and generator.
init_rng = jax.random.PRNGKey(0)
# a toy cyclic iterator, not used.
dataset = itertools.cycle([jnp.array([[0.]])])
init_params, state = gan_module.initial_params(init_rng, next(dataset))
params = init_params
data_batch = next(dataset)
disc_grads_values = grad_calculator.disc_grads(
params, state, data_batch, init_rng, True, coeffs)
    # Check the discriminator gradient values.
disc_param = jax.tree_leaves(params.disc)[0]
gen_param = jax.tree_leaves(params.gen)[0]
param_prod = disc_param * gen_param
# loss grad
expected_disc_grads_values = l_prime(param_prod) * gen_param
# explicit regularisation grad
expected_disc_grads_values += other_norm_reg_value * (
disc_param * l_prime(param_prod) ** 2
+ disc_param ** 2 * gen_param * l_prime(
param_prod) * l_second(param_prod))
chex.assert_trees_all_close(
jax.tree_leaves(disc_grads_values)[0],
expected_disc_grads_values,
rtol=1e-3,
atol=6e-2,
ignore_nones=True)
gen_grads_values = grad_calculator.gen_grads(
params, state, data_batch, init_rng, True, coeffs)
expected_gen_grads_values = - l_prime(param_prod) * disc_param
expected_gen_grads_values += other_norm_reg_value * (
gen_param * l_prime(param_prod) ** 2
+ gen_param ** 2 * disc_param * l_prime(
param_prod) * l_second(param_prod))
chex.assert_trees_all_close(
jax.tree_leaves(gen_grads_values)[0],
expected_gen_grads_values,
rtol=1e-3,
atol=6e-2,
ignore_nones=True)
if __name__ == '__main__':
absltest.main()
| discretisation_drift-main | dd_two_player_games/gan_grads_calculator_test.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Different estimators for the regularizers.
Functions in this file are used to estimate quantities of the form:
\nabla_{\phi} (
\nabla_{\theta} L_1(\theta, \phi) ^T
stop_grad(\nabla_{\theta} L_2(\theta, \phi)))
where L_1, L_2 are loss functions which contain expectations.
"""
import jax
import jax.numpy as jnp
from dd_two_player_games import utils
def _select_grad(grad_fn, grad_var):
return lambda *args: grad_fn(*args)._asdict()[grad_var]
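# A minimal usage sketch of `_select_grad` (hypothetical example, not used by
# the estimators below). It assumes, as in the rest of this file, that the
# gradient function returns a namedtuple with one entry per player.
def _select_grad_example():
  import collections  # Local import: only needed for this illustration.
  params_cls = collections.namedtuple('Params', ['disc', 'gen'])
  f = lambda p: p.disc ** 2 + 3. * p.gen
  disc_grad_fn = _select_grad(jax.grad(f), 'disc')
  # d(disc^2 + 3 * gen)/d(disc) evaluated at disc=2 is 4.
  return disc_grad_fn(params_cls(disc=2.0, gen=1.0))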
def biased_estimate_grad_fn(fn1, fn2, grad_var='disc'):
r"""Function which computes an unbiased estimate of the regularizer grads.
Estimates:
\nabla_{\phi} (
\nabla_{\theta} L_1(\theta, \phi) ^T \nabla_{\theta} L_2(\theta, \phi))
This returns a biased estimate, by using the same set of samples from the
expectations in L_1, and L_2 when computing them.
This estimator does not return the same results if multiple devices are used.
Args:
fn1: A function which will return the first set of gradients used for the
regularizers. These gradients will be backpropagated through.
Returns \nabla_{\theta} L_1(\theta, \phi).
fn2: A function which will return the second set of gradients used for the
regularizers. These gradients will be NOT backpropagated through.
Returns \nabla_{\theta} L_2(\theta, \phi).
grad_var: A string - 'disc' or 'gen' - the variable with respect to which
to compute the gradients.
Returns:
A gradient function which can be applied to compute the desired gradients.
"""
def estimate(params, state, data_batch, rng, is_training):
value1 = fn1(params, state, data_batch, rng, is_training)
value2 = fn2(params, state, data_batch, rng, is_training)
value2 = jax.lax.stop_gradient(value2)
return utils.tree_dot_product(value1, value2)
return _select_grad(jax.grad(estimate), grad_var)
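# A hedged sketch of the estimator interface (hypothetical example; the real
# `fn1`/`fn2` are per-player gradient functions produced by the GAN module,
# here replaced by functions that simply return the parameters):
def _biased_estimate_grad_fn_example():
  import collections  # Local import: only needed for this illustration.
  params_cls = collections.namedtuple('Params', ['disc', 'gen'])
  identity_fn = lambda params, state, data, rng, is_training: params
  grad_fn = biased_estimate_grad_fn(identity_fn, identity_fn, grad_var='disc')
  params = params_cls(disc=jnp.ones([2]), gen=jnp.ones([3]))
  # The estimate is <params, stop_grad(params)>, so its gradient with respect
  # to the discriminator parameters is stop_grad(params.disc), i.e. ones.
  return grad_fn(params, None, None, None, False)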
def unbiased_estimate_fn_different_device_results(fn1, fn2, grad_var='disc'):
return _unbiased_estimate_fn_cross_product_samples(
fn1, fn2, False, grad_var=grad_var)
def unbiased_estimate_fn_lowest_variance(fn1, fn2, grad_var='disc'):
return _unbiased_estimate_fn_cross_product_samples(
fn1, fn2, True, grad_var=grad_var)
def _unbiased_estimate_fn_cross_product_samples(
fn1, fn2, use_pmap_on_stop_grad_tree, grad_var='disc'):
r"""Function which computes an unbiased estimate of the regularizer grads.
Estimates:
\nabla_{\phi} (
\nabla_{\theta} L_1(\theta, \phi) ^T \nabla_{\theta} L_2(\theta, \phi))
  It computes:
    1/N^2 \sum_{i=1}^{N} \sum_{j=1}^{N}
        \nabla_{GRAD_VAR} (\nabla_{\theta} L_1(\theta, \phi)(x_{1,i})^T)
        \nabla_{\theta} L_2(\theta, \phi)(x_{2,j})
  where x_{1,i} and x_{2,j} are iid samples from the distributions
  required to compute L_1 and L_2.
  This returns an unbiased estimate, since independent sets of samples are
  used for the expectations in L_1 and L_2.
  This estimator does not return the same results if multiple devices are used.
  The variance of this estimator scales with 1/(N^2 P), where N is the batch
  size per device, and P is the number of devices.
Args:
fn1: A function which will return the first set of gradients used for the
regularizers. These gradients will be backpropagated through.
Returns \nabla_{\theta} L_1(\theta, \phi).
fn2: A function which will return the second set of gradients used for the
regularizers. These gradients will be NOT backpropagated through.
Returns \nabla_{\theta} L_2(\theta, \phi).
    use_pmap_on_stop_grad_tree: A boolean deciding whether or not to use
      pmap on the tree to which we apply stop_gradient. If True, this
      results in a lower variance estimate.
grad_var: A string - 'disc' or 'gen' - the variable with respect to which
to compute the gradients.
Returns:
A gradient function which can be applied to compute the desired gradients.
"""
def dot_prod(tree2, params, state, x1, rng1, is_training):
tree1 = fn1(params, state, x1, rng1, is_training)
tree2 = jax.lax.stop_gradient(tree2)
return utils.tree_dot_product(tree1, tree2)
def vmap_grad(params, state, data_batch, rng, is_training):
data1, data2 = jnp.split(data_batch, [int(data_batch.shape[0]/2),], axis=0)
    # Split rngs so that we can vmap the operations and obtain an unbiased
    # estimate. Note: this means that we will obtain a different sample
    # for each vmap, but due to the split we will also obtain a different
    # set of samples than those obtained in the forward pass.
rng1, rng2 = jax.random.split(rng, 2)
# The non gradient part gets computed and averaged first.
value2 = fn2(params, state, data2, rng2, is_training)
    # We pmean over all devices. This results in a lower variance
    # estimate but keeps the unbiased property of the estimator since
    # it avoids backpropagating through a gradient operation.
if use_pmap_on_stop_grad_tree:
value2 = jax.lax.pmean(value2, axis_name='i')
grad_fn = _select_grad(jax.grad(dot_prod, argnums=1), grad_var)
return grad_fn(value2, params, state, data1, rng1, is_training)
return vmap_grad
def biased_estimate_multiple_devices_grad_fn(fn1, fn2, grad_var='disc'):
return biased_estimate_general_grad_fn(fn1, fn2, True, grad_var=grad_var)
def biased_estimate_general_grad_fn(
fn1, fn2, use_pmap_on_stop_grad_tree, grad_var='disc'):
r"""Function which computes a biased estimate of the regularizer grads.
This biased estimate obtains the same results regardless of the number
of devices used.
Estimates:
\nabla_{\phi} (
\nabla_{\theta} L_1(\theta, \phi) ^T \nabla_{\theta} L_2(\theta, \phi))
  It computes:
    1/N^2 \sum_{i=1}^{N} \sum_{j=1}^{N}
        \nabla_{GRAD_VAR} (\nabla_{\theta} L_1(\theta, \phi)(x_i)^T)
        \nabla_{\theta} L_2(\theta, \phi)(x_j)
  where x_i and x_j are samples from the same batch, used to compute both
  L_1 and L_2.
  This returns a biased estimate, by using the same set of samples from the
  expectations in L_1 and L_2 when computing them.
Args:
fn1: A function which will return the first set of gradients used for the
regularizers. These gradients will be backpropagated through.
Returns \nabla_{\theta} L_1(\theta, \phi).
fn2: A function which will return the second set of gradients used for the
regularizers. These gradients will be NOT backpropagated through.
Returns \nabla_{\theta} L_2(\theta, \phi).
    use_pmap_on_stop_grad_tree: A boolean deciding whether or not to use
      pmap on the tree to which we apply stop_gradient. If True, this
      results in a lower variance estimate.
grad_var: A string - 'disc' or 'gen' - the variable with respect to which
to compute the gradients.
Returns:
A gradient function which can be applied to compute the desired gradients.
"""
def dot_prod(tree2, params, state, x, rng, is_training):
tree1 = fn1(params, state, x, rng, is_training)
tree2 = jax.lax.stop_gradient(tree2)
return utils.tree_dot_product(tree1, tree2)
def vmap_grad(params, state, data_batch, rng, is_training):
# The non gradient part gets computed and averaged first.
value2 = fn2(params, state, data_batch, rng, is_training)
    # We pmean over all devices. This results in a lower variance
    # estimate but keeps the unbiased property of the estimator since
    # it avoids backpropagating through a gradient operation.
if use_pmap_on_stop_grad_tree:
value2 = jax.lax.pmean(value2, axis_name='i')
grad_fn = _select_grad(jax.grad(dot_prod, argnums=1), grad_var)
return grad_fn(value2, params, state, data_batch, rng, is_training)
return vmap_grad
def two_data_estimate_grad_fn(fn1, fn2, grad_var='disc'):
def estimate(
params, state, data_batch1, data_batch2, rng1, rng2, is_training):
value1 = fn1(params, state, data_batch1, rng1, is_training)
value2 = fn2(params, state, data_batch2, rng2, is_training)
value2 = jax.lax.stop_gradient(value2)
return utils.tree_dot_product(value1, value2)
return _select_grad(jax.grad(estimate), grad_var)
| discretisation_drift-main | dd_two_player_games/regularizer_estimates.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computing inception score and FID using TF-GAN."""
from absl import logging
import jax
import jax.numpy as jnp
import numpy as np
import six
import tensorflow as tf
import tensorflow_gan as tfgan
import tensorflow_hub as tfhub
import tqdm
from dd_two_player_games import metric_utils as metrics
_one_key_split = lambda key: tuple(jax.random.split(key))
def split_key(key, no_pmap):
if no_pmap:
return _one_key_split(key)
else:
return jax.pmap(_one_key_split)(key)
def assert_data_ranges(data, eps=1e-3):
# Check data is in [-1, 1]
assert jnp.min(data) >= -1 - eps, jnp.min(data)
assert jnp.max(data) <= 1 + eps, jnp.max(data)
  # Check the data actually spans the [-1, 1] range (e.g. it was not left in
  # [0, 1]).
  assert jnp.min(data) < -0.01, jnp.min(data)
  assert jnp.max(data) > 0.01, jnp.max(data)
DEFAULT_DTYPES = {tfgan.eval.INCEPTION_OUTPUT: tf.float32,
tfgan.eval.INCEPTION_FINAL_POOL: tf.float32}
def classifier_fn_from_tfhub(
inception_model, output_fields, return_tensor=False):
"""Returns a function that can be as a classifier function.
Copied from tfgan but avoid loading the model each time calling _classifier_fn
Wrapping the TF-Hub module in another function defers loading the module until
use, which is useful for mocking and not computing heavy default arguments.
Args:
inception_model: A model loaded from TFHub.
output_fields: A string, list, or `None`. If present, assume the module
outputs a dictionary, and select this field.
return_tensor: If `True`, return a single tensor instead of a dictionary.
Returns:
A one-argument function that takes an image Tensor and returns outputs.
"""
if isinstance(output_fields, six.string_types):
output_fields = [output_fields]
def _classifier_fn(images):
output = inception_model(images)
if output_fields is not None:
output = {x: output[x] for x in output_fields}
if return_tensor:
assert len(output) == 1
output = list(output.values())[0]
return tf.nest.map_structure(tf.compat.v1.layers.flatten, output)
return _classifier_fn
class InceptionMetrics(object):
"""Object which holds onto the Inception net.
Data and samples need to be in [-1, 1].
"""
def __init__(self, dataset=None, num_splits=10,
check_data_ranges=False):
self.dataset = dataset
self.num_splits = num_splits
self.is_initialized = False # Delay some startup away from the constructor.
self.check_data_ranges = check_data_ranges
self.inception_model = None
logging.info(
'Splitting inception data/samples in %d splits', num_splits)
def initialize(self):
"""Load weights from CNS and pmap/construct the forward pass."""
if not self.is_initialized:
self.inception_model = tfhub.load(tfgan.eval.INCEPTION_TFHUB)
@tf.function
def run_inception(x):
classifier_fn = classifier_fn_from_tfhub(
self.inception_model,
output_fields=[
tfgan.eval.INCEPTION_OUTPUT,
tfgan.eval.INCEPTION_FINAL_POOL])
return tfgan.eval.run_classifier_fn(
x, classifier_fn, dtypes=DEFAULT_DTYPES)
def run_net(x):
size = tfgan.eval.INCEPTION_DEFAULT_IMAGE_SIZE
if x.ndim == 5:
# If we have an extra dimension because of pmap merge the
# batch and device dimension.
shape = x.shape
x = np.reshape(x, (-1, shape[2], shape[3], shape[4]))
x = tf.image.resize(x, [size, size])
inception_output = run_inception(x)
logits = inception_output['logits']
pool = inception_output['pool_3']
logits = tf.nn.log_softmax(logits)
return logits, pool
self.run_net = run_net
self.data_mus, self.data_sigmas = None, None
self.is_initialized = True
def get_metrics(
self, sampler, num_inception_images, batch_size, rng, no_pmap=True,
sample_postprocess_fn=None):
"""Calculate inception score and FID on a set of samples.
Args:
      sampler: A sample_utils.Sampler object which can generate samples.
num_inception_images: The number of samples to produce per metric.
In total, the number of samples used is `num_inception_images`
x `num_splits` in order to create standard deviations around the
metrics. A reasonable number is 10K. Note that for FID, the number
of samples is important since FID is a biased metric: the more
samples you have, the better the metric value. This does not entail
that the model performs better.
batch_size: The batch size to use on each iteration of the inception net,
taken as the overall batch size (rather than per-device).
rng: An rng to use with a haiku network.
no_pmap: Whether functions should operate assuming a num_devices axis.
      sample_postprocess_fn: The forward pass of the generator might produce
        more than just a single batch of images. This function, if not None,
        is applied to the output of the generator before it is passed to
        Inception.
Returns:
      A dict with the keys 'IS_mean', 'IS_std', 'FID' and 'FID_std'.
"""
self.initialize()
num_samples = num_inception_images * self.num_splits
logging.info(
'Number of samples used to compute each metric value: %d',
num_inception_images)
logging.info('Total number of samples used: %d', num_samples)
num_batches = int(np.ceil(num_samples / batch_size))
if self.data_mus is None:
self.data_mus, self.data_sigmas, _, _ = self.get_dataset_statistics(
self.dataset, num_batches=num_batches)
# Loop over all batches and get pool + logits
logits, pool = [], []
for _ in range(num_batches):
rng, step_rng = (split_key(rng, no_pmap) if rng is not None
else (None, None))
samples = sampler.sample_batch(batch_size, rng=step_rng)
if sample_postprocess_fn:
samples = sample_postprocess_fn(samples)
if self.check_data_ranges:
assert_data_ranges(samples)
this_logits, this_pool = self.run_net(samples) # pytype: disable=attribute-error
pool += [this_pool]
logits += [this_logits]
logits = np.concatenate(logits, 0)[:num_samples]
pool = np.concatenate(pool, 0)[:num_samples]
logging.info('Calculating FID...')
fid_mean, fid_std = metrics.calculate_frechet_distance_with_error_bars(
pool, self.data_mus, self.data_sigmas)
logging.info('Calculating Inception scores...')
is_mean, is_std = metrics.calculate_inception_scores_with_error_bars(
logits, self.num_splits, use_jax=False)
return {'IS_mean': is_mean, 'IS_std': is_std,
'FID': np.array(fid_mean), 'FID_std': np.array(fid_std)}
def get_dataset_statistics(self, dataset, num_batches):
"""Loop over all samples in a dataset and calc pool means and covs.
Args:
dataset: an iterator which returns appropriately batched images and
labels, with the images in [-1, 1] and shaped (device x NHWC)
num_batches: the number of batches to use for the computation of the
statistics.
Returns:
      The per-split means and covariances computed on the dataset, together
      with the concatenated pool features and logits.
"""
self.initialize()
batch = 0
logits, pool = [], []
for x in tqdm.tqdm(dataset):
if self.check_data_ranges:
assert_data_ranges(x)
if batch == num_batches:
break
this_logits, this_pool = self.run_net(x)
pool += [this_pool]
logits += [this_logits]
batch += 1
logging.info('Finished iterating, concatenating pool and logits')
pool = np.concatenate(pool, 0)
logits = np.concatenate(logits, 0)
logging.info('Pool shape %s, logits shape %s', pool.shape, logits.shape)
logging.info('Calculating Inception scores...')
IS_mean, IS_std = metrics.calculate_inception_scores_with_error_bars( # pylint: disable=invalid-name
logits, self.num_splits, use_jax=False)
logging.info('Inception score on dataset is %f +/- %f.', IS_mean, IS_std)
logging.info('Calculating means and sigmas on dataset...')
mus = []
sigmas = []
chunk_size = (pool.shape[0] // self.num_splits)
for index in range(self.num_splits):
mus.append(np.mean(
pool[index * chunk_size: (index + 1) * chunk_size], axis=0))
sigmas.append(np.cov(
pool[index * chunk_size: (index + 1) * chunk_size], rowvar=False))
return mus, sigmas, pool, logits
| discretisation_drift-main | dd_two_player_games/tfgan_inception_utils.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils test."""
from absl.testing import absltest
from dd_two_player_games import utils
class Utils(absltest.TestCase):
def test_any_non_zero(self):
self.assertEqual(utils.any_non_zero((0,)), 0)
self.assertEqual(utils.any_non_zero((0, 0, 0)), 0)
self.assertEqual(utils.any_non_zero((0, 0, 0.1)), 1)
self.assertEqual(utils.any_non_zero((0, 0, -0.1)), 1)
self.assertEqual(utils.any_non_zero((0.1, 0, -0.1)), 1)
if __name__ == '__main__':
absltest.main()
| discretisation_drift-main | dd_two_player_games/utils_test.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for training GANs using SGD (including explicit regularisation)."""
from jaxline import base_config
from ml_collections import config_dict
def get_config():
"""Return config object for training."""
config = base_config.get_base_config()
## Experiment config.
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
random_seed=0,
dataset='cifar10',
data_processor='ImageProcessor',
optimizers=dict(
discriminator=dict(
name='sgd',
clip=None,
lr=1e-2,
kwargs=dict(momentum=0.)),
generator=dict(
name='sgd',
clip=None,
lr=0.005,
kwargs=dict(momentum=0.)),
),
nets=dict( # See `nets.py`
discriminator='CifarDiscriminator',
disc_kwargs=dict(),
generator='CifarGenerator',
gen_kwargs=dict(),
),
losses=dict( # See `losses.py`
discriminator='discriminator_goodfellow_loss',
generator='generator_saturating_loss',
),
penalties=dict( # See `losses.py`
discriminator=None,
generator=None,
),
param_transformers=dict( # See `nets.py`
discriminator='spectral_norm',
generator=None,
),
training=dict(
simultaneous_updates=True,
runge_kutta_updates=False,
# One of: disc_first, gen_first.
alternating_player_order='disc_first',
estimator_fn='unbiased_estimate_fn_lowest_variance',
# estimator_fn='biased_estimate_multiple_devices_grad_fn',
batch_size=128,
rk_disc_regularizer_weight_coeff=0.,
grad_regularizes=dict(
dd_coeffs_multiplier=dict(
disc=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=-1.0,
),
gen=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=-1.0,
)),
explicit_non_dd_coeffs=dict(
disc=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=0.0,
),
gen=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=0.0,
))),
num_gen_updates=1,
num_disc_updates=1,
num_latents=128),
eval=dict(
run_image_metrics=True,
batch_size=16,
# The number of data/sample splits to be used for evaluation.
num_eval_splits=5,
num_inception_images=10000),
)))
## Training loop config.
config.interval_type = 'steps'
config.training_steps = int(3e5)
config.train_checkpoint_all_hosts = False
config.log_train_data_interval = 10
config.log_tensors_interval = 10
config.save_checkpoint_interval = 100
config.eval_specific_checkpoint_dir = ''
config.restore_path = ''
config.checkpoint_dir = '/tmp/dd_two_player_games'
config.best_model_eval_metric = 'IS_mean'
return config
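# A minimal usage sketch (assumption: this config is normally consumed by the
# jaxline launcher via `experiment.py` rather than instantiated by hand):
#
#   config = get_config()
#   exp_config = config.experiment_kwargs.config
#   exp_config.optimizers.discriminator.lr   # 1e-2
#   exp_config.training.batch_size           # 128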
| discretisation_drift-main | dd_two_player_games/sgd_cifar_config.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute GAN evaluation metrics."""
import jax.numpy as jnp
import numpy as np
import scipy.linalg
def calculate_inception_scores_with_error_bars(
pred, num_splits=10, use_jax=True):
"""Calculate the Inception Score using multiple splits.
Args:
pred: The logits produced when running InceptionV3 on a set of samples,
not including the FC layer's bias (for some reason), then passing said
logits through a log softmax.
num_splits: Number of splits to compute mean and std of IS over.
use_jax: Accelerate these calculations with jax?
Returns:
The mean and standard deviation of the Inception Score on each split.
"""
onp = jnp if use_jax else np
scores = []
chunk_size = (pred.shape[0] // num_splits)
for index in range(num_splits):
pred_chunk = pred[index * chunk_size: (index + 1) * chunk_size]
scores.append(calculate_inception_score(pred_chunk, use_jax=use_jax))
return onp.mean(scores), onp.std(scores)
def calculate_inception_score(pred, use_jax=True):
onp = jnp if use_jax else np
log_mean = onp.log(onp.expand_dims(onp.mean(onp.exp(pred), 0), 0))
kl_inception = onp.exp(pred) * (pred - log_mean)
kl_inception = onp.mean(onp.sum(kl_inception, 1))
return onp.exp(kl_inception)
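# A hypothetical sanity check (not part of the original module): uniform
# log-softmax predictions give an Inception Score of exactly 1.
def _inception_score_uniform_check(num_samples=64, num_classes=10):
  uniform_log_probs = np.full(
      (num_samples, num_classes), -np.log(num_classes), dtype=np.float32)
  return calculate_inception_score(uniform_log_probs, use_jax=False)  # ~1.0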
def calculate_frechet_distance_with_error_bars(
pool, data_mus, data_sigmas, use_jax=True):
"""Calculate the Frechet distance using multiple splits.
Args:
pool: The pool3 features produced when running InceptionV3 on a set of
samples.
data_mus: A list of `num_split` means obtained from the data,
to be compared with those obtained from samples.
data_sigmas: A list of `num_split` covariances obtained from the data,
to be compared with those obtained from samples.
use_jax: Accelerate these calculations with jax?
Returns:
    The mean and standard deviation of the Frechet distance across splits.
"""
assert len(data_mus) == len(data_sigmas)
num_splits = len(data_mus)
onp = jnp if use_jax else np
scores = []
chunk_size = (pool.shape[0] // num_splits)
for index in range(num_splits):
pool_chunk = pool[index * chunk_size: (index + 1) * chunk_size]
mean = onp.mean(pool_chunk, axis=0)
cov = onp.cov(pool_chunk, rowvar=False)
scores.append(calculate_frechet_distance(
mean, cov, data_mus[index], data_sigmas[index]))
return onp.mean(np.array(scores)), onp.std(np.array(scores))
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2):
"""JAX implementation of the Frechet Distance.
Taken from https://github.com/bioinf-jku/TTUR
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Args:
mu1: Sample mean over activations from distribution 1 (samples by default).
sigma1: Covariance matrix over activations from dist 1 (samples by default).
mu2: Sample mean over activations from distribution 2 (dataset by default).
sigma2: Covariance matrix over activations from dist 2 (dataset by default).
Returns:
The Frechet Distance.
"""
diff = mu1 - mu2
  # Compute the matrix square root of sigma1.dot(sigma2), keeping the real
  # part.
covmean = np.real(scipy.linalg.sqrtm(np.matmul(sigma1, sigma2)))
out = (jnp.dot(diff, diff) + jnp.trace(sigma1) + jnp.trace(sigma2)
- 2 * jnp.trace(covmean))
return out
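# A hypothetical sanity check (not part of the original module): the Frechet
# distance between a Gaussian and itself is zero up to numerical error.
def _frechet_distance_self_check(num_samples=256, num_features=8, seed=0):
  rng = np.random.RandomState(seed)
  acts = rng.randn(num_samples, num_features)
  mu = np.mean(acts, axis=0)
  sigma = np.cov(acts, rowvar=False)
  return calculate_frechet_distance(mu, sigma, mu, sigma)  # ~0.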
| discretisation_drift-main | dd_two_player_games/metric_utils.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiment file for GAN training."""
import collections
import datetime
import functools
import os
import signal
import threading
from absl import app
from absl import flags
from absl import logging
import dill
import jax
from jaxline import experiment
from jaxline import platform
from jaxline import utils as pipeline_utils
import numpy as np
import optax
from dd_two_player_games import data_utils
from dd_two_player_games import drift_utils
from dd_two_player_games import gan
from dd_two_player_games import gan_grads_calculator
from dd_two_player_games import losses
from dd_two_player_games import model_utils
from dd_two_player_games import nets
from dd_two_player_games import optim
from dd_two_player_games import regularizer_estimates
from dd_two_player_games import tfgan_inception_utils
from dd_two_player_games import utils
FLAGS = flags.FLAGS
UpdateDebugInfo = collections.namedtuple(
'UpdateDebugInfo', ['log_dict', 'grads', 'update'])
def _make_coeff_tuple(config):
disc_coeffs = drift_utils.PlayerRegularizationTerms(**config['disc'])
gen_coeffs = drift_utils.PlayerRegularizationTerms(**config['gen'])
return gan.GANTuple(disc=disc_coeffs, gen=gen_coeffs)
def get_explicit_coeffs(regularizer_config, dd_coeffs):
"""Obtain the coefficients used for explicit regularization."""
mul = _make_coeff_tuple(regularizer_config.dd_coeffs_multiplier)
dd_explicit = jax.tree_multimap(lambda x, y: x * y, mul, dd_coeffs)
user_explicit = _make_coeff_tuple(regularizer_config.explicit_non_dd_coeffs)
return jax.tree_multimap(lambda x, y: x + y, dd_explicit, user_explicit)
def sn_dict_for_logging(sn_dict):
"""Transform the spectral norm state for logging/display."""
res = {}
for k, v in sn_dict.items():
sn_index = k.find('sn_params_tree')
if sn_index > 0:
k = k[sn_index:]
res[k + '/sv'] = v['sigma']
return res
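# Illustration of `sn_dict_for_logging` (hypothetical key names): an entry such
# as {'some/prefix/sn_params_tree/linear': {'sigma': 1.7}} is logged as
# {'sn_params_tree/linear/sv': 1.7}.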
def _check_config(config):
if config.training.simultaneous_updates:
if (config.training.num_disc_updates != 1 or
config.training.num_gen_updates != 1):
raise ValueError(
'For simultaneous updates the number of updates per player has '
'to be 1!')
def _get_data_processor(config):
return getattr(data_utils, config.data_processor)
def _get_dataset(config, mode='train'):
"""Obtain dataset."""
# Note: we always use the 'train' split of the data.
# Currently mode only affects batch size.
assert mode in ('train', 'eval')
if mode == 'train':
global_batch_size = config.training.batch_size
else:
global_batch_size = config.eval.batch_size
num_devices = jax.device_count()
logging.info(
'Using %d devices, adjusting batch size accordingly!', num_devices)
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
local_device_count = jax.local_device_count()
logging.info('local_device_count: %d', local_device_count)
if config.dataset in ('mnist', 'cifar10'):
return data_utils.get_image_dataset(
config.dataset, 'train',
batch_dims=[local_device_count, per_device_batch_size],
processor=_get_data_processor(config),
shard_args=(jax.host_count(), jax.host_id()),
seed=config.random_seed)
else:
raise NotImplementedError(
'dataset {} not implemented'.format(config.dataset))
def _get_optim(player_optimizer_config):
"""Get optimizer (with learning rate scheduler)."""
if 'scheduler' not in player_optimizer_config:
optimizer = getattr(
optax, player_optimizer_config.name)(
player_optimizer_config.lr, **player_optimizer_config.kwargs)
scheduler = optax.constant_schedule(player_optimizer_config.lr)
return optimizer, scheduler
scheduler_kwargs = dict(player_optimizer_config.scheduler_kwargs)
scheduler_kwargs['boundaries_and_scales'] = {
int(k): v for k, v in scheduler_kwargs['boundaries_and_scales'].items()}
lr_schedule = getattr(optax, player_optimizer_config.scheduler)(
init_value=player_optimizer_config.lr, **scheduler_kwargs)
# Build the optimizers based on the learning rate scheduler.
if 'sgd' == player_optimizer_config.name:
clip = player_optimizer_config.clip
if clip:
optimizer = optax.chain(
optax.scale_by_schedule(lr_schedule),
optax.clip(clip), # Clipping only for sgd.
optax.scale(-1))
else:
optimizer = optax.chain(
optax.scale_by_schedule(lr_schedule),
optax.scale(-1))
elif 'momentum' == player_optimizer_config.name:
optimizer = optax.chain(
optax.trace(decay=player_optimizer_config.kwargs.momentum,
nesterov=False),
optax.scale_by_schedule(lr_schedule),
optax.scale(-1))
elif 'adam' == player_optimizer_config.name:
optimizer = optax.chain(
optax.scale_by_adam(**player_optimizer_config.kwargs),
optax.scale_by_schedule(lr_schedule),
optax.scale(-1))
else:
raise ValueError('Unsupported optimizer {}'.format(
player_optimizer_config.name))
return optimizer, lr_schedule
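# A hedged, standalone sketch of the optax pattern used above (not called by
# the experiment): a schedule-scaled SGD chain where `scale(-1)` turns the
# scaled gradients into descent updates.
def _sgd_chain_example(lr=1e-2):
  opt = optax.chain(
      optax.scale_by_schedule(optax.constant_schedule(lr)),
      optax.scale(-1))
  params = {'w': 1.0}
  grads = {'w': 2.0}
  opt_state = opt.init(params)
  updates, _ = opt.update(grads, opt_state)
  return optax.apply_updates(params, updates)  # {'w': 1.0 - lr * 2.0}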
def _get_optimizers(config):
"""Construct optimizer from config."""
disc_optimizer, disc_schedule = _get_optim(config.optimizers.discriminator)
gen_optimizer, gen_schedule = _get_optim(config.optimizers.generator)
optimizers = gan.GANTuple(disc=disc_optimizer, gen=gen_optimizer)
schedules = gan.GANTuple(disc=disc_schedule, gen=gen_schedule)
return optimizers, schedules
def _build_gan(config):
"""Build the GAN object."""
players = gan.GANTuple(
disc=getattr(nets, config.nets.discriminator),
gen=getattr(nets, config.nets.generator))
players_kwargs = gan.GANTuple(
disc=config.nets.disc_kwargs, gen=config.nets.gen_kwargs)
player_losses = gan.GANTuple(
disc=getattr(losses, config.losses.discriminator),
gen=getattr(losses, config.losses.generator))
if config.penalties.discriminator:
disc_penalty = gan.GANPenalty(
fn=getattr(losses, config.penalties.discriminator[0]),
coeff=config.penalties.discriminator[1])
else:
disc_penalty = None
player_penalties = gan.GANTuple(disc=disc_penalty, gen=None)
disc_transform = getattr(nets, config.param_transformers.discriminator)
player_param_transformers = gan.GANTuple(disc=disc_transform, gen=None)
return gan.GAN(
players, player_losses, player_penalties, player_param_transformers,
players_kwargs, config.training.num_latents)
class Experiment(experiment.AbstractExperiment):
"""GAN experiment."""
CHECKPOINT_ATTRS = {
'_params': 'params',
'_opt_state': 'opt_state',
'_state': 'state',
}
def __init__(self, mode, init_rng, config):
super().__init__(mode=mode, init_rng=init_rng)
# We control our own rngs, to avoid changes in jaxline that we do not
# control.
del init_rng
init_rng = jax.random.PRNGKey(config.random_seed)
self.mode = mode
self.init_params_rng = pipeline_utils.bcast_local_devices(init_rng)
# We need a different rng for each device since we want to obtain different
# samples from each device.
self.init_rng = jax.pmap(functools.partial(
pipeline_utils.specialize_rng_host_device, axis_name='i',
host_id=jax.host_id(),
mode='unique_host_unique_device'), axis_name='i')(self.init_params_rng)
_check_config(config)
self._train_dataset = _get_dataset(config)
self._eval_input = None
self._gan = _build_gan(config)
self._optimizers, self._lr_schedules = _get_optimizers(config)
self._data_processor = _get_data_processor(config)
self._simultaneous_updates = config.training.simultaneous_updates
self._runge_kutta_updates = config.training.runge_kutta_updates
self._num_updates = gan.GANTuple(
disc=config.training.num_disc_updates,
gen=config.training.num_gen_updates)
self._learning_rates = gan.GANTuple(
disc=config.optimizers.discriminator.lr,
gen=config.optimizers.generator.lr)
self._alternating_player_order = drift_utils.PlayerOrder[
config.training.alternating_player_order]
self._dd_coeffs = drift_utils.get_dd_coeffs(
self._alternating_player_order,
self._simultaneous_updates, self._learning_rates, self._num_updates)
self._explicit_coeffs = get_explicit_coeffs(
config.training.grad_regularizes, self._dd_coeffs)
self._estimator_fn = getattr(
regularizer_estimates, config.training.estimator_fn)
self._gan_grads_calculator = gan_grads_calculator.GANGradsCalculator(
self._gan, self._estimator_fn)
if self._simultaneous_updates:
if self._num_updates.disc != 1:
raise ValueError('The number of discriminator updates in simultaneous '
'training must be 1!')
if self._num_updates.gen != 1:
raise ValueError('The number of generator updates in simultaneous '
'training must be 1!')
# Always pmap the update steps.
self._discriminator_update = jax.pmap(
self._discriminator_update, axis_name='i',
static_broadcasted_argnums=(5))
self._generator_update = jax.pmap(
self._generator_update, axis_name='i',
static_broadcasted_argnums=(5))
if self._runge_kutta_updates:
self._runge_kutta = optim.RungeKutta(
self._gan_grads_calculator.both_player_grads,
config.training.runge_kutta_order)
# When it comes to RK updates, we have the choice of whether
# we want to add explicit regularization to each RK step (for this
# we use self._explicit_coeffs) or not apply the penalty to each RK step
# but add the explicit regularization gradients to the RK combined
# gradients.
# Note: while the explicit penalty is applied at the end of the RK step
# it is still applied to the initial parameters, rather than those
# obtained after the RK update has been performed.
self._rk_after_explicit_coeffs = _make_coeff_tuple(
config.training.grad_regularizes.rk_after_explicit_coeffs)
self._runge_kutta_update = jax.pmap(
self._runge_kutta_update, axis_name='i',
static_broadcasted_argnums=(8, 9, 10))
# Model parameters
self._params = None
self._opt_state = None
self._state = None
# eval state
self.eval_config = config.eval
self._run_image_metrics = config.eval.run_image_metrics
self._image_metrics = None
self._eval_dataset = _get_dataset(config, 'eval')
def _maybe_init_training(self):
data_batch = next(self._train_dataset)
# If we are starting training now (not from a saved checkpoint), then
# initialize parameters.
if not self._params:
logging.info('Initializing model parameters')
init_gan = jax.pmap(self._gan.initial_params, axis_name='i')
# Use the initial parameters rng to get the same initialization
# between all replicas.
self._params, self._state = init_gan(self.init_params_rng, data_batch)
self._opt_state = gan.GANTuple(
gen=jax.pmap(self._optimizers.gen.init, axis_name='i')(
self._params.gen),
disc=jax.pmap(self._optimizers.disc.init, axis_name='i')(
self._params.disc))
def new_rng(self, global_step):
"""Get a new rng.
    We fold in `global_step` so that we can split rngs inside a step without
    fear of reusing the split value at a later step.
Args:
global_step: The global step.
Returns:
A new RNG - or pmapped array of rngs.
"""
def split_rng(rng):
return tuple(jax.random.split(jax.random.fold_in(rng, global_step[0])))
self.init_rng, rng = jax.pmap(split_rng)(self.init_rng)
return rng
# Note: reads but does not modify the state of the object.
# Returns the new discriminator parameters and optimizer state which
# get updated in the `step` function.
def _discriminator_update(
self, data_batch, params, opt_state, state, rng_disc, is_training):
disc_grads, disc_gan_loss_aux = self._gan_grads_calculator.disc_grads(
params, state, data_batch, rng_disc, is_training, self._explicit_coeffs)
disc_update, disc_opt_state = self._optimizers.disc.update(
disc_grads, opt_state.disc)
new_disc_params = optax.apply_updates(params.disc, disc_update)
disc_debug_info = UpdateDebugInfo(
log_dict=disc_gan_loss_aux.log_dict,
grads=disc_grads, update=disc_update)
state = disc_gan_loss_aux.state
return new_disc_params, disc_opt_state, state, disc_debug_info
# Note: reads but does not modify the state of the object.
# Returns the new generator parameters and optimizer state which
# get updated in the `step` function.
def _generator_update(
self, data_batch, params, opt_state, state, rng_gen, is_training):
gen_grads, gen_gan_loss_aux = self._gan_grads_calculator.gen_grads(
params, state, data_batch, rng_gen, is_training, self._explicit_coeffs)
gen_update, gen_opt_state = self._optimizers.gen.update(
gen_grads, opt_state.gen)
new_gen_params = optax.apply_updates(params.gen, gen_update)
gen_debug_info = UpdateDebugInfo(
log_dict=gen_gan_loss_aux.log_dict, grads=gen_grads, update=gen_update)
state = gen_gan_loss_aux.state
return new_gen_params, gen_opt_state, state, gen_debug_info
def discriminator_step(self, params, opt_state, state, global_step):
for _ in range(self._num_updates.disc):
data_batch = next(self._train_dataset)
(disc_params, disc_opt_state,
state, disc_update_info) = self._discriminator_update(
data_batch, params, opt_state, state, self.new_rng(global_step),
True)
params = params._replace(disc=disc_params)
opt_state = opt_state._replace(disc=disc_opt_state)
return params, opt_state, state, disc_update_info
def generator_step(self, params, opt_state, state, global_step):
for _ in range(self._num_updates.gen):
data_batch = next(self._train_dataset)
(gen_params, gen_opt_state,
state, gen_update_info) = self._generator_update(
data_batch, params, opt_state, state, self.new_rng(global_step),
True)
params = params._replace(gen=gen_params)
opt_state = opt_state._replace(gen=gen_opt_state)
return params, opt_state, state, gen_update_info
def _runge_kutta_update(self, params, opt_state, step_size,
states, data_batches, generator_batches,
disc_rngs, gen_rngs, is_training,
inside_rk_explicit_coeffs,
after_rk_explicit_coeffs):
"""Runge Kutta update."""
rk_args = (states, data_batches, generator_batches,
disc_rngs, gen_rngs, is_training, inside_rk_explicit_coeffs)
gan_grads, gan_aux = self._runge_kutta.grad(params, step_size, *rk_args)
disc_reg_grads, non_zero_coeff = self._gan_grads_calculator.disc_explicit_regularization_grads(
params, states[0], data_batches[0],
disc_rngs[0], is_training, after_rk_explicit_coeffs)
disc_grads = utils.add_trees_with_coeff(
acc=gan_grads.disc,
mul=disc_reg_grads,
coeff=non_zero_coeff)
disc_grads = jax.lax.pmean(disc_grads, axis_name='i')
gen_reg_grads, non_zero_coeff = self._gan_grads_calculator.gen_explicit_regularization_grads(
params, states[0], generator_batches[0],
gen_rngs[0], is_training, after_rk_explicit_coeffs)
gen_grads = utils.add_trees_with_coeff(
acc=gan_grads.gen,
mul=gen_reg_grads,
coeff=non_zero_coeff)
gen_grads = jax.lax.pmean(gen_grads, axis_name='i')
disc_update, disc_opt_state = self._optimizers.disc.update(
disc_grads, opt_state.disc)
disc_params = optax.apply_updates(params.disc, disc_update)
gen_update, gen_opt_state = self._optimizers.gen.update(
gen_grads, opt_state.gen)
gen_params = optax.apply_updates(params.gen, gen_update)
params = gan.GANTuple(disc=disc_params, gen=gen_params)
opt_state = gan.GANTuple(disc=disc_opt_state, gen=gen_opt_state)
return params, opt_state, gan_aux
def _disc_lr(self, global_step):
return jax.pmap(self._lr_schedules.disc)(global_step)
def _runge_kutta_step(self, global_step):
# We use the discriminator schedule for RK updates.
order = self._runge_kutta.runge_kutta_order
    # CRUCIAL: these need to be tuples (especially the static `is_training`
    # arg), otherwise jax recompiles every call and the code becomes
    # extremely slow.
states = tuple([self._state] * order)
data_batches = tuple([next(self._train_dataset) for i in range(order)])
generator_batches = tuple([next(self._train_dataset) for i in range(order)])
disc_rngs = tuple([self.new_rng(global_step) for i in range(order)])
gen_rngs = tuple([self.new_rng(global_step) for i in range(order)])
is_training = tuple([True] * order)
inside_rk_explicit_coeffs = tuple([self._explicit_coeffs] * order)
# The coefficients after the RK step need no duplication since they
# will be applied outside the RK optimizer.
after_rk_explicit_coeffs = self._rk_after_explicit_coeffs
params, opt_state, gan_aux = self._runge_kutta_update(
self._params, self._opt_state, self._disc_lr(global_step),
states, data_batches, generator_batches,
disc_rngs, gen_rngs, is_training,
inside_rk_explicit_coeffs,
after_rk_explicit_coeffs)
self._params = params
self._opt_state = opt_state
# Since we are doing simultaneous updates, we are updating the states
# of the two players only after both updates.
player_states = gan.GANTuple(
disc=gan_aux.disc.state.players.disc,
gen=gan_aux.gen.state.players.gen)
param_transforms_states = gan.GANTuple(
disc=gan_aux.disc.state.param_transforms.disc,
gen=gan_aux.gen.state.param_transforms.gen)
state = gan.GANState(players=player_states,
param_transforms=param_transforms_states)
self._state = state
logged_dict = {}
logged_dict.update(gan_aux.disc.log_dict)
logged_dict.update(gan_aux.gen.log_dict)
return logged_dict
def _simultaneous_updates_step(self, global_step):
"""Perform a simultaneous update step.
Args:
global_step: The global step of this update.
Returns:
A dictionary of logged values.
Steps:
* Discriminator step:
* Compute discriminator gradients.
* Apply discriminator gradients.
* Update discriminator state.
* Generator step:
* Compute generator gradients using old discriminator parameters and
old discriminator state.
* Apply generator gradients.
* Update generator state.
"""
state = self._state
opt_state = self._opt_state
params = self._params
# Discriminator.
(params_from_disc_update, opt_state_from_disc_update,
state_from_disc_update, disc_update_info) = self.discriminator_step(
params, opt_state, state, global_step)
# Generator.
(params_from_gen_update, opt_state_from_gen_update,
state_from_gen_update, gen_update_info) = self.generator_step(
params, opt_state, state, global_step)
def pick_gan_tuple(disc_tuple, gen_tuple):
return gan.GANTuple(disc=disc_tuple.disc, gen=gen_tuple.gen)
self._params = pick_gan_tuple(
params_from_disc_update, params_from_gen_update)
self._opt_state = pick_gan_tuple(
opt_state_from_disc_update, opt_state_from_gen_update)
# Since we are doing simultaneous updates, we are updating the states
# of the two players only after both updates.
player_states = pick_gan_tuple(
state_from_disc_update.players, state_from_gen_update.players)
param_transforms_states = pick_gan_tuple(
state_from_disc_update.param_transforms,
state_from_gen_update.param_transforms)
state = gan.GANState(players=player_states,
param_transforms=param_transforms_states)
self._state = state
logged_dict = {}
logged_dict.update(disc_update_info.log_dict)
logged_dict.update(gen_update_info.log_dict)
return logged_dict
def _alternating_updates_step(self, global_step):
"""Perform an alternating updates step.
Args:
global_step: The global step of this update.
Returns:
A dictionary of logged values.
Steps:
* For the discriminator number of updates:
* Compute discriminator gradients.
* Apply discriminator gradients.
* Update discriminator state.
* For the generator number of updates:
* Compute generator gradients using new discriminator parameters and
new discriminator state.
* Apply generator gradients.
* Update generator state.
"""
state = self._state
opt_state = self._opt_state
params = self._params
logged_dict = {}
if self._alternating_player_order == drift_utils.PlayerOrder.disc_first:
step_fns = [self.discriminator_step, self.generator_step]
else:
step_fns = [self.generator_step, self.discriminator_step]
for player_step_fn in step_fns:
# Note: the generator update uses the new state obtained from the
# discriminator update but does *NOT* update the discriminator state,
# even though it does a discriminator forward pass.
# This implies that if methods like Spectral Normalization are used,
# the generator does a SN step so that it uses the normalized parameters,
# but does not change the value of the power iteration vectors.
# This is different compared to the Tensorflow implementation which used
# custom getters.
# Experiments show this makes no difference, and makes the theory more
# consistent, since we assume that the generator does not change the
# discriminator in its update.
params, opt_state, state, player_update_info = player_step_fn(
params, opt_state, state, global_step)
logged_dict.update(player_update_info.log_dict)
self._params = params
self._opt_state = opt_state
self._state = state
return logged_dict
def step(self, global_step, rng, **unused_kwargs):
"""Training step."""
del rng # We handle our own random number generation.
logging.info('Training step %d', global_step)
# Initialize parameters
self._maybe_init_training()
if self._simultaneous_updates:
update_log_dict = self._simultaneous_updates_step(global_step)
elif self._runge_kutta_updates:
update_log_dict = self._runge_kutta_step(global_step)
else:
update_log_dict = self._alternating_updates_step(global_step)
# We update the dictionary with information from the state.
update_log_dict.update(
sn_dict_for_logging(self._state.param_transforms.disc))
# For losses, we average across devices - the same as averaging over
# mini-batches.
# For SN singular values:
# Since we are doing sync training all SN values should be the same, but
# we average for numerical stability.
update_log_dict = utils.average_dict_values(update_log_dict)
return update_log_dict
def evaluate(self, global_step, rng, writer, **unused_args):
"""Evaluation step."""
del rng # We handle our own random number generation.
sampler = model_utils.Sampler(self._gan, self._params, self._state)
samples = sampler.sample_batch(
self.eval_config.batch_size, self.new_rng(global_step))
data_batch = next(self._eval_dataset)
if self._run_image_metrics:
if not self._image_metrics:
# No dataset hash, recompute dataset metrics.
self._image_metrics = tfgan_inception_utils.InceptionMetrics(
self._eval_dataset,
num_splits=self.eval_config.num_eval_splits)
eval_metrics = self._image_metrics.get_metrics(
sampler, self.eval_config.num_inception_images,
self.eval_config.batch_size, rng=self.new_rng(global_step),
no_pmap=False, sample_postprocess_fn=None)
eval_metrics = {k: float(v) for k, v in eval_metrics.items()}
else:
# Add dummy metrics so that jaxline does not complain that we do
# not provide the metrics required for the best checkpoint.
eval_metrics = {'IS_mean': 0, 'FID': 0}
logging.info(eval_metrics)
if writer:
global_step = np.array(pipeline_utils.get_first(global_step))
def process_for_display(images):
images = self._data_processor.postprocess(images)
assert len(images.shape) == 5
return np.concatenate(images, axis=0)
display_samples = process_for_display(samples)
display_data = process_for_display(data_batch)
writer.write_images(
global_step,
{'samples': display_samples,
'data': display_data,
'xd_samples': display_samples})
eval_metrics = utils.average_dict_values(eval_metrics)
return eval_metrics
def safe_dict_update(dict1, dict2):
for key, value in dict2.items():
if key in dict1:
raise ValueError('key already present {}'.format(key))
dict1[key] = value
def _restore_state_to_in_memory_checkpointer(restore_path):
"""Initializes experiment state from a checkpoint."""
# Load pretrained experiment state.
python_state_path = os.path.join(restore_path, 'checkpoint.dill')
with open(python_state_path, 'rb') as f:
pretrained_state = dill.load(f)
logging.info('Restored checkpoint from %s', python_state_path)
# Assign state to a dummy experiment instance for the in-memory checkpointer,
# broadcasting to devices.
dummy_experiment = Experiment(
mode='train', init_rng=0, config=FLAGS.config.experiment_kwargs.config)
for attribute, key in Experiment.CHECKPOINT_ATTRS.items():
setattr(dummy_experiment, attribute,
pipeline_utils.bcast_local_devices(pretrained_state[key]))
jaxline_state = dict(
global_step=pretrained_state['global_step'],
experiment_module=dummy_experiment)
snapshot = pipeline_utils.SnapshotNT(0, jaxline_state)
# Finally, seed the jaxline `pipeline_utils.InMemoryCheckpointer` global dict.
pipeline_utils.GLOBAL_CHECKPOINT_DICT['latest'] = pipeline_utils.CheckpointNT(
threading.local(), [snapshot])
def _get_step_date_label(global_step):
# Date removing microseconds.
date_str = datetime.datetime.now().isoformat().split('.')[0]
return f'step_{global_step}_{date_str}'
def _save_state_from_in_memory_checkpointer(
save_path, experiment_class: experiment.AbstractExperiment):
"""Saves experiment state to a checkpoint."""
logging.info('Saving model.')
for (checkpoint_name,
checkpoint) in pipeline_utils.GLOBAL_CHECKPOINT_DICT.items():
if not checkpoint.history:
logging.info('Nothing to save in "%s"', checkpoint_name)
continue
pickle_nest = checkpoint.history[-1].pickle_nest
global_step = pickle_nest['global_step']
state_dict = {'global_step': global_step}
for attribute, key in experiment_class.CHECKPOINT_ATTRS.items():
state_dict[key] = pipeline_utils.get_first(
getattr(pickle_nest['experiment_module'], attribute))
save_dir = os.path.join(
save_path, checkpoint_name, _get_step_date_label(global_step))
python_state_path = os.path.join(save_dir, 'checkpoint.dill')
os.makedirs(save_dir, exist_ok=True)
with open(python_state_path, 'wb') as f:
dill.dump(state_dict, f)
logging.info(
'Saved "%s" checkpoint to %s', checkpoint_name, python_state_path)
def _setup_signals(save_model_fn):
"""Sets up a signal for model saving."""
# Save a model on Ctrl+C.
def sigint_handler(unused_sig, unused_frame):
# Ideally, rather than saving immediately, we would then "wait" for a good
# time to save. In practice this reads from an in-memory checkpoint that
# only saves every 30 seconds or so, so chances of race conditions are very
# small.
save_model_fn()
logging.info(r'Use `Ctrl+\` to save and exit.')
# Exit on `Ctrl+\`, saving a model.
prev_sigquit_handler = signal.getsignal(signal.SIGQUIT)
def sigquit_handler(unused_sig, unused_frame):
# Restore previous handler early, just in case something goes wrong in the
# next lines, so it is possible to press again and exit.
signal.signal(signal.SIGQUIT, prev_sigquit_handler)
save_model_fn()
logging.info(r'Exiting on `Ctrl+\`')
# Re-raise for clean exit.
os.kill(os.getpid(), signal.SIGQUIT)
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGQUIT, sigquit_handler)
def main(argv, experiment_class: experiment.AbstractExperiment):
# Maybe restore a model.
restore_path = FLAGS.config.restore_path
if restore_path:
_restore_state_to_in_memory_checkpointer(restore_path)
# Maybe save a model.
save_dir = os.path.join(FLAGS.config.checkpoint_dir, 'models')
if FLAGS.config.one_off_evaluate:
save_model_fn = lambda: None # No need to save checkpoint in this case.
else:
save_model_fn = functools.partial(
_save_state_from_in_memory_checkpointer, save_dir, experiment_class)
_setup_signals(save_model_fn) # Save on Ctrl+C (continue) or Ctrl+\ (exit).
try:
platform.main(experiment_class, argv)
finally:
save_model_fn() # Save at the end of training or in case of exception.
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(lambda argv: main(argv, Experiment)) # pytype: disable=wrong-arg-types
| discretisation_drift-main | dd_two_player_games/experiment.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datasets for GAn training."""
import collections
import tensorflow as tf
import tensorflow_datasets as tfds
DataProcessor = collections.namedtuple(
'DataProcessor', ['preprocess', 'postprocess'])
def image_preprocess(image):
"""Convert to floats in [0, 1]."""
image = tf.cast(image['image'], tf.float32) / 255.0
# Scale the data to [-1, 1] to stabilize training.
return 2.0 * image - 1.0
def image_postprocess(image):
return (image + 1.) / 2
ImageProcessor = DataProcessor(image_preprocess, image_postprocess)
def get_image_dataset(
dataset_name, split, batch_dims, processor, shard_args, seed=1):
"""Read and shuffle dataset.
Args:
dataset_name: the name of the dataset as expected by Tensorflow datasets.
    split: `train`, `valid` or `test`.
batch_dims: the batch dimensions to be used for the dataset. Can be
an integer or a list of integers. A list of integers is often used if
parallel training is used.
processor: a `DataProcessor` instance.
    shard_args: Arguments to be passed to the TensorFlow datasets `shard`
      call. A tuple of size 2: the first element is the number of shards
      (often corresponding to the number of hosts), and the second element
      is the index of the shard to be used by the current host.
      In JAX, a common choice is (jax.host_count(), jax.host_id()).
seed: the random seed passed to `ds.shuffle`.
Returns:
An iterator of numpy arrays, with the leading dimensions given by
`batch_dims`.
"""
ds = tfds.load(dataset_name, split=split)
ds = ds.shard(*shard_args)
# Shuffle before repeat ensures all examples seen in an epoch.
# See https://www.tensorflow.org/guide/data_performance#repeat_and_shuffle.
ds = ds.shuffle(buffer_size=10000, seed=seed)
ds = ds.repeat()
ds = ds.map(processor.preprocess,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
for batch_size in batch_dims[::-1]:
ds = ds.batch(batch_size, drop_remainder=True)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds.as_numpy_iterator()
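# Illustrative usage sketch (not part of the training pipeline). The dataset
# name, per-device batch size and seed below are arbitrary choices for the
# example; in practice these values come from the experiment config.
def _example_get_image_dataset():
  import jax  # Deferred import, only needed for the sharding arguments.
  ds = get_image_dataset(
      dataset_name='cifar10', split='train',
      # Leading dimensions [num_local_devices, per_device_batch] for
      # pmap-style training.
      batch_dims=[jax.local_device_count(), 32],
      processor=ImageProcessor,
      shard_args=(jax.host_count(), jax.host_id()),
      seed=1)
  # For cifar10 this yields arrays of shape
  # [num_local_devices, 32, 32, 32, 3] with values in [-1, 1].
  return next(ds)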
| discretisation_drift-main | dd_two_player_games/data_utils.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for training GANs using RungeKutta."""
from jaxline import base_config
from ml_collections import config_dict
def get_config():
"""Return config object for training."""
config = base_config.get_base_config()
## Experiment config.
# Learning rate scheduler
# lr_scheduler_kwargs = {'501': 4, '400000': 0.5, '1000000': 0.5}
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
random_seed=0,
dataset='cifar10',
data_processor='ImageProcessor',
optimizers=dict(
discriminator=dict(
# scheduler='piecewise_constant_schedule',
# scheduler_kwargs=dict(
# boundaries_and_scales=lr_scheduler_kwargs),
name='sgd',
lr=1e-2,
kwargs=dict()),
generator=dict(
# scheduler='piecewise_constant_schedule',
# scheduler_kwargs=dict(
# boundaries_and_scales=lr_scheduler_kwargs),
name='sgd',
lr=1e-2,
kwargs=dict()),
),
nets=dict( # See `nets.py`
discriminator='CifarDiscriminator',
disc_kwargs=dict(),
generator='CifarGenerator',
gen_kwargs=dict(),
),
losses=dict( # See `losses.py`
discriminator='discriminator_goodfellow_loss',
generator='generator_saturating_loss',
),
penalties=dict( # See `losses.py`
discriminator=None,
generator=None,
),
param_transformers=dict( # See `nets.py`
discriminator='spectral_norm',
generator=None,
),
training=dict(
simultaneous_updates=False,
runge_kutta_updates=True,
alternating_player_order='disc_first',
estimator_fn='unbiased_estimate_fn_lowest_variance',
batch_size=128,
runge_kutta_order=4,
grad_regularizes=dict(
dd_coeffs_multiplier=dict(
disc=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=0.0,
),
gen=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=0.0,
)),
rk_after_explicit_coeffs=dict(
disc=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=0.0,
),
gen=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=0.0,
)),
explicit_non_dd_coeffs=dict(
disc=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=0.0,
),
gen=dict(
self_norm=0.0,
other_norm=0.0,
other_dot_prod=0.0,
))),
num_gen_updates=1,
num_disc_updates=1,
num_latents=128),
eval=dict(
run_image_metrics=True,
batch_size=16,
# The number of data/sample splits to be used for evaluation.
num_eval_splits=5,
num_inception_images=10000,),
)))
## Training loop config.
config.interval_type = 'steps'
config.training_steps = int(3e5)
config.log_tensors_interval = 100
config.save_checkpoint_interval = 100
# Debugging info
# Set the `init_ckpt_path` to a trained model for local training.
# config.init_ckpt_path = ''
# Change to evaluate a specific checkpoint
config.eval_specific_checkpoint_dir = ''
# Change to False if you want to test checkpointing.
config.delete_existing_local_checkpoints = True
config.best_model_eval_metric = 'IS_mean'
return config
| discretisation_drift-main | dd_two_player_games/ode_gan_cifar_config.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities."""
import numbers
import jax
import jax.numpy as jnp
import numpy as np
def reduce_sum_tree(tree):
tree = jax.tree_util.tree_map(jnp.sum, tree)
return jax.tree_util.tree_reduce(jnp.add, tree)
def tree_square_norm(tree):
return reduce_sum_tree(jax.tree_map(jnp.square, tree))
def tree_mul(tree, constant):
return jax.tree_map(lambda x: x * constant, tree)
def tree_add(tree1, tree2):
return jax.tree_multimap(lambda x, y: x + y, tree1, tree2)
def tree_diff(tree1, tree2):
return jax.tree_multimap(lambda x, y: x - y, tree1, tree2)
def tree_dot_product(tree1, tree2):
prods = jax.tree_multimap(lambda x, y: x * y, tree1, tree2)
return reduce_sum_tree(prods)
def param_size(tree):
return jax.tree_util.tree_reduce(
jnp.add, jax.tree_map(lambda x: x.size, tree))
def tree_shape(tree):
return jax.tree_map(lambda x: jnp.array(x.shape), tree)
def add_trees_with_coeff(*, acc, mul, coeff):
if coeff != 0:
acc = tree_add(acc, tree_mul(mul, coeff))
return acc
def batch_l2_norms(x, eps=1e-5, start_reduction=1):
reduction_axis = list(range(start_reduction, x.ndim))
squares = jnp.sum(jnp.square(x), axis=reduction_axis)
return jnp.sqrt(eps + squares)
def assert_tree_equal(actual, desired):
def assert_fn(x, y):
equal = jnp.array_equal(x, y)
if not equal:
print('pp_print')
print(x)
print(y)
return equal
jax.tree_multimap(assert_fn, actual, desired)
def any_non_zero(iterable_arg):
acc = 0
for arg in iterable_arg:
if not isinstance(arg, numbers.Number):
raise ValueError('Non numeric argument in any_non_zero!')
acc = acc or (arg != 0)
return int(acc)
def average_dict_values(metrics):
"""Average *all* values in all .
Args:
metrics: A dictionary from key to tree.
Returns:
A dictionary from key to tree, where each value of the tree has been
replaced with its mean across all axes.
"""
np_avg = lambda t: np.array(t).mean()
output = {}
for k, value in metrics.items():
output[k] = jax.tree_util.tree_map(np_avg, value)
return output
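# Minimal usage sketch for the tree helpers above (illustrative only; the
# toy parameter trees are arbitrary examples).
def _example_tree_utils():
  tree1 = {'w': jnp.ones((2, 2)), 'b': jnp.zeros((2,))}
  tree2 = {'w': 2. * jnp.ones((2, 2)), 'b': jnp.ones((2,))}
  sq_norm = tree_square_norm(tree1)       # 4.0: sum of squared entries.
  dot = tree_dot_product(tree1, tree2)    # 8.0.
  combined = add_trees_with_coeff(acc=tree1, mul=tree2, coeff=0.5)
  # Replaces every leaf by its mean across all axes.
  return average_dict_values(
      {'sq_norm': sq_norm, 'dot': dot, 'combined': combined})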
| discretisation_drift-main | dd_two_player_games/utils.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss functions used for discriminator and generator training."""
import collections
import jax
import jax.numpy as jnp
from dd_two_player_games import utils
LossOutput = collections.namedtuple(
'LossOutput', ['total', 'components'])
def binary_cross_entropy_loss(logits, targets):
"""Binary cross entropy loss between logit and targets.
Computes:
- (targets * log(sigmoid(x)) + (1 - targets) * log(1 - sigmoid(x)))
If targets = 1: log(sigmoid(x)) = - log(1 + exp(-x))
If targets = 0: log(1- sigmoid(x)) = - log(1 + exp(x))
Args:
logits: Logits jnp array.
targets: Binary jnp array. Same shape as `logits`.
Returns:
A jnp array with 1 element, the value of the binary cross entropy loss
given the input logits and targets.
"""
assert logits.shape == targets.shape
neg_log_probs = jnp.logaddexp(0., jnp.where(targets, -1., 1.) * logits)
assert neg_log_probs.shape == (logits.shape[0], 1), neg_log_probs.shape
return jnp.mean(neg_log_probs)
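# Quick illustrative check of the identities in the docstring above (the
# logits are arbitrary values chosen for the example): for targets == 1 the
# per-example loss is log(1 + exp(-x)); for targets == 0 it is log(1 + exp(x)).
def _example_binary_cross_entropy():
  logits = jnp.array([[0.], [2.]])
  loss_real = binary_cross_entropy_loss(logits, jnp.ones_like(logits))
  # mean(log(2), log(1 + exp(-2))) ~= 0.41
  loss_fake = binary_cross_entropy_loss(logits, jnp.zeros_like(logits))
  # mean(log(2), log(1 + exp(2))) ~= 1.41
  return loss_real, loss_fake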
def square_loss(logits, targets):
return jnp.mean((logits - targets) ** 2)
def square_loss_with_sigmoid_logits(logits, targets):
return jnp.mean((jax.nn.sigmoid(logits) - targets) ** 2)
### ************* Player losses.*************
# Discriminator and generator losses given by proper scoring rules.
def discriminator_scoring_rule_loss(scoring_rule_fn):
"""Constructs discriminator loss from scoring rule."""
  def discriminator_loss_fn(
discriminator_real_data_outputs, discriminator_sample_outputs):
samples_loss = scoring_rule_fn(
discriminator_sample_outputs,
jnp.zeros_like(discriminator_sample_outputs))
data_loss = scoring_rule_fn(
discriminator_real_data_outputs,
jnp.ones_like(discriminator_real_data_outputs))
loss = data_loss + samples_loss
loss_components = {
'disc_data_loss': data_loss,
'disc_samples_loss': samples_loss}
return LossOutput(loss, loss_components)
  return discriminator_loss_fn
def generator_scoring_rule_fn_loss(scoring_rule_fn):
"""Constructs generator loss from scoring rule."""
def generator_loss_fn(
discriminator_real_data_outputs, discriminator_sample_outputs):
del discriminator_real_data_outputs
loss = - scoring_rule_fn(
discriminator_sample_outputs,
jnp.zeros_like(discriminator_sample_outputs))
return LossOutput(loss, {'gen_samples_loss': loss})
return generator_loss_fn
def discriminator_goodfellow_loss(
discriminator_real_data_outputs, discriminator_sample_outputs):
"""Binary discriminator loss."""
return discriminator_scoring_rule_loss(binary_cross_entropy_loss)(
discriminator_real_data_outputs, discriminator_sample_outputs)
def generator_goodfellow_loss(
discriminator_real_data_outputs, discriminator_sample_outputs):
"""Generator loss proposed by the original GAN paper.
This generator loss corresponds to -log(D) (used in vanilla GAN).
  It was proposed by the original GAN paper as a substitute for the min-max
  loss, since it provides better gradients. Details at:
https://arxiv.org/abs/1406.2661.
Args:
discriminator_real_data_outputs: the output from a discriminator net on
real data.
discriminator_sample_outputs: the output from a discriminator net on
generated data.
Returns:
The non saturating loss of the generator (a jnp.array): - log (D).
"""
del discriminator_real_data_outputs # unused
loss = binary_cross_entropy_loss(
discriminator_sample_outputs, jnp.ones_like(discriminator_sample_outputs))
return LossOutput(loss, {'gen_samples_loss': loss})
def generator_saturating_loss(
discriminator_real_data_outputs, discriminator_sample_outputs):
"""Saturating generator loss proposed by the original GAN paper.
This loss corresponds to the min-max version of the game, and provides
worse gradients early on in training (when the generator is not performing
well).
Args:
discriminator_real_data_outputs: the output from a discriminator net on
real data.
discriminator_sample_outputs: the output from a discriminator net on
generated data.
Returns:
The saturating loss of the generator (a jnp.array): log (1 - D).
"""
return generator_scoring_rule_fn_loss(binary_cross_entropy_loss)(
discriminator_real_data_outputs, discriminator_sample_outputs)
def discriminator_square_loss(
discriminator_real_data_outputs, discriminator_sample_outputs):
"""Binary discriminator loss."""
# Real data is classified as 1, fake data classified as 0.
return discriminator_scoring_rule_loss(square_loss)(
discriminator_real_data_outputs, discriminator_sample_outputs)
def generator_square_loss(
discriminator_real_data_outputs, discriminator_sample_outputs):
return generator_scoring_rule_fn_loss(square_loss)(
discriminator_real_data_outputs, discriminator_sample_outputs)
def discriminator_square_loss_with_sigmoid(
discriminator_real_data_outputs, discriminator_sample_outputs):
"""Binary discriminator loss."""
# Real data is classified as 1, fake data classified as 0.
return discriminator_scoring_rule_loss(square_loss_with_sigmoid_logits)(
discriminator_real_data_outputs, discriminator_sample_outputs)
def generator_square_loss_with_sigmoid(
discriminator_real_data_outputs, discriminator_sample_outputs):
return generator_scoring_rule_fn_loss(square_loss_with_sigmoid_logits)(
discriminator_real_data_outputs, discriminator_sample_outputs)
def wasserstein_discriminator_loss(
discriminator_real_data_outputs, discriminator_sample_outputs):
"""Discriminator loss according to the Wasserstein loss."""
data_loss = jnp.mean(- discriminator_real_data_outputs)
samples_loss = jnp.mean(discriminator_sample_outputs)
loss = data_loss + samples_loss
loss_components = {
'disc_data_loss': data_loss,
'disc_samples_loss': samples_loss}
return LossOutput(loss, loss_components)
def wasserstein_generator_loss(discriminator_real_data_outputs,
discriminator_sample_outputs):
"""Wasserstein generator loss. See: https://arxiv.org/abs/1701.07875."""
  # Adding the discriminator output on real data makes the loss easier to
  # interpret; it does not affect the optimization of the generator.
data_loss = jnp.mean(discriminator_real_data_outputs)
samples_loss = jnp.mean(- discriminator_sample_outputs)
loss = data_loss + samples_loss
loss_components = {
'gen_data_loss': data_loss,
'gen_samples_loss': samples_loss}
return LossOutput(loss, loss_components)
### ************* Penalties. *************
def wgan_gradient_penalty_loss(disc_transform, rng, real_data, samples):
"""The gradient penalty loss on an interpolation of data and samples.
Proposed by https://arxiv.org/pdf/1704.00028.pdf.
Args:
disc_transform: Function which takes one argument, data, and returns the
output of the discriminator on that data. It has to be a transformation of
the discriminator haiku module (via functools.partial or a lambda) such
that the discriminator parameters are used, in order to be able to
take gradients.
rng: A JAX PRNG.
real_data: jnp.array, real data batch.
samples: jnp.array, samples batch.
Returns:
A scalar, the loss due to the gradient penalty.
"""
batch_size = real_data.shape[0]
# Coefficient for the linear interpolation.
# We need to compute the shape of the data so that we can pass in 2d codes
# in case we used the DiscriminatedAutoencoderOptimizer, where we have a
# code discriminator which operates on 2D data.
alpha_shape = [batch_size] + [1] * (real_data.ndim - 1)
alpha = jax.random.uniform(rng, shape=alpha_shape)
interpolation = real_data + alpha * (samples - real_data)
# Compute the gradients wrt to the output of the critic for each interpolated
# example.
fn = lambda x: jnp.squeeze(disc_transform(jnp.expand_dims(x, axis=0)))
grads = jax.vmap(jax.grad(fn))(interpolation)
assert grads.shape == interpolation.shape
slopes = utils.batch_l2_norms(grads)
assert slopes.shape == (batch_size,)
return jnp.mean((slopes - 1)**2)
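# Illustrative sketch of how `disc_transform` is typically constructed
# (assumed names: `disc_apply` is a discriminator apply function and `params`
# its parameters; neither is defined in this module, so adapt to the actual
# discriminator API).
def _example_gradient_penalty(disc_apply, params, rng, real_data, samples):
  disc_transform = lambda x: disc_apply(params, x)  # Parameters closed over.
  return wgan_gradient_penalty_loss(disc_transform, rng, real_data, samples)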
| discretisation_drift-main | dd_two_player_games/losses.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test implementations of regularizer estimates on 1 device."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import numpy as np
from dd_two_player_games import gan
from dd_two_player_games import losses
from dd_two_player_games import regularizer_estimates
class SimpleMLP(hk.nets.MLP):
def __init__(self, depth, hidden_size, out_dim, name='SimpleMLP'):
output_sizes = [hidden_size] * depth + [out_dim]
super(SimpleMLP, self).__init__(output_sizes, name=name)
def __call__(self, inputs, is_training=True):
del is_training
return super(SimpleMLP, self).__call__(inputs)
class MixtureOfGaussiansDataset():
def __init__(self, batch_size):
self.batch_size = batch_size
self.mog_mean = np.array([
[1.50, 1.50],
[1.50, 0.50],
[1.50, -0.50],
[1.50, -1.50],
[0.50, 1.50],
[0.50, 0.50],
[0.50, -0.50],
[0.50, -1.50],
[-1.50, 1.50],
[-1.50, 0.50],
[-1.50, -0.50],
[-1.50, -1.50],
[-0.50, 1.50],
[-0.50, 0.50],
[-0.50, -0.50],
[-0.50, -1.50]])
def __iter__(self):
return self
def __next__(self):
temp = np.tile(self.mog_mean, (self.batch_size // 16 + 1, 1))
mus = temp[0:self.batch_size, :]
return mus + 0.02 * np.random.normal(size=(self.batch_size, 2))
def init_gan(gan_module, data_batch):
init_params_rng = jax.random.PRNGKey(0)
rng = jax.random.PRNGKey(4)
# Params and data need to be over the number of batches.
params, state = gan_module.initial_params(init_params_rng, data_batch)
return params, state, data_batch, rng
REGULARIZER_TUPLES = [
('disc_loss_fn_disc_grads', 'disc_loss_fn_disc_grads', 'disc'),
('gen_loss_fn_gen_grads', 'gen_loss_fn_gen_grads', 'disc'),
('disc_loss_fn_gen_grads', 'gen_loss_fn_gen_grads', 'disc'),
('disc_loss_fn_disc_grads', 'disc_loss_fn_disc_grads', 'gen'),
('gen_loss_fn_disc_grads', 'disc_loss_fn_disc_grads', 'gen'),
('gen_loss_fn_gen_grads', 'gen_loss_fn_gen_grads', 'gen')]
IS_TRAINING_VALUES = [True, False]
DEFAULT_TEST_INPUT = [
x + (y,) for x in REGULARIZER_TUPLES for y in IS_TRAINING_VALUES] # pylint: disable=g-complex-comprehension
class RegularizerEstimatesTest(parameterized.TestCase):
@parameterized.parameters(DEFAULT_TEST_INPUT)
def test_biased_estimate_implementation_consistency_disc_norm(
self, fn1_name, fn2_name, grad_var, is_training):
batch_size = 128
dataset = MixtureOfGaussiansDataset(batch_size=batch_size)
data_batch = next(dataset)
players_hk_tuple = gan.GANTuple(disc=SimpleMLP, gen=SimpleMLP)
losses_tuple = gan.GANTuple(
disc=losses.discriminator_goodfellow_loss,
gen=losses.generator_goodfellow_loss)
gan_module = gan.GAN(
players_hk=players_hk_tuple, losses=losses_tuple,
penalties=gan.GANTuple(disc=None, gen=None),
player_param_transformers=gan.GANTuple(disc=None, gen=None),
players_kwargs=gan.GANTuple(
disc={'depth': 4, 'out_dim': 1, 'hidden_size': 25},
gen={'depth': 4, 'out_dim': 2, 'hidden_size': 25},),
num_latents=16)
fn1 = getattr(gan_module, fn1_name)
fn2 = getattr(gan_module, fn2_name)
grad_fn1 = regularizer_estimates.biased_estimate_general_grad_fn(
fn1, fn2, False, grad_var=grad_var)
grad_fn2 = regularizer_estimates.biased_estimate_grad_fn(
fn1, fn2, grad_var=grad_var)
(devices_params, devices_state,
devices_data_batch, devices_rng) = init_gan(gan_module, data_batch)
output1 = grad_fn1(
devices_params, devices_state, devices_data_batch, devices_rng,
is_training)
output2 = grad_fn2(
devices_params, devices_state, devices_data_batch, devices_rng,
is_training)
chex.assert_trees_all_close(output1, output2, rtol=1e-4, atol=1e-2)
if __name__ == '__main__':
absltest.main()
| discretisation_drift-main | dd_two_player_games/regularizer_estimates_test.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drift utils test."""
from absl.testing import absltest
from absl.testing import parameterized
from dd_two_player_games import drift_utils
from dd_two_player_games import gan
LEARNING_RATE_TUPLES = [
(0.01, 0.01),
(0.01, 0.05),
(0.05, 0.01),
(0.0001, 0.5)]
class DriftUtilsTest(parameterized.TestCase):
"""Test class to ensure drift coefficients are computed correctly.
Ensures that the drift coefficients in two-player games are
computed as for the math for:
* simultaneous updates.
* alternating updates (for both player orders).
"""
@parameterized.parameters(LEARNING_RATE_TUPLES)
def test_sim_updates(self, disc_lr, gen_lr):
# player order does not matter.
# the number of updates does not matter for simultaneous updates.
learning_rates = gan.GANTuple(disc=disc_lr, gen=gen_lr)
drift_coeffs = drift_utils.get_dd_coeffs(
None, True, learning_rates, num_updates=None)
self.assertEqual(drift_coeffs.disc.self_norm, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.disc.other_norm, 0.0)
self.assertEqual(drift_coeffs.disc.other_dot_prod, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.gen.self_norm, 0.5 * gen_lr)
self.assertEqual(drift_coeffs.gen.other_norm, 0.0)
self.assertEqual(drift_coeffs.gen.other_dot_prod, 0.5 * gen_lr)
@parameterized.parameters(LEARNING_RATE_TUPLES)
def test_alt_updates(self, disc_lr, gen_lr):
learning_rates = gan.GANTuple(disc=disc_lr, gen=gen_lr)
num_updates = gan.GANTuple(disc=1, gen=1)
drift_coeffs = drift_utils.get_dd_coeffs(
drift_utils.PlayerOrder.disc_first, False, learning_rates,
num_updates=num_updates)
self.assertEqual(drift_coeffs.disc.self_norm, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.disc.other_norm, 0.0)
self.assertEqual(drift_coeffs.disc.other_dot_prod, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.gen.self_norm, 0.5 * gen_lr)
self.assertEqual(drift_coeffs.gen.other_norm, 0.0)
self.assertEqual(
drift_coeffs.gen.other_dot_prod,
0.5 * gen_lr * (1 - 2 * disc_lr / gen_lr))
@parameterized.parameters(LEARNING_RATE_TUPLES)
def test_alt_updates_change_player_order(self, disc_lr, gen_lr):
learning_rates = gan.GANTuple(disc=disc_lr, gen=gen_lr)
num_updates = gan.GANTuple(disc=1, gen=1)
drift_coeffs = drift_utils.get_dd_coeffs(
drift_utils.PlayerOrder.gen_first, False, learning_rates,
num_updates=num_updates)
self.assertEqual(drift_coeffs.disc.self_norm, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.disc.other_norm, 0.0)
self.assertEqual(
drift_coeffs.disc.other_dot_prod,
0.5 * disc_lr * (1 - 2 * gen_lr / disc_lr))
self.assertEqual(drift_coeffs.gen.self_norm, 0.5 * gen_lr)
self.assertEqual(drift_coeffs.gen.other_norm, 0.0)
self.assertEqual(drift_coeffs.gen.other_dot_prod, 0.5 * gen_lr)
if __name__ == '__main__':
absltest.main()
| discretisation_drift-main | dd_two_player_games/drift_utils_test.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test implementations of regularizer estimates on multiple devices."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from dd_two_player_games import gan
from dd_two_player_games import losses
from dd_two_player_games import regularizer_estimates
# Use CPU for testing since we use chex to set multiple devices.
jax.config.update('jax_platform_name', 'cpu')
class SimpleMLP(hk.nets.MLP):
def __init__(self, depth, hidden_size, out_dim, name='SimpleMLP'):
output_sizes = [hidden_size] * depth + [out_dim]
super(SimpleMLP, self).__init__(output_sizes, name=name)
def __call__(self, inputs, is_training=True):
del is_training
return super(SimpleMLP, self).__call__(inputs)
class MixtureOfGaussiansDataset():
def __init__(self, batch_size):
self.batch_size = batch_size
self.mog_mean = np.array([
[1.50, 1.50],
[1.50, 0.50],
[1.50, -0.50],
[1.50, -1.50],
[0.50, 1.50],
[0.50, 0.50],
[0.50, -0.50],
[0.50, -1.50],
[-1.50, 1.50],
[-1.50, 0.50],
[-1.50, -0.50],
[-1.50, -1.50],
[-0.50, 1.50],
[-0.50, 0.50],
[-0.50, -0.50],
[-0.50, -1.50]])
def __iter__(self):
return self
def __next__(self):
temp = np.tile(self.mog_mean, (self.batch_size // 16 + 1, 1))
mus = temp[0:self.batch_size, :]
return mus + 0.02 * np.random.normal(size=(self.batch_size, 2))
def pmap_and_average(fn):
def mapped_fn(*args):
return jax.lax.pmean(fn(*args), axis_name='i')
return jax.pmap(mapped_fn, axis_name='i', static_broadcasted_argnums=4)
def bcast_local_devices(value):
"""Broadcasts an object to all local devices."""
devices = jax.local_devices()
return jax.tree_map(
lambda v: jax.device_put_sharded(len(devices) * [v], devices), value)
def init_gan_multiple_devices(gan_module, num_devices, data_batch, pmap=True):
batch_size = data_batch.shape[0]
if pmap:
reshaped_data_batch = np.reshape(
data_batch, (num_devices, batch_size // num_devices, -1))
init_gan = jax.pmap(gan_module.initial_params, axis_name='i')
else:
reshaped_data_batch = data_batch
init_gan = gan_module.initial_params
if pmap:
init_params_rng = bcast_local_devices(jax.random.PRNGKey(0))
# We use the same rng - note, this will not be the case in practice
# but we have no other way of controlling the samples.
rng = bcast_local_devices(jax.random.PRNGKey(4))
else:
init_params_rng = jax.random.PRNGKey(0)
rng = jax.random.PRNGKey(4)
  # Params and data need to be laid out over the number of devices.
params, state = init_gan(init_params_rng, reshaped_data_batch)
return params, state, reshaped_data_batch, rng
# Simple estimate that can be used for the 1 device computation of
# unbiased estimates.
def unbiased_estimate_1_device(fn1, fn2, grad_var='disc'):
two_data_fn = regularizer_estimates.two_data_estimate_grad_fn(
fn1, fn2, grad_var)
def vmap_grad(params, state, data_batch, rng, is_training):
data1, data2 = jnp.split(data_batch, [int(data_batch.shape[0]/2),], axis=0)
# Split rngs so that we can vmap the operations and obtained an unbiased
# estimate. Note: this means that we will obtain a different sample
# for each vmap, but due to the split we will also obtain a different
# set of samples than those obtained in the forward pass.
rng1, rng2 = jax.random.split(rng, 2)
return two_data_fn(params, state, data1, data2, rng1, rng2, is_training)
return vmap_grad
NUM_DEVICES = 4
REGULARIZER_TUPLES = [
('disc_loss_fn_disc_grads', 'disc_loss_fn_disc_grads', 'disc'),
('gen_loss_fn_gen_grads', 'gen_loss_fn_gen_grads', 'disc'),
('disc_loss_fn_gen_grads', 'gen_loss_fn_gen_grads', 'disc'),
('disc_loss_fn_disc_grads', 'disc_loss_fn_disc_grads', 'gen'),
('gen_loss_fn_disc_grads', 'disc_loss_fn_disc_grads', 'gen'),
('gen_loss_fn_gen_grads', 'gen_loss_fn_gen_grads', 'gen')]
IS_TRAINING_VALUES = [True, False]
DEFAULT_TEST_INPUT = [
x + (y,) for x in REGULARIZER_TUPLES for y in IS_TRAINING_VALUES] # pylint: disable=g-complex-comprehension
class RegularizerEstimatesMultipleEstimatesTest(parameterized.TestCase):
@parameterized.parameters(DEFAULT_TEST_INPUT)
def test_same_result_multiple_devices_biased(
self, fn1_name, fn2_name, grad_var, is_training):
batch_size = 128
dataset = MixtureOfGaussiansDataset(batch_size=batch_size)
data_batch = next(dataset)
players_hk_tuple = gan.GANTuple(disc=SimpleMLP, gen=SimpleMLP)
losses_tuple = gan.GANTuple(
disc=losses.discriminator_goodfellow_loss,
gen=losses.generator_goodfellow_loss)
gan_module = gan.GAN(
players_hk=players_hk_tuple, losses=losses_tuple,
penalties=gan.GANTuple(disc=None, gen=None),
player_param_transformers=gan.GANTuple(disc=None, gen=None),
players_kwargs=gan.GANTuple(
disc={'depth': 4, 'out_dim': 1, 'hidden_size': 25},
gen={'depth': 4, 'out_dim': 2, 'hidden_size': 25},),
num_latents=16)
fn1 = getattr(gan_module, fn1_name)
fn2 = getattr(gan_module, fn2_name)
# Multiple devices
chex.set_n_cpu_devices(NUM_DEVICES)
grad_fn = regularizer_estimates.biased_estimate_general_grad_fn(
fn1, fn2, True, grad_var=grad_var)
pmap_grad_fn = pmap_and_average(grad_fn)
(devices_params, devices_state,
devices_data_batch, devices_rng) = init_gan_multiple_devices(
gan_module, NUM_DEVICES, data_batch, pmap=True)
multiple_device_output = pmap_grad_fn(
devices_params, devices_state, devices_data_batch,
devices_rng, is_training)
# No pmap
no_pmap_grad_fn = regularizer_estimates.biased_estimate_general_grad_fn(
fn1, fn2, False, grad_var=grad_var)
(devices_params, devices_state,
devices_data_batch, devices_rng) = init_gan_multiple_devices(
gan_module, NUM_DEVICES, data_batch, pmap=False)
one_device_output = no_pmap_grad_fn(
devices_params, devices_state, devices_data_batch,
devices_rng, is_training)
chex.assert_trees_all_close(
jax.tree_map(lambda x: x[0], multiple_device_output),
one_device_output,
rtol=1e-3,
atol=6e-2,
ignore_nones=True)
@parameterized.parameters(DEFAULT_TEST_INPUT)
def test_same_result_multiple_devices_unbiased_consistency(
self, fn1_name, fn2_name, grad_var, is_training):
batch_size = 128
dataset = MixtureOfGaussiansDataset(batch_size=batch_size)
data_batch = next(dataset)
players_hk_tuple = gan.GANTuple(disc=SimpleMLP, gen=SimpleMLP)
losses_tuple = gan.GANTuple(
disc=losses.discriminator_goodfellow_loss,
gen=losses.generator_goodfellow_loss)
gan_module = gan.GAN(
players_hk=players_hk_tuple, losses=losses_tuple,
penalties=gan.GANTuple(disc=None, gen=None),
player_param_transformers=gan.GANTuple(disc=None, gen=None),
players_kwargs=gan.GANTuple(
disc={'depth': 4, 'out_dim': 1, 'hidden_size': 25},
gen={'depth': 4, 'out_dim': 2, 'hidden_size': 25},),
num_latents=16)
fn1 = getattr(gan_module, fn1_name)
fn2 = getattr(gan_module, fn2_name)
# Multiple devices
chex.set_n_cpu_devices(NUM_DEVICES)
grad_fn = regularizer_estimates.unbiased_estimate_fn_lowest_variance(
fn1, fn2, grad_var=grad_var)
pmap_grad_fn = pmap_and_average(grad_fn)
(devices_params, devices_state,
devices_data_batch, devices_rng) = init_gan_multiple_devices(
gan_module, NUM_DEVICES, data_batch, pmap=True)
multiple_device_output = pmap_grad_fn(
devices_params, devices_state, devices_data_batch,
devices_rng, is_training)
# No pmap
no_pmap_grad_fn = regularizer_estimates.unbiased_estimate_fn_different_device_results(
fn1, fn2, grad_var=grad_var)
(devices_params, devices_state,
devices_data_batch, devices_rng) = init_gan_multiple_devices(
gan_module, NUM_DEVICES, data_batch, pmap=False)
one_device_output = no_pmap_grad_fn(
devices_params, devices_state, devices_data_batch,
devices_rng, is_training)
chex.assert_trees_all_close(
jax.tree_map(lambda x: x[0], multiple_device_output),
one_device_output,
rtol=1e-3,
atol=5e-2)
@parameterized.parameters(DEFAULT_TEST_INPUT)
def test_same_result_multiple_devices_unbiased(
self, fn1_name, fn2_name, grad_var, is_training):
batch_size = 128
dataset = MixtureOfGaussiansDataset(batch_size=batch_size)
data_batch = next(dataset)
players_hk_tuple = gan.GANTuple(disc=SimpleMLP, gen=SimpleMLP)
losses_tuple = gan.GANTuple(
disc=losses.discriminator_goodfellow_loss,
gen=losses.generator_goodfellow_loss)
gan_module = gan.GAN(
players_hk=players_hk_tuple, losses=losses_tuple,
penalties=gan.GANTuple(disc=None, gen=None),
player_param_transformers=gan.GANTuple(disc=None, gen=None),
players_kwargs=gan.GANTuple(
disc={'depth': 4, 'out_dim': 1, 'hidden_size': 25},
gen={'depth': 4, 'out_dim': 2, 'hidden_size': 25},),
num_latents=16)
fn1 = getattr(gan_module, fn1_name)
fn2 = getattr(gan_module, fn2_name)
# Multiple devices
chex.set_n_cpu_devices(NUM_DEVICES)
grad_fn = regularizer_estimates.unbiased_estimate_fn_lowest_variance(
fn1, fn2, grad_var=grad_var)
pmap_grad_fn = pmap_and_average(grad_fn)
(devices_params, devices_state,
devices_data_batch, devices_rng) = init_gan_multiple_devices(
gan_module, NUM_DEVICES, data_batch, pmap=True)
multiple_device_output = pmap_grad_fn(
devices_params, devices_state, devices_data_batch,
devices_rng, is_training)
# No pmap
no_pmap_grad_fn = unbiased_estimate_1_device(fn1, fn2, grad_var=grad_var)
(devices_params, devices_state,
devices_data_batch, devices_rng) = init_gan_multiple_devices(
gan_module, NUM_DEVICES, data_batch, pmap=False)
one_device_output = no_pmap_grad_fn(
devices_params, devices_state, devices_data_batch,
devices_rng, is_training)
chex.assert_trees_all_close(
jax.tree_map(lambda x: x[0], multiple_device_output),
one_device_output,
rtol=1e-3,
atol=5e-2)
if __name__ == '__main__':
absltest.main()
| discretisation_drift-main | dd_two_player_games/regularizer_estimates_multiple_devices_test.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimizers and related utilities."""
from dd_two_player_games import utils
class RungeKutta(object):
"""Runge Kutta optimizer."""
def __init__(self, grad_fn, runge_kutta_order, same_args_to_grad_calls=False):
"""Initializes the RK object.
Args:
grad_fn: Gradient function. We assume the gradient function already
has called `pmean` if that is required.
      runge_kutta_order: The RK order. Supported values: 1, 2 and 4. Setting
        the order to 1 recovers plain SGD.
      same_args_to_grad_calls: whether or not to use the same arguments (i.e.
        data batch, or rng) for each call to grad inside the RK computation.
"""
self.runge_kutta_order = runge_kutta_order
self.same_args_to_grad_calls = same_args_to_grad_calls
self.grad_fn = grad_fn
if runge_kutta_order == 1:
self.grad_weights = [1.]
self.step_scale = []
if runge_kutta_order == 2:
self.grad_weights = [0.5, 0.5]
self.step_scale = [1.]
elif runge_kutta_order == 4:
self.grad_weights = [1. / 6., 1. / 3., 1. / 3., 1. / 6.]
self.step_scale = [0.5, 0.5, 1.]
def grad(self, params, step_size, *grad_fn_args):
"""Compute gradients at given parameters."""
def select_args(i):
if self.same_args_to_grad_calls:
# Use the first batch / set of arguments for every grad call.
return tuple([x[0] for x in grad_fn_args])
return tuple([x[i] for x in grad_fn_args])
step_grad, aux = self.grad_fn(params, *select_args(0))
grad = utils.tree_mul(step_grad, self.grad_weights[0])
for i in range(self.runge_kutta_order - 1):
step_params = utils.tree_add(
params,
utils.tree_mul(step_grad, -1 * self.step_scale[i] * step_size))
step_grad, _ = self.grad_fn(step_params, *select_args(i+1))
grad = utils.tree_add(
grad,
utils.tree_mul(step_grad, self.grad_weights[i+1]))
return grad, aux
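# Minimal usage sketch (illustrative only): the quadratic loss and the
# SGD-style parameter update below are arbitrary choices made to keep the
# example self-contained.
def _example_runge_kutta_step():
  import jax
  import jax.numpy as jnp
  def grad_fn(params, data):
    # The optimizer expects `grad_fn` to return (gradients, aux).
    loss = lambda p: jnp.sum((p - data) ** 2)
    return jax.grad(loss)(params), {}
  rk = RungeKutta(grad_fn, runge_kutta_order=4)
  params = jnp.zeros(3)
  step_size = 0.1
  # One `grad_fn` argument per RK stage (4 stages for order 4), stacked along
  # the leading axis so that `select_args` can index them.
  per_stage_data = jnp.stack([jnp.ones(3)] * 4)
  grads, _ = rk.grad(params, step_size, per_stage_data)
  return utils.tree_add(params, utils.tree_mul(grads, -step_size))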
| discretisation_drift-main | dd_two_player_games/optim.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for training GANs on MNIST with SGD."""
from jaxline import base_config
from ml_collections import config_dict
def get_config():
"""Return config object for training."""
config = base_config.get_base_config()
## Experiment config.
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
random_seed=0,
dataset='mnist',
num_eval_samples=100,
data_processor='ImageProcessor',
optimizers=dict(
discriminator=dict(
name='sgd',
kwargs=dict(momentum=0.),
lr=1e-2),
generator=dict(
name='sgd',
kwargs=dict(momentum=0.),
lr=1e-2),
),
nets=dict( # See `nets.py`
discriminator='MnistDiscriminator',
generator='MnistGenerator',
disc_kwargs=dict(),
gen_kwargs=dict(),
),
losses=dict( # See `losses.py`
discriminator='discriminator_goodfellow_loss',
generator='generator_saturating_loss',
),
penalties=dict( # See `losses.py`
discriminator=None,
generator=None,
),
param_transformers=dict( # See `nets.py`
discriminator='no_op',
generator=None,
),
training=dict(
simultaneous_updates=True,
runge_kutta_updates=False,
estimator_fn='biased_estimate_grad_fn',
batch_size=64,
rk_disc_regularizer_weight_coeff=0.,
grad_regularizes=dict(
dd_coeffs_multiplier=dict(
disc_reg_disc_norm=0.0,
disc_reg_gen_dot_prod=0.0,
gen_reg_disc_dot_prod=0.0,
gen_reg_gen_norm=0.0,
gen_reg_disc_norm=0.0,
disc_reg_gen_norm=0.0,
),
explicit_non_dd_coeffs=dict(
disc_reg_disc_norm=0.0,
disc_reg_gen_dot_prod=0.0,
gen_reg_disc_dot_prod=0.0,
disc_reg_gen_norm=0.0,
gen_reg_disc_norm=0.0,
gen_reg_gen_norm=0.0)),
num_gen_updates=1,
num_disc_updates=1,
num_latents=128),
eval=dict(
run_image_metrics=False,
batch_size=16,
# The number of data/sample splits to be used for evaluation.
num_eval_splits=5,
num_inception_images=10000),
)))
## Training loop config.
config.training_steps = int(1e5)
config.log_train_data_interval = 60
config.log_tensors_interval = 60
config.save_checkpoint_interval = 300
config.eval_specific_checkpoint_dir = ''
return config
| discretisation_drift-main | dd_two_player_games/sgd_mnist_config.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks to train GANs."""
import functools
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
def spectral_norm():
# Ignore biases.
return hk.SNParamsTree(ignore_regex='.*b$', eps=1e-12, n_steps=1)
def no_op():
return NoOpTree()
class NoOpTree(hk.Module):
"""No op parameter transformer."""
def __init__(self, track_singular_values=True):
"""Initializes an NoOpTree module."""
super().__init__(name='NoOpTree')
self._track_singular_values = track_singular_values
if self._track_singular_values:
self._sn_tree = spectral_norm()
def __call__(self, tree, update_stats=True):
if self._track_singular_values:
# We do not use the return result, but want to update the states in case
# we want to track the singular values without SN.
self._sn_tree(tree, update_stats=update_stats)
# No change to the input parameters.
return tree
class ConvNet2DTranspose(hk.Module):
"""Conv Transposed Net. Does not activate last layer."""
def __init__(self,
output_channels,
strides,
kernel_shapes,
use_batch_norm=False,
batch_norm_config=None,
name=None):
super().__init__(name=name)
if len(strides) == 1:
strides = strides * len(output_channels)
if len(output_channels) != len(strides):
raise ValueError('The length of the output_channels and strides has'
'to be the same but was {} and {}'.format(
len(output_channels), len(strides)))
if len(kernel_shapes) == 1:
if len(kernel_shapes[0]) != 2:
raise ValueError('Invalid kernel shapes {}'.format(kernel_shapes))
kernel_shapes = kernel_shapes * len(output_channels)
self._output_channels = output_channels
self._strides = strides
self._kernel_shapes = kernel_shapes
self._num_layers = len(output_channels)
self._use_batch_norm = use_batch_norm
self._batch_norm_config = batch_norm_config
def __call__(self, x, is_training=True, test_local_stats=False):
for layer in range(self._num_layers):
x = hk.Conv2DTranspose(
output_channels=self._output_channels[layer],
kernel_shape=self._kernel_shapes[layer],
stride=self._strides[layer],
padding='SAME')(x)
if layer != self._num_layers - 1:
if self._use_batch_norm:
bn = hk.BatchNorm(**self._batch_norm_config)
x = bn(x, is_training=is_training, test_local_stats=test_local_stats)
x = jax.nn.relu(x)
return x
class ConvNet2D(hk.Module):
"""Conv network."""
def __init__(self,
output_channels,
strides,
kernel_shapes,
activation,
name=None):
super().__init__(name=name)
if len(output_channels) != len(strides):
raise ValueError('The length of the output_channels and strides has'
'to be the same but was {} and {}'.format(
len(output_channels), len(strides)))
if len(kernel_shapes) == 1:
if len(kernel_shapes[0]) != 2:
raise ValueError('Invalid kernel shapes {}'.format(kernel_shapes))
kernel_shapes = kernel_shapes * len(output_channels)
self._output_channels = output_channels
self._strides = strides
self._kernel_shapes = kernel_shapes
self._activation = activation
self._num_layers = len(output_channels)
def __call__(self, x, is_training=True):
del is_training
for layer in range(self._num_layers):
x = hk.Conv2D(output_channels=self._output_channels[layer],
kernel_shape=self._kernel_shapes[layer],
stride=self._strides[layer],
padding='SAME')(x)
if layer != self._num_layers -1:
x = self._activation(x)
return x
class CifarGenerator(hk.Module):
"""As in the SN paper."""
def __init__(self, name='CifarGenerator'):
super().__init__(name=name)
def __call__(self, inputs, is_training=True, test_local_stats=False):
batch_size = inputs.shape[0]
first_shape = [4, 4, 512]
up_tensor = hk.Linear(np.prod(first_shape))(inputs)
first_tensor = jnp.reshape(up_tensor, [batch_size] + first_shape)
net = ConvNet2DTranspose(
output_channels=[256, 128, 64, 3],
kernel_shapes=[(4, 4), (4, 4), (4, 4), (3, 3)],
strides=[2, 2, 2, 1],
use_batch_norm=True,
batch_norm_config={
'create_scale': True, 'create_offset': True, 'decay_rate': 0.999,
'cross_replica_axis': 'i'})
output = net(first_tensor, is_training=is_training,
test_local_stats=test_local_stats)
return jnp.tanh(output)
class CifarDiscriminator(hk.Module):
"""Spectral normalization discriminator (metric) architecture."""
def __init__(self):
super().__init__(name='CifarDiscriminator')
def __call__(self, inputs, is_training=True):
activation = functools.partial(jax.nn.leaky_relu, negative_slope=0.1)
net = ConvNet2D(
output_channels=[64, 64, 128, 128, 256, 256, 512],
kernel_shapes=[
(3, 3), (4, 4), (3, 3), (4, 4), (3, 3), (4, 4), (3, 3)],
strides=[1, 2, 1, 2, 1, 2, 1],
activation=activation)
output = hk.Linear(1)(
hk.Flatten()(activation(net(inputs, is_training=is_training))))
return output
class MnistGenerator(hk.Module):
"""MNIST Generator network."""
def __init__(
self, last_layer_activation=jnp.tanh, output_channels=(32, 1)):
super().__init__(name='MnistGenerator')
self._output_channels = output_channels
self._last_layer_activation = last_layer_activation
def __call__(self, x, is_training=True, test_local_stats=False):
"""Maps noise latents to images."""
del is_training
x = hk.Linear(7 * 7 * 64)(x)
x = jnp.reshape(x, x.shape[:1] + (7, 7, 64))
x = jax.nn.relu(x)
x = ConvNet2DTranspose(
output_channels=self._output_channels,
kernel_shapes=[[5, 5]],
strides=[2])(x)
return self._last_layer_activation(x)
class MnistDiscriminator(hk.Module):
"""MNIST Discriminator network."""
def __init__(self):
super().__init__(name='MnistDiscriminator')
def __call__(self, x, is_training=True):
del is_training
activation = functools.partial(jax.nn.leaky_relu, negative_slope=0.2)
net = ConvNet2D(
output_channels=[8, 16, 32, 64, 128],
kernel_shapes=[[5, 5]],
strides=[2, 1, 2, 1, 2],
activation=activation)
return hk.Linear(1)(hk.Flatten()(activation(net(x))))
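# Illustrative sketch of transforming one of the networks above with Haiku
# (the latent size of 128 and batch size of 2 are arbitrary example values).
def _example_mnist_generator_forward():
  def forward(z, is_training=True):
    return MnistGenerator()(z, is_training=is_training)
  forward_t = hk.transform_with_state(forward)
  z = jnp.zeros((2, 128))
  params, state = forward_t.init(jax.random.PRNGKey(0), z)
  images, _ = forward_t.apply(params, state, None, z)
  return images  # Shape [2, 28, 28, 1], values in [-1, 1] due to the tanh.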
| discretisation_drift-main | dd_two_player_games/nets.py |
# Copyright 2021 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the drift."""
import collections
import enum
import jax
from dd_two_player_games import gan
class PlayerOrder(enum.Enum):
disc_first = 0
gen_first = 1
PlayerRegularizationTerms = collections.namedtuple(
'PlayerRegularizationTerms', [
'self_norm', # IDD
'other_dot_prod', # IDD
'other_norm', # Used for other regularizers, such as ODE-GAN.
]
)
def get_dd_coeffs(
alternating_player_order, simultaneous_updates,
learning_rates, num_updates):
"""Obtain the implicit regularization coefficients."""
if simultaneous_updates:
def coeffs(lr):
return PlayerRegularizationTerms(
self_norm=1./2 * lr,
other_norm=0,
other_dot_prod=1./2 * lr)
return jax.tree_map(coeffs, learning_rates)
else:
if alternating_player_order == PlayerOrder.disc_first:
first_player = 'disc'
second_player = 'gen'
else:
first_player = 'gen'
second_player = 'disc'
first_player_lr = getattr(learning_rates, first_player)
second_player_lr = getattr(learning_rates, second_player)
first_player_coeffs = PlayerRegularizationTerms(
self_norm=1./(2 * getattr(num_updates, first_player)) * first_player_lr,
other_norm=0,
other_dot_prod=1./2 * first_player_lr)
lr_ratio = first_player_lr / second_player_lr
second_player_coeffs = PlayerRegularizationTerms(
self_norm=1./(2 * getattr(
num_updates, second_player)) * second_player_lr,
other_norm=0,
other_dot_prod=-1./2 * second_player_lr *(2 * lr_ratio - 1))
return gan.GANTuple(** {first_player: first_player_coeffs,
second_player: second_player_coeffs})
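# Illustrative sketch (the learning rates are arbitrary example values): for
# simultaneous updates each player's coefficients depend only on its own
# learning rate, matching the expectations in `drift_utils_test.py`.
def _example_dd_coeffs():
  learning_rates = gan.GANTuple(disc=0.01, gen=0.05)
  coeffs = get_dd_coeffs(
      None, True, learning_rates, num_updates=None)
  # coeffs.disc.self_norm == coeffs.disc.other_dot_prod == 0.5 * 0.01,
  # coeffs.disc.other_norm == 0 (and similarly for the generator).
  return coeffs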
| discretisation_drift-main | dd_two_player_games/drift_utils.py |
"""Copyright 2023 DeepMind Technologies Limited.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions
and limitations under the License.
The code below provides the implementation for the experiments used in
"On a continuous time model of gradient descent dynamics and instability in
deep learning", TMLR 2023 when using DAL instead of vanilla gradient descent.
"""
import jax
import jax.numpy as jnp
import numpy as np
class DriftAdaptiveLearningRateScaler(object):
r"""Class which scales the gradient proportionally to the norm of the drift.
This class implements the DAL algorithm here: https://arxiv.org/abs/2302.01952
Note: the user can specify either:
* grad_fn: the gradient function of the model.
Can be obtained using jax.grad.
* hvp_fn: the hessian_vector_product of the model. We provide an
implementation of hvp below.
The reason we do not compute these here is that for large batch sizes,
the user might want to accumulate gradients over batches before passing it
here. We do that in the full batch case implemented in the paper, but not
for stochastic gradient descent. Similarly, if running over multiple devices
via `pmap`, the user has to ensure that grad_fn and hvp_fn have been wrapped
in `pmap` accordingly before creating this object.
scaling_power: denotes the `p` in the paper. If 0, vanilla SGD is used.
Note: static_lr below has no effect if DAL is used, that is, if scaling_power
is not 0. We support it here so that we can also implement vanilla SGD
with the same optimiser.
We know that the total drift of gradient descent in one area is given by
lr * || H(\theta') g(\theta')|| for a certain \theta' in the neighborhood of
the current parameters \theta. We use this to get an idea of the size of the
drift in certain areas, by using lr * ||H(\theta) g(\theta)|| as a guide.
We then scale the size of the gradient by 1/ (lr * ||H(\theta) g(\theta)||^p),
where p is a hyperparameter chosen by the user. For p=0, no scaling occurs.
For p = 1, there is a scaling proportional to Hg.
"""
def __init__(
self, hvp_fn, grad_fn,
scaling_power,
static_lr,
max_lr_scale=5,
scaling_start=None, scaling_stop=None, always_log_h_g_norm=True):
self.hvp_fn = hvp_fn
self.grad_fn = grad_fn
self.scaling_power = scaling_power
self.static_lr = static_lr
self.max_lr_scale = max_lr_scale
self.scaling_start = scaling_start if scaling_start else -1
self.scaling_stop = scaling_stop if scaling_stop else np.inf
self.always_log_h_g_norm = always_log_h_g_norm
if hvp_fn and grad_fn:
raise ValueError(
'Either use hvp_fn or grad_fn as an argument for '
'DriftAdaptiveLearningRateScaler')
def _h_g(self, params, grads, *grad_fn_args):
if self.hvp_fn:
return self.hvp_fn(params, grads, *grad_fn_args)
return hg_finite_differences_approx(self.grad_fn)(
params, grads, *grad_fn_args)
def scale_grads(self, grads, params, iteration, *grad_fn_args):
"""Compute (scaled) gradients at given parameters."""
# Fixed coefficient (not the result of a jax operation) such that
# multioptimiser knows the number of steps and returns all intermediate
# results for static learning rates.
h_g = self._h_g(params, grads, *grad_fn_args)
    h_g_norm = jnp.sqrt(tree_square_norm(h_g))
if self.scaling_power == 0:
# No learning rate scaling.
return grads, 1., h_g_norm if self.always_log_h_g_norm else -1.
    grad_norm = jnp.sqrt(tree_square_norm(grads))
max_scale = self.max_lr_scale / self.static_lr
lr_scale = self._adaptive_lr_multiplier(
h_g_norm/grad_norm, iteration, max_scale)
    grads = tree_mul(grads, lr_scale)
return grads, lr_scale, h_g_norm
def _adaptive_lr_multiplier(self, val, iteration, max_scale):
def scale(v):
denom = self.static_lr * jnp.power(v, self.scaling_power) /2.
return jnp.clip(1 / denom, 0., max_scale)
return jax.lax.cond(
(iteration > self.scaling_start) & (iteration < self.scaling_stop),
scale,
lambda _: 1., val) # No learning rate scaling outside the interval.
def hg_finite_differences_approx(grad_fn, epsilon=None):
r"""Estimate Hg via finite differences.
  Hg \approx (
      \nabla_{\theta}E(\theta + \epsilon g) - \nabla_{\theta}E(\theta)) / \epsilon
Approximation used by https://arxiv.org/abs/2109.14119.
Args:
grad_fn: The gradient function (backward pass).
    epsilon: The offset. If None, it is set to
      0.01 / ||\nabla_{\theta}E(\theta)||.
Returns:
A function which given a set of parameters and gradients returns the
approximation above to the gradient norm.
"""
def finite_differences_fn(params, grads, *args):
grad_sq_norm = tree_square_norm(grads)
e = epsilon if epsilon else 0.01 * jax.lax.rsqrt(grad_sq_norm)
delta_params = tree_add(params, tree_mul(grads, e))
_, grad_at_delta_params = grad_fn(delta_params, *args)
return tree_mul(tree_diff(grad_at_delta_params, grads), 1./e)
return finite_differences_fn
def reduce_sum_tree(tree):
tree = jax.tree_util.tree_map(jnp.sum, tree)
return jax.tree_util.tree_reduce(jnp.add, tree)
def tree_square_norm(tree):
return reduce_sum_tree(jax.tree_map(jnp.square, tree))
def tree_mul(tree, constant):
return jax.tree_map(lambda x: x * constant, tree)
def tree_add(tree1, tree2):
return jax.tree_map(lambda x, y: x + y, tree1, tree2)
def tree_diff(tree1, tree2):
return jax.tree_map(lambda x, y: x - y, tree1, tree2)
def hvp(loss_fn, params, v, *args):
"""Computes the hessian vector product Hv.
This implementation uses forward-over-reverse mode for computing the hvp.
Args:
loss_fn: function computing the loss with signature
loss(params, batch).
params: pytree for the parameters of the model.
v: pytree of the same structure as params.
*args: other arguments to pass to loss_fn.
  Returns:
    A pytree with the same structure as `params`, equal to Hv, where H is the
    Hessian of `loss_fn` evaluated at `params`.
"""
def grad_fn(p):
return jax.grad(loss_fn)(p, *args)
return jax.jvp(grad_fn, [params], [v])[1]
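# A minimal sketch (not part of the original module) checking `hvp` on a
# quadratic loss 0.5 * x^T A x with symmetric A, whose Hessian is exactly A, so
# the Hessian-vector product should equal A @ v; the function name is
# illustrative only.
def _hvp_quadratic_check():
  a = jnp.array([[2.0, 0.5], [0.5, 1.0]])
  def quadratic_loss(params):
    return 0.5 * params @ a @ params
  params = jnp.array([1.0, -1.0])
  v = jnp.array([0.3, 0.7])
  return hvp(quadratic_loss, params, v), a @ v  # The two results should match.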
| discretisation_drift-main | principal_flow_instability_single_objective/dal.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
import os
from setuptools import find_namespace_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open(os.path.join(_CURRENT_DIR, 'distrax', '__init__.py')) as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `distrax/__init__.py`')
def _parse_requirements(path):
with open(os.path.join(_CURRENT_DIR, path)) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
setup(
name='distrax',
version=_get_version(),
url='https://github.com/deepmind/distrax',
license='Apache 2.0',
author='DeepMind',
description=('Distrax: Probability distributions in JAX.'),
long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
long_description_content_type='text/markdown',
author_email='[email protected]',
keywords='jax probability distribution python machine learning',
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')),
tests_require=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements-tests.txt')),
zip_safe=False, # Required for full installation.
include_package_data=True,
python_requires='>=3.9',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| distrax-master | setup.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distrax."""
from absl.testing import absltest
import distrax
class DistraxTest(absltest.TestCase):
"""Test distrax can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(distrax, 'Uniform'))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/distrax_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distrax: Probability distributions in JAX."""
# Bijectors.
from distrax._src.bijectors.bijector import Bijector
from distrax._src.bijectors.bijector import BijectorLike
from distrax._src.bijectors.block import Block
from distrax._src.bijectors.chain import Chain
from distrax._src.bijectors.diag_linear import DiagLinear
from distrax._src.bijectors.diag_plus_low_rank_linear import DiagPlusLowRankLinear
from distrax._src.bijectors.gumbel_cdf import GumbelCDF
from distrax._src.bijectors.inverse import Inverse
from distrax._src.bijectors.lambda_bijector import Lambda
from distrax._src.bijectors.linear import Linear
from distrax._src.bijectors.lower_upper_triangular_affine import LowerUpperTriangularAffine
from distrax._src.bijectors.masked_coupling import MaskedCoupling
from distrax._src.bijectors.rational_quadratic_spline import RationalQuadraticSpline
from distrax._src.bijectors.scalar_affine import ScalarAffine
from distrax._src.bijectors.shift import Shift
from distrax._src.bijectors.sigmoid import Sigmoid
from distrax._src.bijectors.split_coupling import SplitCoupling
from distrax._src.bijectors.tanh import Tanh
from distrax._src.bijectors.triangular_linear import TriangularLinear
from distrax._src.bijectors.unconstrained_affine import UnconstrainedAffine
# Distributions.
from distrax._src.distributions.bernoulli import Bernoulli
from distrax._src.distributions.beta import Beta
from distrax._src.distributions.categorical import Categorical
from distrax._src.distributions.categorical_uniform import CategoricalUniform
from distrax._src.distributions.clipped import Clipped
from distrax._src.distributions.clipped import ClippedLogistic
from distrax._src.distributions.clipped import ClippedNormal
from distrax._src.distributions.deterministic import Deterministic
from distrax._src.distributions.dirichlet import Dirichlet
from distrax._src.distributions.distribution import Distribution
from distrax._src.distributions.distribution import DistributionLike
from distrax._src.distributions.epsilon_greedy import EpsilonGreedy
from distrax._src.distributions.gamma import Gamma
from distrax._src.distributions.greedy import Greedy
from distrax._src.distributions.gumbel import Gumbel
from distrax._src.distributions.independent import Independent
from distrax._src.distributions.joint import Joint
from distrax._src.distributions.laplace import Laplace
from distrax._src.distributions.log_stddev_normal import LogStddevNormal
from distrax._src.distributions.logistic import Logistic
from distrax._src.distributions.mixture_of_two import MixtureOfTwo
from distrax._src.distributions.mixture_same_family import MixtureSameFamily
from distrax._src.distributions.multinomial import Multinomial
from distrax._src.distributions.mvn_diag import MultivariateNormalDiag
from distrax._src.distributions.mvn_diag_plus_low_rank import MultivariateNormalDiagPlusLowRank
from distrax._src.distributions.mvn_from_bijector import MultivariateNormalFromBijector
from distrax._src.distributions.mvn_full_covariance import MultivariateNormalFullCovariance
from distrax._src.distributions.mvn_tri import MultivariateNormalTri
from distrax._src.distributions.normal import Normal
from distrax._src.distributions.one_hot_categorical import OneHotCategorical
from distrax._src.distributions.quantized import Quantized
from distrax._src.distributions.softmax import Softmax
from distrax._src.distributions.straight_through import straight_through_wrapper
from distrax._src.distributions.transformed import Transformed
from distrax._src.distributions.uniform import Uniform
from distrax._src.distributions.von_mises import VonMises
# Utilities.
from distrax._src.utils.conversion import as_bijector
from distrax._src.utils.conversion import as_distribution
from distrax._src.utils.conversion import to_tfp
from distrax._src.utils.hmm import HMM
from distrax._src.utils.importance_sampling import importance_sampling_ratios
from distrax._src.utils.math import multiply_no_nan
from distrax._src.utils.monte_carlo import estimate_kl_best_effort
from distrax._src.utils.monte_carlo import mc_estimate_kl
from distrax._src.utils.monte_carlo import mc_estimate_kl_with_reparameterized
from distrax._src.utils.monte_carlo import mc_estimate_mode
from distrax._src.utils.transformations import register_inverse
__version__ = "0.1.4"
__all__ = (
"as_bijector",
"as_distribution",
"Bernoulli",
"Beta",
"Bijector",
"BijectorLike",
"Block",
"Categorical",
"CategoricalUniform",
"Chain",
"Clipped",
"ClippedLogistic",
"ClippedNormal",
"Deterministic",
"DiagLinear",
"DiagPlusLowRankLinear",
"Dirichlet",
"Distribution",
"DistributionLike",
"EpsilonGreedy",
"estimate_kl_best_effort",
"Gamma",
"Greedy",
"Gumbel",
"GumbelCDF",
"HMM",
"importance_sampling_ratios",
"Independent",
"Inverse",
"Joint",
"Lambda",
"Laplace",
"Linear",
"Logistic",
"LogStddevNormal",
"LowerUpperTriangularAffine",
"MaskedCoupling",
"mc_estimate_kl",
"mc_estimate_kl_with_reparameterized",
"mc_estimate_mode",
"MixtureOfTwo",
"MixtureSameFamily",
"Multinomial",
"multiply_no_nan",
"MultivariateNormalDiag",
"MultivariateNormalDiagPlusLowRank",
"MultivariateNormalFromBijector",
"MultivariateNormalFullCovariance",
"MultivariateNormalTri",
"Normal",
"OneHotCategorical",
"Quantized",
"RationalQuadraticSpline",
"register_inverse",
"ScalarAffine",
"Shift",
"Sigmoid",
"Softmax",
"SplitCoupling",
"straight_through_wrapper",
"Tanh",
"to_tfp",
"Transformed",
"TriangularLinear",
"UnconstrainedAffine",
"Uniform",
"VonMises",
)
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the Distrax public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
| distrax-master | distrax/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| distrax-master | distrax/_src/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `diag_plus_low_rank_linear.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors.diag_plus_low_rank_linear import DiagPlusLowRankLinear
from distrax._src.bijectors.tanh import Tanh
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class DiagPlusLowRankLinearTest(parameterized.TestCase):
def test_static_properties(self):
bij = DiagPlusLowRankLinear(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)))
self.assertTrue(bij.is_constant_jacobian)
self.assertTrue(bij.is_constant_log_det)
self.assertEqual(bij.event_ndims_in, 1)
self.assertEqual(bij.event_ndims_out, 1)
@parameterized.parameters(
{'batch_shape': (), 'dtype': jnp.float16},
{'batch_shape': (2, 3), 'dtype': jnp.float32},
)
def test_properties(self, batch_shape, dtype):
bij = DiagPlusLowRankLinear(
diag=jnp.ones(batch_shape + (4,), dtype),
u_matrix=2. * jnp.ones(batch_shape + (4, 2), dtype),
v_matrix=3. * jnp.ones(batch_shape + (4, 2), dtype))
self.assertEqual(bij.event_dims, 4)
self.assertEqual(bij.batch_shape, batch_shape)
self.assertEqual(bij.dtype, dtype)
self.assertEqual(bij.diag.shape, batch_shape + (4,))
self.assertEqual(bij.u_matrix.shape, batch_shape + (4, 2))
self.assertEqual(bij.v_matrix.shape, batch_shape + (4, 2))
self.assertEqual(bij.matrix.shape, batch_shape + (4, 4))
self.assertEqual(bij.diag.dtype, dtype)
self.assertEqual(bij.u_matrix.dtype, dtype)
self.assertEqual(bij.v_matrix.dtype, dtype)
self.assertEqual(bij.matrix.dtype, dtype)
np.testing.assert_allclose(bij.diag, 1., atol=1e-6)
np.testing.assert_allclose(bij.u_matrix, 2., atol=1e-6)
np.testing.assert_allclose(bij.v_matrix, 3., atol=1e-6)
np.testing.assert_allclose(
bij.matrix, np.tile(np.eye(4) + 12., batch_shape + (1, 1)), atol=1e-6)
@parameterized.named_parameters(
('diag is 0d', {'diag': np.ones(()),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((4, 2))}),
('u_matrix is 1d', {'diag': np.ones((4,)),
'u_matrix': np.ones((4,)),
'v_matrix': np.ones((4, 2))}),
('v_matrix is 1d', {'diag': np.ones((4,)),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((4,))}),
('diag has wrong dim', {'diag': np.ones((3,)),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((4, 2))}),
('u_matrix has wrong dim', {'diag': np.ones((4,)),
'u_matrix': np.ones((3, 2)),
'v_matrix': np.ones((4, 2))}),
('v_matrix has wrong dim', {'diag': np.ones((4,)),
'u_matrix': np.ones((4, 2)),
'v_matrix': np.ones((3, 2))}),
)
def test_raises_with_invalid_parameters(self, params):
with self.assertRaises(ValueError):
DiagPlusLowRankLinear(**params)
@chex.all_variants
@parameterized.parameters(
((5,), (5,), (5,), (5,)),
((5,), (), (), ()),
((), (5,), (), ()),
((), (), (5,), ()),
((), (), (), (5,)),
)
def test_batched_parameters(self, diag_batch_shape, u_matrix_batch_shape,
v_matrix_batch_shape, input_batch_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), diag_batch_shape + (4,)) + 0.5
u_matrix = jax.random.uniform(next(prng), u_matrix_batch_shape + (4, 1))
v_matrix = jax.random.uniform(next(prng), v_matrix_batch_shape + (4, 1))
bij = DiagPlusLowRankLinear(diag, u_matrix, v_matrix)
x = jax.random.normal(next(prng), input_batch_shape + (4,))
y, logdet_fwd = self.variant(bij.forward_and_log_det)(x)
z, logdet_inv = self.variant(bij.inverse_and_log_det)(x)
output_batch_shape = jnp.broadcast_shapes(
diag_batch_shape, u_matrix_batch_shape, v_matrix_batch_shape,
input_batch_shape)
self.assertEqual(y.shape, output_batch_shape + (4,))
self.assertEqual(z.shape, output_batch_shape + (4,))
self.assertEqual(logdet_fwd.shape, output_batch_shape)
self.assertEqual(logdet_inv.shape, output_batch_shape)
diag = jnp.broadcast_to(diag, output_batch_shape + (4,)).reshape((-1, 4))
u_matrix = jnp.broadcast_to(
u_matrix, output_batch_shape + (4, 1)).reshape((-1, 4, 1))
v_matrix = jnp.broadcast_to(
v_matrix, output_batch_shape + (4, 1)).reshape((-1, 4, 1))
x = jnp.broadcast_to(x, output_batch_shape + (4,)).reshape((-1, 4))
y = y.reshape((-1, 4))
z = z.reshape((-1, 4))
logdet_fwd = logdet_fwd.flatten()
logdet_inv = logdet_inv.flatten()
for i in range(np.prod(output_batch_shape)):
bij = DiagPlusLowRankLinear(diag[i], u_matrix[i], v_matrix[i])
this_y, this_logdet_fwd = self.variant(bij.forward_and_log_det)(x[i])
this_z, this_logdet_inv = self.variant(bij.inverse_and_log_det)(x[i])
np.testing.assert_allclose(this_y, y[i], atol=1e-6)
np.testing.assert_allclose(this_z, z[i], atol=1e-6)
np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-6)
np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=1e-6)
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (2, 3), 'param_shape': (3,)},
)
def test_identity_initialization(self, batch_shape, param_shape):
bij = DiagPlusLowRankLinear(
diag=jnp.ones(param_shape + (4,)),
u_matrix=jnp.zeros(param_shape + (4, 1)),
v_matrix=jnp.zeros(param_shape + (4, 1)))
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
x = jax.random.normal(next(prng), batch_shape + (4,))
# Forward methods.
y, logdet = self.variant(bij.forward_and_log_det)(x)
np.testing.assert_array_equal(y, x)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
# Inverse methods.
x_rec, logdet = self.variant(bij.inverse_and_log_det)(y)
np.testing.assert_array_equal(x_rec, y)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (2, 3), 'param_shape': (3,)}
)
def test_inverse_methods(self, batch_shape, param_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), param_shape + (4,)) + 0.5
u_matrix = jax.random.uniform(next(prng), param_shape + (4, 1))
v_matrix = jax.random.uniform(next(prng), param_shape + (4, 1))
bij = DiagPlusLowRankLinear(diag, u_matrix, v_matrix)
x = jax.random.normal(next(prng), batch_shape + (4,))
y, logdet_fwd = self.variant(bij.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bij.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=1e-6)
np.testing.assert_allclose(logdet_fwd, -logdet_inv, atol=1e-6)
@chex.all_variants
def test_forward_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), (4,)) + 0.5
u_matrix = jax.random.uniform(next(prng), (4, 1))
v_matrix = jax.random.uniform(next(prng), (4, 1))
bij = DiagPlusLowRankLinear(diag, u_matrix, v_matrix)
batched_x = jax.random.normal(next(prng), (10, 4))
single_x = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bij.forward_log_det_jacobian)(batched_x)
jacobian_fn = jax.jacfwd(bij.forward)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_x))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-4)
@chex.all_variants
def test_inverse_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), (4,)) + 0.5
u_matrix = jax.random.uniform(next(prng), (4, 1))
v_matrix = jax.random.uniform(next(prng), (4, 1))
bij = DiagPlusLowRankLinear(diag, u_matrix, v_matrix)
batched_y = jax.random.normal(next(prng), (10, 4))
single_y = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bij.inverse_log_det_jacobian)(batched_y)
jacobian_fn = jax.jacfwd(bij.inverse)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_y))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-4)
def test_raises_on_invalid_input_shape(self):
bij = DiagPlusLowRankLinear(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)))
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.subTest(fn=fn):
with self.assertRaises(ValueError):
fn(jnp.array(0))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bij = DiagPlusLowRankLinear(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)))
x = np.zeros((4,))
f(x, bij)
def test_same_as_itself(self):
bij = DiagPlusLowRankLinear(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)))
self.assertTrue(bij.same_as(bij))
def test_not_same_as_others(self):
bij = DiagPlusLowRankLinear(
diag=jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)))
other = DiagPlusLowRankLinear(
diag=2. * jnp.ones((4,)),
u_matrix=jnp.ones((4, 2)),
v_matrix=jnp.ones((4, 2)))
self.assertFalse(bij.same_as(other))
self.assertFalse(bij.same_as(Tanh()))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/diag_plus_low_rank_linear_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Scalar affine bijector."""
from typing import Optional, Tuple, Union
from distrax._src.bijectors import bijector as base
import jax
import jax.numpy as jnp
Array = base.Array
Numeric = Union[Array, float]
class ScalarAffine(base.Bijector):
"""An affine bijector that acts elementwise.
The bijector is defined as follows:
- Forward: `y = scale * x + shift`
- Forward Jacobian determinant: `log|det J(x)| = log|scale|`
- Inverse: `x = (y - shift) / scale`
- Inverse Jacobian determinant: `log|det J(y)| = -log|scale|`
where `scale` and `shift` are the bijector's parameters.
"""
def __init__(self,
shift: Numeric,
scale: Optional[Numeric] = None,
log_scale: Optional[Numeric] = None):
"""Initializes a ScalarAffine bijector.
Args:
shift: the bijector's shift parameter. Can also be batched.
scale: the bijector's scale parameter. Can also be batched. NOTE: `scale`
must be non-zero, otherwise the bijector is not invertible. It is the
user's responsibility to make sure `scale` is non-zero; the class will
make no attempt to verify this.
log_scale: the log of the scale parameter. Can also be batched. If
specified, the bijector's scale is set equal to `exp(log_scale)`. Unlike
`scale`, `log_scale` is an unconstrained parameter. NOTE: either `scale`
or `log_scale` can be specified, but not both. If neither is specified,
the bijector's scale will default to 1.
Raises:
ValueError: if both `scale` and `log_scale` are not None.
"""
super().__init__(event_ndims_in=0, is_constant_jacobian=True)
self._shift = shift
if scale is None and log_scale is None:
self._scale = 1.
self._inv_scale = 1.
self._log_scale = 0.
elif log_scale is None:
self._scale = scale
self._inv_scale = 1. / scale
self._log_scale = jnp.log(jnp.abs(scale))
elif scale is None:
self._scale = jnp.exp(log_scale)
self._inv_scale = jnp.exp(jnp.negative(log_scale))
self._log_scale = log_scale
else:
raise ValueError(
'Only one of `scale` and `log_scale` can be specified, not both.')
self._batch_shape = jax.lax.broadcast_shapes(
jnp.shape(self._shift), jnp.shape(self._scale))
@property
def shift(self) -> Numeric:
"""The bijector's shift."""
return self._shift
@property
def log_scale(self) -> Numeric:
"""The log of the bijector's scale."""
return self._log_scale
@property
def scale(self) -> Numeric:
"""The bijector's scale."""
assert self._scale is not None # By construction.
return self._scale
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape)
batched_scale = jnp.broadcast_to(self._scale, batch_shape)
batched_shift = jnp.broadcast_to(self._shift, batch_shape)
return batched_scale * x + batched_shift
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape)
return jnp.broadcast_to(self._log_scale, batch_shape)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self.forward(x), self.forward_log_det_jacobian(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
batch_shape = jax.lax.broadcast_shapes(self._batch_shape, y.shape)
batched_inv_scale = jnp.broadcast_to(self._inv_scale, batch_shape)
batched_shift = jnp.broadcast_to(self._shift, batch_shape)
return batched_inv_scale * (y - batched_shift)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
batch_shape = jax.lax.broadcast_shapes(self._batch_shape, y.shape)
return jnp.broadcast_to(jnp.negative(self._log_scale), batch_shape)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self.inverse(y), self.inverse_log_det_jacobian(y)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is ScalarAffine: # pylint: disable=unidiomatic-typecheck
return all((
self.shift is other.shift,
self.scale is other.scale,
self.log_scale is other.log_scale,
))
else:
return False
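# A minimal usage sketch (not part of the original module): for
# y = scale * x + shift, the forward log-det is log|scale| and the inverse
# log-det is -log|scale|; the function name is illustrative only.
def _scalar_affine_example():
  bijector = ScalarAffine(shift=1.0, scale=2.0)
  x = jnp.array([0.0, 0.5, 1.0])
  y, fwd_log_det = bijector.forward_and_log_det(x)     # y = 2 * x + 1.
  x_rec, inv_log_det = bijector.inverse_and_log_det(y)  # x_rec == x.
  return y, fwd_log_det, x_rec, inv_log_det  # fwd_log_det == log(2) elementwise.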
| distrax-master | distrax/_src/bijectors/scalar_affine.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GumbelCDF bijector."""
from typing import Tuple
from distrax._src.bijectors import bijector as base
import jax.numpy as jnp
Array = base.Array
class GumbelCDF(base.Bijector):
"""A bijector that computes the Gumbel cumulative density function (CDF).
The Gumbel CDF is given by `y = f(x) = exp(-exp(-x))` for a scalar input `x`.
Its inverse is `x = -log(-log(y))`. The log-det Jacobian of the transformation
is `log df/dx = -exp(-x) - x`.
"""
def __init__(self):
"""Initializes a GumbelCDF bijector."""
super().__init__(event_ndims_in=0)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
exp_neg_x = jnp.exp(-x)
y = jnp.exp(-exp_neg_x)
log_det = - x - exp_neg_x
return y, log_det
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
log_y = jnp.log(y)
x = -jnp.log(-log_y)
return x, x - log_y
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
return type(other) is GumbelCDF # pylint: disable=unidiomatic-typecheck
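# A minimal usage sketch (not part of the original module): the forward map is
# the Gumbel CDF y = exp(-exp(-x)), the inverse is x = -log(-log(y)), and the
# round trip recovers x for moderate inputs; the function name is illustrative.
def _gumbel_cdf_example():
  bijector = GumbelCDF()
  x = jnp.array([-1.0, 0.0, 2.0])
  y, log_det = bijector.forward_and_log_det(x)  # y lies in (0, 1).
  x_rec, _ = bijector.inverse_and_log_det(y)    # Approximately equal to x.
  return y, log_det, x_rec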
| distrax-master | distrax/_src/bijectors/gumbel_cdf.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `inverse.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import bijector as base_bijector
from distrax._src.bijectors import inverse
from distrax._src.bijectors import scalar_affine
from distrax._src.distributions import normal
from distrax._src.distributions import transformed
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
RTOL = 1e-2
def _with_additional_parameters(params, all_named_parameters):
"""Convenience function for appending a cartesian product of parameters."""
for name, param in params:
for named_params in all_named_parameters:
yield (f'{named_params[0]}; {name}',) + named_params[1:] + (param,)
def _with_base_dists(*all_named_parameters):
"""Partial of _with_additional_parameters to specify distrax and TFP base."""
base_dists = (
('tfp_base', tfd.Normal),
('distrax_base', normal.Normal),
)
return _with_additional_parameters(base_dists, all_named_parameters)
class InverseTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = jax.random.PRNGKey(1234)
def test_properties(self):
bijector = inverse.Inverse(tfb.Scale(2))
assert isinstance(bijector.bijector, base_bijector.Bijector)
@parameterized.named_parameters(_with_base_dists(
('1d std normal', 0, 1),
('2d std normal', np.zeros(2), np.ones(2)),
('broadcasted loc', 0, np.ones(3)),
('broadcasted scale', np.ones(3), 1),
))
def test_event_shape(self, mu, sigma, base_dist):
base = base_dist(mu, sigma)
bijector = inverse.Inverse(tfb.Scale(2))
dist = transformed.Transformed(base, bijector)
tfp_bijector = tfb.Invert(tfb.Scale(2))
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
assert dist.event_shape == tfp_dist.event_shape
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d std normal, no shape', 0, 1, ()),
('1d std normal, int shape', 0, 1, 1),
('1d std normal, 1-tuple shape', 0, 1, (1,)),
('1d std normal, 2-tuple shape', 0, 1, (2, 2)),
('2d std normal, no shape', np.zeros(2), np.ones(2), ()),
('2d std normal, int shape', [0, 0], [1, 1], 1),
('2d std normal, 1-tuple shape', np.zeros(2), np.ones(2), (1,)),
('2d std normal, 2-tuple shape', [0, 0], [1, 1], (2, 2)),
('rank 2 std normal, 2-tuple shape', np.zeros(
(3, 2)), np.ones((3, 2)), (2, 2)),
('broadcasted loc', 0, np.ones(3), (2, 2)),
('broadcasted scale', np.ones(3), 1, ()),
))
def test_sample_shape(self, mu, sigma, sample_shape, base_dist):
base = base_dist(mu, sigma)
bijector = inverse.Inverse(tfb.Scale(2))
dist = transformed.Transformed(base, bijector)
def sample_fn(seed, sample_shape):
return dist.sample(seed=seed, sample_shape=sample_shape)
samples = self.variant(sample_fn, ignore_argnums=(1,), static_argnums=1)(
self.seed, sample_shape)
tfp_bijector = tfb.Invert(tfb.Scale(2))
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
tfp_samples = tfp_dist.sample(sample_shape=sample_shape, seed=self.seed)
chex.assert_equal_shape([samples, tfp_samples])
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d dist, 1d value', 0, 1, 1.),
('1d dist, 2d value', 0., 1., np.array([1., 2.])),
('2d dist, 1d value', np.zeros(2), np.ones(2), 1.),
('2d broadcasted dist, 1d value', np.zeros(2), 1, 1.),
('2d dist, 2d value', np.zeros(2), np.ones(2), np.array([1., 2.])),
('1d dist, 1d value, edge case', 0, 1, 200.),
))
def test_log_prob(self, mu, sigma, value, base_dist):
base = base_dist(mu, sigma)
bijector = inverse.Inverse(tfb.Scale(2))
dist = transformed.Transformed(base, bijector)
actual = self.variant(dist.log_prob)(value)
tfp_bijector = tfb.Invert(tfb.Scale(2))
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
expected = tfp_dist.log_prob(value)
np.testing.assert_allclose(actual, expected, atol=1e-6)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d dist, 1d value', 0, 1, 1.),
('1d dist, 2d value', 0., 1., np.array([1., 2.])),
('2d dist, 1d value', np.zeros(2), np.ones(2), 1.),
('2d broadcasted dist, 1d value', np.zeros(2), 1, 1.),
('2d dist, 2d value', np.zeros(2), np.ones(2), np.array([1., 2.])),
('1d dist, 1d value, edge case', 0, 1, 200.),
))
def test_prob(self, mu, sigma, value, base_dist):
base = base_dist(mu, sigma)
bijector = inverse.Inverse(tfb.Scale(2))
dist = transformed.Transformed(base, bijector)
actual = self.variant(dist.prob)(value)
tfp_bijector = tfb.Invert(tfb.Scale(2))
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
expected = tfp_dist.prob(value)
np.testing.assert_allclose(actual, expected, atol=1e-9)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d std normal, no shape', 0, 1, ()),
('1d std normal, int shape', 0, 1, 1),
('1d std normal, 1-tuple shape', 0, 1, (1,)),
('1d std normal, 2-tuple shape', 0, 1, (2, 2)),
('2d std normal, no shape', np.zeros(2), np.ones(2), ()),
('2d std normal, int shape', [0, 0], [1, 1], 1),
('2d std normal, 1-tuple shape', np.zeros(2), np.ones(2), (1,)),
('2d std normal, 2-tuple shape', [0, 0], [1, 1], (2, 2)),
('rank 2 std normal, 2-tuple shape', np.zeros(
(3, 2)), np.ones((3, 2)), (2, 2)),
('broadcasted loc', 0, np.ones(3), (2, 2)),
('broadcasted scale', np.ones(3), 1, ()),
))
def test_sample_and_log_prob(self, mu, sigma, sample_shape, base_dist):
base = base_dist(mu, sigma)
bijector = inverse.Inverse(tfb.Scale(2))
dist = transformed.Transformed(base, bijector)
def sample_and_log_prob_fn(seed, sample_shape):
return dist.sample_and_log_prob(seed=seed, sample_shape=sample_shape)
samples, log_prob = self.variant(
sample_and_log_prob_fn, ignore_argnums=(1,), static_argnums=(1,))(
self.seed, sample_shape)
expected_samples = bijector.forward(
base.sample(seed=self.seed, sample_shape=sample_shape))
tfp_bijector = tfb.Invert(tfb.Scale(2))
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
tfp_samples = tfp_dist.sample(seed=self.seed, sample_shape=sample_shape)
tfp_log_prob = tfp_dist.log_prob(samples)
chex.assert_equal_shape([samples, tfp_samples])
np.testing.assert_allclose(log_prob, tfp_log_prob, rtol=RTOL)
np.testing.assert_allclose(samples, expected_samples, rtol=RTOL)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(_with_base_dists(
('entropy', 'entropy', 0., 1.),
('mean', 'mean', 0, 1),
('mean from list params', 'mean', [-1, 1], [1, 2]),
('mode', 'mode', 0, 1),
))
def test_method(self, function_string, mu, sigma, base_dist):
base = base_dist(mu, sigma)
bijector = inverse.Inverse(tfb.Scale(2))
dist = transformed.Transformed(base, bijector)
tfp_bijector = tfb.Invert(tfb.Scale(2))
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
np.testing.assert_allclose(
self.variant(getattr(dist, function_string))(),
getattr(tfp_dist, function_string)())
@chex.all_variants
@parameterized.named_parameters(
('int16', np.array([1, 2], dtype=np.int16)),
('int32', np.array([1, 2], dtype=np.int32)),
('int64', np.array([1, 2], dtype=np.int64)),
)
def test_integer_inputs(self, inputs):
bijector = inverse.Inverse(scalar_affine.ScalarAffine(shift=1.0))
output, log_det = self.variant(bijector.forward_and_log_det)(inputs)
expected_out = jnp.array(inputs, dtype=jnp.float32) - 1.0
expected_log_det = jnp.zeros_like(inputs, dtype=jnp.float32)
np.testing.assert_array_equal(output, expected_out)
np.testing.assert_array_equal(log_det, expected_log_det)
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = inverse.Inverse(scalar_affine.ScalarAffine(0, 1))
x = np.zeros(())
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/inverse_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unconstrained affine bijector."""
from typing import Tuple
from distrax._src.bijectors import bijector as base
import jax
import jax.numpy as jnp
Array = base.Array
def check_affine_parameters(matrix: Array, bias: Array) -> None:
"""Checks that `matrix` and `bias` have valid shapes.
Args:
matrix: a matrix, or a batch of matrices.
bias: a vector, or a batch of vectors.
Raises:
ValueError: if the shapes of `matrix` and `bias` are invalid.
"""
if matrix.ndim < 2:
raise ValueError(f"`matrix` must have at least 2 dimensions, got"
f" {matrix.ndim}.")
if bias.ndim < 1:
raise ValueError("`bias` must have at least 1 dimension.")
if matrix.shape[-2] != matrix.shape[-1]:
raise ValueError(f"`matrix` must be square; instead, it has shape"
f" {matrix.shape[-2:]}.")
if matrix.shape[-1] != bias.shape[-1]:
raise ValueError(f"`matrix` and `bias` have inconsistent shapes: `matrix`"
f" is {matrix.shape[-2:]}, `bias` is {bias.shape[-1:]}.")
class UnconstrainedAffine(base.Bijector):
"""An unconstrained affine bijection.
This bijector is a linear-plus-bias transformation `f(x) = Ax + b`, where `A`
is a `D x D` square matrix and `b` is a `D`-dimensional vector.
The bijector is invertible if and only if `A` is an invertible matrix. It is
the responsibility of the user to make sure that this is the case; the class
will make no attempt to verify that the bijector is invertible.
The Jacobian determinant is equal to `det(A)`. The inverse is computed by
solving the linear system `Ax = y - b`.
WARNING: Both the determinant and the inverse cost `O(D^3)` to compute. Thus,
this bijector is recommended only for small `D`.
"""
def __init__(self, matrix: Array, bias: Array):
"""Initializes an `UnconstrainedAffine` bijector.
Args:
matrix: the matrix `A` in `Ax + b`. Must be square and invertible. Can
also be a batch of matrices.
bias: the vector `b` in `Ax + b`. Can also be a batch of vectors.
"""
check_affine_parameters(matrix, bias)
super().__init__(event_ndims_in=1, is_constant_jacobian=True)
self._batch_shape = jnp.broadcast_shapes(matrix.shape[:-2], bias.shape[:-1])
self._matrix = matrix
self._bias = bias
self._logdet = jnp.linalg.slogdet(matrix)[1]
@property
def matrix(self) -> Array:
"""The matrix `A` of the transformation."""
return self._matrix
@property
def bias(self) -> Array:
"""The shift `b` of the transformation."""
return self._bias
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
self._check_forward_input_shape(x)
def unbatched(single_x, matrix, bias):
return matrix @ single_x + bias
batched = jnp.vectorize(unbatched, signature="(m),(m,m),(m)->(m)")
return batched(x, self._matrix, self._bias)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
self._check_forward_input_shape(x)
batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape[:-1])
return jnp.broadcast_to(self._logdet, batch_shape)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self.forward(x), self.forward_log_det_jacobian(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
self._check_inverse_input_shape(y)
def unbatched(single_y, matrix, bias):
return jnp.linalg.solve(matrix, single_y - bias)
batched = jnp.vectorize(unbatched, signature="(m),(m,m),(m)->(m)")
return batched(y, self._matrix, self._bias)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
return -self.forward_log_det_jacobian(y)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self.inverse(y), self.inverse_log_det_jacobian(y)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is UnconstrainedAffine: # pylint: disable=unidiomatic-typecheck
return all((
self.matrix is other.matrix,
self.bias is other.bias,
))
return False
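# A minimal usage sketch (not part of the original module): for f(x) = Ax + b
# the forward log-det equals log|det(A)| and the inverse solves Ax = y - b; the
# function name and the example values are illustrative only.
def _unconstrained_affine_example():
  matrix = jnp.array([[2.0, 1.0], [0.0, 3.0]])  # Invertible, det = 6.
  bias = jnp.array([1.0, -1.0])
  bijector = UnconstrainedAffine(matrix, bias)
  x = jnp.array([0.5, 0.5])
  y, log_det = bijector.forward_and_log_det(x)  # log_det == log(6).
  x_rec = bijector.inverse(y)                   # Approximately equal to x.
  return y, log_det, x_rec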
| distrax-master | distrax/_src/bijectors/unconstrained_affine.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `gumbel_cdf.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import gumbel_cdf
from distrax._src.bijectors import tanh
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
RTOL = 1e-5
class GumbelCDFTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = jax.random.PRNGKey(1234)
def test_properties(self):
bijector = gumbel_cdf.GumbelCDF()
self.assertEqual(bijector.event_ndims_in, 0)
self.assertEqual(bijector.event_ndims_out, 0)
self.assertFalse(bijector.is_constant_jacobian)
self.assertFalse(bijector.is_constant_log_det)
@chex.all_variants
@parameterized.parameters(
{'x_shape': (2,)},
{'x_shape': (2, 3)},
{'x_shape': (2, 3, 4)})
def test_forward_shapes(self, x_shape):
x = jnp.zeros(x_shape)
bijector = gumbel_cdf.GumbelCDF()
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
self.assertEqual(y1.shape, x_shape)
self.assertEqual(y2.shape, x_shape)
self.assertEqual(logdet1.shape, x_shape)
self.assertEqual(logdet2.shape, x_shape)
@chex.all_variants
@parameterized.parameters(
{'y_shape': (2,)},
{'y_shape': (2, 3)},
{'y_shape': (2, 3, 4)})
def test_inverse_shapes(self, y_shape):
y = jnp.zeros(y_shape)
bijector = gumbel_cdf.GumbelCDF()
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
self.assertEqual(x1.shape, y_shape)
self.assertEqual(x2.shape, y_shape)
self.assertEqual(logdet1.shape, y_shape)
self.assertEqual(logdet2.shape, y_shape)
@chex.all_variants
def test_forward(self):
x = jax.random.normal(self.seed, (100,))
bijector = gumbel_cdf.GumbelCDF()
y = self.variant(bijector.forward)(x)
expected_y = jnp.exp(-jnp.exp(-x))
np.testing.assert_allclose(y, expected_y, rtol=RTOL)
@chex.all_variants
def test_forward_log_det_jacobian(self):
x = jax.random.normal(self.seed, (100,))
bijector = gumbel_cdf.GumbelCDF()
fwd_logdet = self.variant(bijector.forward_log_det_jacobian)(x)
actual = jnp.log(jax.vmap(jax.grad(bijector.forward))(x))
np.testing.assert_allclose(fwd_logdet, actual, rtol=1e-3)
@chex.all_variants
def test_forward_and_log_det(self):
x = jax.random.normal(self.seed, (100,))
bijector = gumbel_cdf.GumbelCDF()
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y1, y2, rtol=RTOL)
np.testing.assert_allclose(logdet1, logdet2, rtol=RTOL)
@chex.all_variants
def test_inverse(self):
x = jax.random.normal(self.seed, (100,))
bijector = gumbel_cdf.GumbelCDF()
y = self.variant(bijector.forward)(x)
x_rec = self.variant(bijector.inverse)(y)
np.testing.assert_allclose(x_rec, x, rtol=1e-3)
@chex.all_variants
def test_inverse_log_det_jacobian(self):
x = jax.random.normal(self.seed, (100,))
bijector = gumbel_cdf.GumbelCDF()
y = self.variant(bijector.forward)(x)
fwd_logdet = self.variant(bijector.forward_log_det_jacobian)(x)
inv_logdet = self.variant(bijector.inverse_log_det_jacobian)(y)
np.testing.assert_allclose(inv_logdet, -fwd_logdet, rtol=1e-3)
@chex.all_variants
def test_inverse_and_log_det(self):
y = jax.random.uniform(self.seed, (100,))
bijector = gumbel_cdf.GumbelCDF()
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x1, x2, rtol=RTOL)
np.testing.assert_allclose(logdet1, logdet2, rtol=RTOL)
@chex.all_variants
def test_stability(self):
bijector = gumbel_cdf.GumbelCDF()
tfp_bijector = tfb.GumbelCDF()
x = np.array([-10.0, -3.3, 0.0, 3.3, 10.0], dtype=np.float32)
fldj = tfp_bijector.forward_log_det_jacobian(x, event_ndims=0)
fldj_ = self.variant(bijector.forward_log_det_jacobian)(x)
np.testing.assert_allclose(fldj_, fldj, rtol=RTOL)
y = bijector.forward(x) # pytype: disable=wrong-arg-types # jax-ndarray
ildj = tfp_bijector.inverse_log_det_jacobian(y, event_ndims=0)
ildj_ = self.variant(bijector.inverse_log_det_jacobian)(y)
np.testing.assert_allclose(ildj_, ildj, rtol=RTOL)
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = gumbel_cdf.GumbelCDF()
x = np.zeros(())
f(x, bijector)
def test_same_as(self):
bijector = gumbel_cdf.GumbelCDF()
self.assertTrue(bijector.same_as(bijector))
self.assertTrue(bijector.same_as(gumbel_cdf.GumbelCDF()))
self.assertFalse(bijector.same_as(tanh.Tanh()))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/gumbel_cdf_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tanh bijector."""
from typing import Tuple
from distrax._src.bijectors import bijector as base
import jax
import jax.numpy as jnp
Array = base.Array
class Tanh(base.Bijector):
"""A bijector that computes the hyperbolic tangent.
The log-determinant implementation in this bijector is more numerically stable
than relying on the automatic differentiation approach used by Lambda, so this
bijector should be preferred over Lambda(jnp.tanh) where possible. See
`tfp.bijectors.Tanh` for details.
When the absolute value of the input is large, `Tanh` becomes close to a
constant, so that it is not possible to recover the input `x` from the output
`y` within machine precision. In cases where it is needed to compute both the
forward mapping and the backward mapping one after the other to recover the
original input `x`, it is the user's responsibility to simplify the operation
to avoid numerical issues; this is unlike the `tfp.bijectors.Tanh`. One
example of such case is to use the bijector within a `Transformed`
distribution and to obtain the log-probability of samples obtained from the
distribution's `sample` method. For values of the samples for which it is not
possible to apply the inverse bijector accurately, `log_prob` returns NaN.
This can be avoided by using `sample_and_log_prob` instead of `sample`
followed by `log_prob`.
"""
def __init__(self):
"""Initializes a Tanh bijector."""
super().__init__(event_ndims_in=0)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
return 2 * (jnp.log(2) - x - jax.nn.softplus(-2 * x))
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return jnp.tanh(x), self.forward_log_det_jacobian(x)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
x = jnp.arctanh(y)
return x, -self.forward_log_det_jacobian(x)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
return type(other) is Tanh # pylint: disable=unidiomatic-typecheck
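# A minimal usage sketch (not part of the original module): for moderate inputs
# the inverse recovers x, but for large |x| the output saturates at +/-1 and the
# inverse can no longer recover x accurately, which is why the class docstring
# recommends `sample_and_log_prob` over `sample` followed by `log_prob`.
def _tanh_example():
  bijector = Tanh()
  x = jnp.array([-2.0, 0.0, 2.0])
  y, log_det = bijector.forward_and_log_det(x)
  x_rec, _ = bijector.inverse_and_log_det(y)  # Close to x for these values.
  return y, log_det, x_rec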
| distrax-master | distrax/_src/bijectors/tanh.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector abstract base class."""
import abc
import typing
from typing import Callable, Optional, Tuple, Union
import chex
from distrax._src.utils import jittable
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
Array = chex.Array
class Bijector(jittable.Jittable, metaclass=abc.ABCMeta):
"""Differentiable bijection that knows to compute its Jacobian determinant.
A bijector implements a differentiable and bijective transformation `f`, whose
inverse is also differentiable (`f` is called a "diffeomorphism"). A bijector
can be used to transform a continuous random variable `X` to a continuous
random variable `Y = f(X)` in the context of `TransformedDistribution`.
Typically, a bijector subclass will implement the following methods:
- `forward_and_log_det(x)` (required)
- `inverse_and_log_det(y)` (optional)
The remaining methods are defined in terms of the above by default.
Subclass requirements:
- Subclasses must ensure that `f` is differentiable and bijective, and that
their methods correctly implement `f^{-1}`, `J(f)` and `J(f^{-1})`. Distrax
will assume these properties hold, and will make no attempt to verify them.
- Distrax assumes that `f` acts on array-valued variables called "events", and
that the bijector operates on batched events. Specifically, Distrax assumes
the following:
* `f` acts on events of shape [M1, ..., Mn] and returns events of shape
[L1, ..., Lq]. `n` is referred to as `event_ndims_in`, and `q` as
`event_ndims_out`. `event_ndims_in` and `event_ndims_out` must be static
properties of the bijector, and must be known to it at construction time.
* The bijector acts on batched events of shape [N1, ..., Nk, M1, ..., Mn],
where [N1, ..., Nk] are batch dimensions, and returns batched events of
shape [K1, ..., Kp, L1, ..., Lq], where [K1, ..., Kp] are (possibly
different) batch dimensions. Distrax requires that bijectors always
broadcast against batched events, that is, that they apply `f` identically
to each event. Distrax also allows for events to broadcast against batched
bijectors, meaning that multiple instantiations of `f` are applied to the
same event, although this is not a subclass requirement.
"""
def __init__(self,
event_ndims_in: int,
event_ndims_out: Optional[int] = None,
is_constant_jacobian: bool = False,
is_constant_log_det: Optional[bool] = None):
"""Initializes a Bijector.
Args:
event_ndims_in: Number of input event dimensions. The bijector acts on
events of shape [M1, ..., Mn], where `n == event_ndims_in`.
event_ndims_out: Number of output event dimensions. The bijector returns
events of shape [L1, ..., Lq], where `q == event_ndims_out`. If None, it
defaults to `event_ndims_in`.
is_constant_jacobian: Whether the Jacobian is promised to be constant
(which is the case if and only if the bijector is affine). A value of
False will be interpreted as "we don't know whether the Jacobian is
constant", rather than "the Jacobian is definitely not constant". Only
set to True if you're absolutely sure the Jacobian is constant; if
you're not sure, set to False.
is_constant_log_det: Whether the Jacobian determinant is promised to be
constant (which is the case for, e.g., volume-preserving bijectors). If
None, it defaults to `is_constant_jacobian`. Note that the Jacobian
determinant can be constant without the Jacobian itself being constant.
        Only set to True if you're absolutely sure the Jacobian determinant is
constant; if you're not sure, set to None.
"""
if event_ndims_out is None:
event_ndims_out = event_ndims_in
if event_ndims_in < 0:
raise ValueError(
f"`event_ndims_in` can't be negative. Got {event_ndims_in}.")
if event_ndims_out < 0:
raise ValueError(
f"`event_ndims_out` can't be negative. Got {event_ndims_out}.")
if is_constant_log_det is None:
is_constant_log_det = is_constant_jacobian
if is_constant_jacobian and not is_constant_log_det:
raise ValueError("The Jacobian is said to be constant, but its "
"determinant is said not to be, which is impossible.")
self._event_ndims_in = event_ndims_in
self._event_ndims_out = event_ndims_out
self._is_constant_jacobian = is_constant_jacobian
self._is_constant_log_det = is_constant_log_det
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
y, _ = self.forward_and_log_det(x)
return y
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
x, _ = self.inverse_and_log_det(y)
return x
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
_, logdet = self.forward_and_log_det(x)
return logdet
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
_, logdet = self.inverse_and_log_det(y)
return logdet
@abc.abstractmethod
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
raise NotImplementedError(
f"Bijector {self.name} does not implement `inverse_and_log_det`.")
@property
def event_ndims_in(self) -> int:
"""Number of input event dimensions."""
return self._event_ndims_in
@property
def event_ndims_out(self) -> int:
"""Number of output event dimensions."""
return self._event_ndims_out
@property
def is_constant_jacobian(self) -> bool:
"""Whether the Jacobian is promised to be constant."""
return self._is_constant_jacobian
@property
def is_constant_log_det(self) -> bool:
"""Whether the Jacobian determinant is promised to be constant."""
return self._is_constant_log_det
@property
def name(self) -> str:
"""Name of the bijector."""
return self.__class__.__name__
def same_as(self, other: "Bijector") -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
del other
return False
def _check_forward_input_shape(self, x: Array) -> None:
"""Checks that the input `x` to a forward method has valid shape."""
x_ndims = len(jnp.shape(x))
if x_ndims < self.event_ndims_in:
raise ValueError(
f"Bijector {self.name} has `event_ndims_in=={self.event_ndims_in}`,"
f" but the input has only {x_ndims} array dimensions.")
def _check_inverse_input_shape(self, y: Array) -> None:
"""Checks that the input `y` to an inverse method has valid shape."""
y_ndims = len(jnp.shape(y))
if y_ndims < self.event_ndims_out:
raise ValueError(
f"Bijector {self.name} has `event_ndims_out=={self.event_ndims_out}`,"
f" but the input has only {y_ndims} array dimensions.")
BijectorLike = Union[Bijector, tfb.Bijector, Callable[[Array], Array]]
BijectorT = typing.TypeVar("BijectorT", bound=Bijector)
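# A minimal sketch (not part of the original module) of a Bijector subclass
# that overrides only the required `forward_and_log_det` and the optional
# `inverse_and_log_det`, as described in the class docstring above; the name
# `_ExpSketch` is illustrative only.
class _ExpSketch(Bijector):
  """Elementwise y = exp(x), with log|det J(f)(x)| = x."""
  def __init__(self):
    super().__init__(event_ndims_in=0)
  def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
    return jnp.exp(x), x
  def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
    x = jnp.log(y)
    return x, -x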
| distrax-master | distrax/_src/bijectors/bijector.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `unconstrained_affine.py`."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors.tanh import Tanh
from distrax._src.bijectors.unconstrained_affine import UnconstrainedAffine
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class UnconstrainedAffineTest(parameterized.TestCase):
def test_properties(self):
bijector = UnconstrainedAffine(matrix=jnp.eye(4), bias=jnp.zeros((4,)))
self.assertTrue(bijector.is_constant_jacobian)
self.assertTrue(bijector.is_constant_log_det)
np.testing.assert_allclose(bijector.matrix, np.eye(4))
np.testing.assert_allclose(bijector.bias, np.zeros((4,)))
@parameterized.named_parameters(
('matrix is 0d', {'matrix': np.zeros(()), 'bias': np.zeros((4,))}),
('matrix is 1d', {'matrix': np.zeros((4,)), 'bias': np.zeros((4,))}),
('bias is 0d', {'matrix': np.zeros((4, 4)), 'bias': np.zeros(())}),
('matrix is not square',
{'matrix': np.zeros((3, 4)), 'bias': np.zeros((4,))}),
('matrix and bias shapes do not agree',
{'matrix': np.zeros((4, 4)), 'bias': np.zeros((3,))}),
)
def test_invalid_properties(self, bij_params):
with self.assertRaises(ValueError):
UnconstrainedAffine(**bij_params)
@chex.all_variants
@parameterized.parameters(
((5,), (5,), (5,)),
((5,), (5,), ()),
((5,), (), (5,)),
((), (5,), (5,)),
((), (), (5,)),
((), (5,), ()),
((5,), (), ()),
)
def test_batched_parameters(self, matrix_batch_shape, bias_batch_shape,
input_batch_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(
next(prng), matrix_batch_shape + (4, 4)) + jnp.eye(4)
bias = jax.random.normal(next(prng), bias_batch_shape + (4,))
bijector = UnconstrainedAffine(matrix, bias)
x = jax.random.normal(next(prng), input_batch_shape + (4,))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
z, logdet_inv = self.variant(bijector.inverse_and_log_det)(x)
output_batch_shape = jnp.broadcast_arrays(
matrix[..., 0, 0], bias[..., 0], x[..., 0])[0].shape
self.assertEqual(y.shape, output_batch_shape + (4,))
self.assertEqual(z.shape, output_batch_shape + (4,))
self.assertEqual(logdet_fwd.shape, output_batch_shape)
self.assertEqual(logdet_inv.shape, output_batch_shape)
matrix = jnp.broadcast_to(
matrix, output_batch_shape + (4, 4)).reshape((-1, 4, 4))
bias = jnp.broadcast_to(bias, output_batch_shape + (4,)).reshape((-1, 4))
x = jnp.broadcast_to(x, output_batch_shape + (4,)).reshape((-1, 4))
y = y.reshape((-1, 4))
z = z.reshape((-1, 4))
logdet_fwd = logdet_fwd.flatten()
logdet_inv = logdet_inv.flatten()
for i in range(np.prod(output_batch_shape)):
bijector = UnconstrainedAffine(matrix[i], bias[i])
this_y, this_logdet_fwd = self.variant(bijector.forward_and_log_det)(x[i])
this_z, this_logdet_inv = self.variant(bijector.inverse_and_log_det)(x[i])
np.testing.assert_allclose(this_y, y[i], atol=6e-3)
np.testing.assert_allclose(this_z, z[i], atol=7e-6)
np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-7)
np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=7e-6)
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (3,), 'param_shape': (3,)},
{'batch_shape': (2, 3), 'param_shape': (3,)},
)
def test_identity_initialization(self, batch_shape, param_shape):
bijector = UnconstrainedAffine(
matrix=jnp.eye(4),
bias=jnp.zeros(param_shape + (4,)))
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
x = jax.random.normal(next(prng), batch_shape + (4,))
# Forward methods.
y, logdet = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y, x, atol=8e-3)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
# Inverse methods.
x_rec, logdet = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_array_equal(x_rec, y)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (3,), 'param_shape': (3,)},
{'batch_shape': (2, 3), 'param_shape': (3,)},
)
def test_inverse_methods(self, batch_shape, param_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(next(prng), param_shape + (4, 4)) + jnp.eye(4)
bias = jax.random.normal(next(prng), param_shape + (4,))
bijector = UnconstrainedAffine(matrix, bias)
x = jax.random.normal(next(prng), batch_shape + (4,))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=8e-3)
np.testing.assert_array_equal(logdet_fwd, -logdet_inv)
@chex.all_variants
def test_forward_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(next(prng), (4, 4)) + jnp.eye(4)
bias = jax.random.normal(next(prng), (4,))
bijector = UnconstrainedAffine(matrix, bias)
batched_x = jax.random.normal(next(prng), (10, 4))
single_x = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bijector.forward_log_det_jacobian)(batched_x)
jacobian_fn = jax.jacfwd(bijector.forward)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_x))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=7e-3)
@chex.all_variants
def test_inverse_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(next(prng), (4, 4)) + jnp.eye(4)
bias = jax.random.normal(next(prng), (4,))
bijector = UnconstrainedAffine(matrix, bias)
batched_y = jax.random.normal(next(prng), (10, 4))
single_y = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bijector.inverse_log_det_jacobian)(batched_y)
jacobian_fn = jax.jacfwd(bijector.inverse)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_y))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=1e-4)
def test_raises_on_invalid_input_shape(self):
bij = UnconstrainedAffine(matrix=jnp.eye(4), bias=jnp.zeros((4,)))
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.array(0))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = UnconstrainedAffine(matrix=jnp.eye(4), bias=jnp.zeros((4,)))
x = np.zeros((4,))
f(x, bijector)
def test_same_as_itself(self):
bij = UnconstrainedAffine(matrix=jnp.eye(4), bias=jnp.zeros((4,)))
self.assertTrue(bij.same_as(bij))
self.assertTrue(bij.same_as(copy.copy(bij)))
def test_not_same_as_others(self):
bij = UnconstrainedAffine(matrix=jnp.eye(4), bias=jnp.zeros((4,)))
other = UnconstrainedAffine(matrix=jnp.eye(4), bias=jnp.ones((4,)))
self.assertFalse(bij.same_as(other))
self.assertFalse(bij.same_as(Tanh()))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/unconstrained_affine_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `lower_upper_triangular_affine.py`."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors.lower_upper_triangular_affine import LowerUpperTriangularAffine
from distrax._src.bijectors.tanh import Tanh
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class LowerUpperTriangularAffineTest(parameterized.TestCase):
def test_jacobian_is_constant_property(self):
bijector = LowerUpperTriangularAffine(
matrix=jnp.eye(4), bias=jnp.zeros((4,)))
self.assertTrue(bijector.is_constant_jacobian)
self.assertTrue(bijector.is_constant_log_det)
def test_properties(self):
bijector = LowerUpperTriangularAffine(
matrix=jnp.array([[2., 3.], [4., 5.]]),
bias=jnp.ones((2,)))
lower = np.array([[1., 0.], [4., 1.]])
upper = np.array([[2., 3.], [0., 5.]])
np.testing.assert_allclose(bijector.lower, lower, atol=1e-6)
np.testing.assert_allclose(bijector.upper, upper, atol=1e-6)
np.testing.assert_allclose(bijector.matrix, lower @ upper, atol=1e-6)
np.testing.assert_allclose(bijector.bias, np.ones((2,)), atol=1e-6)
@parameterized.named_parameters(
('matrix is 0d', {'matrix': np.zeros(()), 'bias': np.zeros((4,))}),
('matrix is 1d', {'matrix': np.zeros((4,)), 'bias': np.zeros((4,))}),
('bias is 0d', {'matrix': np.zeros((4, 4)), 'bias': np.zeros(())}),
('matrix is not square',
{'matrix': np.zeros((3, 4)), 'bias': np.zeros((4,))}),
('matrix and bias shapes do not agree',
{'matrix': np.zeros((4, 4)), 'bias': np.zeros((3,))}),
)
def test_raises_with_invalid_parameters(self, bij_params):
with self.assertRaises(ValueError):
LowerUpperTriangularAffine(**bij_params)
@chex.all_variants
@parameterized.parameters(
((5,), (5,), (5,)),
((5,), (5,), ()),
((5,), (), (5,)),
((), (5,), (5,)),
((), (), (5,)),
((), (5,), ()),
((5,), (), ()),
)
def test_batched_parameters(self, matrix_batch_shape, bias_batch_shape,
input_batch_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(
next(prng), matrix_batch_shape + (4, 4)) + jnp.eye(4)
bias = jax.random.normal(next(prng), bias_batch_shape + (4,))
bijector = LowerUpperTriangularAffine(matrix, bias)
x = jax.random.normal(next(prng), input_batch_shape + (4,))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
z, logdet_inv = self.variant(bijector.inverse_and_log_det)(x)
output_batch_shape = jnp.broadcast_arrays(
matrix[..., 0, 0], bias[..., 0], x[..., 0])[0].shape
self.assertEqual(y.shape, output_batch_shape + (4,))
self.assertEqual(z.shape, output_batch_shape + (4,))
self.assertEqual(logdet_fwd.shape, output_batch_shape)
self.assertEqual(logdet_inv.shape, output_batch_shape)
matrix = jnp.broadcast_to(
matrix, output_batch_shape + (4, 4)).reshape((-1, 4, 4))
bias = jnp.broadcast_to(bias, output_batch_shape + (4,)).reshape((-1, 4))
x = jnp.broadcast_to(x, output_batch_shape + (4,)).reshape((-1, 4))
y = y.reshape((-1, 4))
z = z.reshape((-1, 4))
logdet_fwd = logdet_fwd.flatten()
logdet_inv = logdet_inv.flatten()
for i in range(np.prod(output_batch_shape)):
bijector = LowerUpperTriangularAffine(matrix[i], bias[i])
this_y, this_logdet_fwd = self.variant(bijector.forward_and_log_det)(x[i])
this_z, this_logdet_inv = self.variant(bijector.inverse_and_log_det)(x[i])
np.testing.assert_allclose(this_y, y[i], atol=9e-3)
np.testing.assert_allclose(this_z, z[i], atol=7e-6)
np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-7)
np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=7e-6)
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (3,), 'param_shape': (3,)},
{'batch_shape': (2, 3), 'param_shape': (3,)},
)
def test_identity_initialization(self, batch_shape, param_shape):
bijector = LowerUpperTriangularAffine(
matrix=jnp.eye(4),
bias=jnp.zeros(param_shape + (4,)))
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
x = jax.random.normal(next(prng), batch_shape + (4,))
# Forward methods.
y, logdet = self.variant(bijector.forward_and_log_det)(x)
    np.testing.assert_allclose(y, x, atol=8e-3)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
# Inverse methods.
x_rec, logdet = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_array_equal(x_rec, y)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (3,), 'param_shape': (3,)},
{'batch_shape': (2, 3), 'param_shape': (3,)}
)
def test_inverse_methods(self, batch_shape, param_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(next(prng), param_shape + (4, 4)) + jnp.eye(4)
bias = jax.random.normal(next(prng), param_shape + (4,))
bijector = LowerUpperTriangularAffine(matrix, bias)
x = jax.random.normal(next(prng), batch_shape + (4,))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=9e-3)
np.testing.assert_array_equal(logdet_fwd, -logdet_inv)
@chex.all_variants
def test_forward_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(next(prng), (4, 4)) + jnp.eye(4)
bias = jax.random.normal(next(prng), (4,))
bijector = LowerUpperTriangularAffine(matrix, bias)
batched_x = jax.random.normal(next(prng), (10, 4))
single_x = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bijector.forward_log_det_jacobian)(batched_x)
jacobian_fn = jax.jacfwd(bijector.forward)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_x))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-3)
@chex.all_variants
def test_inverse_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(next(prng), (4, 4)) + jnp.eye(4)
bias = jax.random.normal(next(prng), (4,))
bijector = LowerUpperTriangularAffine(matrix, bias)
batched_y = jax.random.normal(next(prng), (10, 4))
single_y = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bijector.inverse_log_det_jacobian)(batched_y)
jacobian_fn = jax.jacfwd(bijector.inverse)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_y))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-5)
def test_raises_on_invalid_input_shape(self):
bij = LowerUpperTriangularAffine(matrix=jnp.eye(4), bias=jnp.zeros((4,)))
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.array(0))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = LowerUpperTriangularAffine(
matrix=jnp.eye(4),
bias=jnp.zeros((4,)))
x = np.zeros((4,))
f(x, bijector)
def test_same_as_itself(self):
bij = LowerUpperTriangularAffine(matrix=jnp.eye(4), bias=jnp.zeros((4,)))
self.assertTrue(bij.same_as(bij))
self.assertTrue(bij.same_as(copy.copy(bij)))
def test_not_same_as_others(self):
bij = LowerUpperTriangularAffine(matrix=jnp.eye(4), bias=jnp.zeros((4,)))
other = LowerUpperTriangularAffine(matrix=jnp.eye(4), bias=jnp.ones((4,)))
self.assertFalse(bij.same_as(other))
self.assertFalse(bij.same_as(Tanh()))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/lower_upper_triangular_affine_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distrax bijector for automatically turning JAX functions into Bijectors."""
from typing import Callable, Optional, Tuple
from distrax._src.bijectors import bijector as base
from distrax._src.utils import transformations
Array = base.Array
class Lambda(base.Bijector):
"""Wrapper to automatically turn JAX functions into fully fledged bijectors.
This class takes in JAX functions that implement bijector methods (such as
`forward`, `inverse`, `forward_log_det_jacobian`, etc.), and constructs a
bijector out of them. Any functions not explicitly specified by the user will
be automatically derived from the existing functions where possible, by
tracing their JAXPR representation. Missing functions will be derived on
demand: if a missing function is not used, it will not be derived. At a
minimum, either `forward` or `inverse` must be given; all other methods will
be derived (where possible).
The Lambda bijector can be useful for creating simple one-line bijectors that
would otherwise be tedious to define. Examples of scalar bijectors that can be
easily constructed with Lambda are:
- Identity: `Lambda(lambda x: x)`
- Affine: `Lambda(lambda x: a*x + b)`
- Tanh: `Lambda(jnp.tanh)`
- Composite: `Lambda(lambda x: jnp.tanh(a*x + b))`
Requirements and limitations:
- Only functions composed entirely of invertible primitives can be
automatically inverted (see `bijection_utils.py` for a list of invertible
primitives). If the inverse is needed but is not automatically derivable,
the user must provide it explicitly.
- If log-determinant functions are not provided, Lambda will assume that
`forward` and `inverse` are scalar functions applied elementwise. If the
bijector is not meant to be scalar, its log-determinant functions must be
provided explicitly by the user.
"""
def __init__(
self,
forward: Optional[Callable[[Array], Array]] = None,
inverse: Optional[Callable[[Array], Array]] = None,
forward_log_det_jacobian: Optional[Callable[[Array], Array]] = None,
inverse_log_det_jacobian: Optional[Callable[[Array], Array]] = None,
event_ndims_in: Optional[int] = None,
event_ndims_out: Optional[int] = None,
is_constant_jacobian: Optional[bool] = None):
"""Initializes a Lambda bijector with methods specified as args."""
if forward is None and inverse is None:
raise ValueError("The Lambda bijector requires at least one of `forward` "
"or `inverse` to be specified, but neither is.")
jac_functions_specified = (forward_log_det_jacobian is not None
or inverse_log_det_jacobian is not None)
if jac_functions_specified:
if event_ndims_in is None:
raise ValueError("When log det Jacobian functions are specified, you "
"must also specify `event_ndims_in`.")
else:
if event_ndims_in is not None or event_ndims_out is not None:
raise ValueError("When log det Jacobian functions are unspecified, you "
"must leave `event_ndims_in` and `event_ndims_out` "
"unspecified; they will default to 0.")
event_ndims_in = 0
if is_constant_jacobian is None:
fn = inverse if forward is None else forward
is_constant_jacobian = transformations.is_constant_jacobian(fn)
super().__init__(
event_ndims_in=event_ndims_in,
event_ndims_out=event_ndims_out,
is_constant_jacobian=is_constant_jacobian)
self._forward = forward
self._inverse = inverse
self._forward_log_det_jacobian = forward_log_det_jacobian
self._inverse_log_det_jacobian = inverse_log_det_jacobian
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
self._check_forward_input_shape(x)
if self._forward is None:
self._forward = transformations.inv(self._inverse)
return self._forward(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
self._check_inverse_input_shape(y)
if self._inverse is None:
self._inverse = transformations.inv(self._forward)
return self._inverse(y)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
self._check_forward_input_shape(x)
if self._forward_log_det_jacobian is None:
self._forward_log_det_jacobian = transformations.log_det_scalar(
self.forward)
return self._forward_log_det_jacobian(x)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
if self._inverse_log_det_jacobian is None:
self._inverse_log_det_jacobian = transformations.log_det_scalar(
self.inverse)
return self._inverse_log_det_jacobian(y)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self.forward(x), self.forward_log_det_jacobian(x)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self.inverse(y), self.inverse_log_det_jacobian(y)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Lambda: # pylint: disable=unidiomatic-typecheck
return all((
self.forward is other.forward,
self.inverse is other.inverse,
self.forward_log_det_jacobian is other.forward_log_det_jacobian,
self.inverse_log_det_jacobian is other.inverse_log_det_jacobian,
self.forward_and_log_det is other.forward_and_log_det,
self.inverse_and_log_det is other.inverse_and_log_det,
))
return False
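# Illustrative usage sketch (not part of the original source); the function
# name below is hypothetical. Only `forward` is supplied: as described in the
# class docstring, the inverse and the log-det functions are derived on demand.
def _example_affine_lambda(a: float = 2.0, b: float = 1.0) -> Lambda:
  """Builds an elementwise affine bijector y = a*x + b from a plain function."""
  return Lambda(forward=lambda x: a * x + b)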
| distrax-master | distrax/_src/bijectors/lambda_bijector.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chain Bijector for composing a sequence of Bijector transformations."""
from typing import List, Sequence, Tuple
from distrax._src.bijectors import bijector as base
from distrax._src.utils import conversion
Array = base.Array
BijectorLike = base.BijectorLike
BijectorT = base.BijectorT
class Chain(base.Bijector):
"""Composition of a sequence of bijectors into a single bijector.
Bijectors are composable: if `f` and `g` are bijectors, then `g o f` is also
a bijector. Given a sequence of bijectors `[f1, ..., fN]`, this class
implements the bijector defined by `fN o ... o f1`.
NOTE: the bijectors are applied in reverse order from the order they appear in
the sequence. For example, consider the following code where `f` and `g` are
two bijectors:
```
layers = []
layers.append(f)
layers.append(g)
bijector = distrax.Chain(layers)
y = bijector.forward(x)
```
The above code will transform `x` by first applying `g`, then `f`, so that
`y = f(g(x))`.
"""
def __init__(self, bijectors: Sequence[BijectorLike]):
"""Initializes a Chain bijector.
Args:
bijectors: a sequence of bijectors to be composed into one. Each bijector
can be a distrax bijector, a TFP bijector, or a callable to be wrapped
by `Lambda`. The sequence must contain at least one bijector.
"""
if not bijectors:
raise ValueError("The sequence of bijectors cannot be empty.")
self._bijectors = [conversion.as_bijector(b) for b in bijectors]
# Check that neighboring bijectors in the chain have compatible dimensions
for i, (outer, inner) in enumerate(zip(self._bijectors[:-1],
self._bijectors[1:])):
if outer.event_ndims_in != inner.event_ndims_out:
raise ValueError(
f"The chain of bijector event shapes are incompatible. Bijector "
f"{i} ({outer.name}) expects events with {outer.event_ndims_in} "
f"dimensions, while Bijector {i+1} ({inner.name}) produces events "
f"with {inner.event_ndims_out} dimensions.")
is_constant_jacobian = all(b.is_constant_jacobian for b in self._bijectors)
is_constant_log_det = all(b.is_constant_log_det for b in self._bijectors)
super().__init__(
event_ndims_in=self._bijectors[-1].event_ndims_in,
event_ndims_out=self._bijectors[0].event_ndims_out,
is_constant_jacobian=is_constant_jacobian,
is_constant_log_det=is_constant_log_det)
@property
def bijectors(self) -> List[BijectorT]:
"""The list of bijectors in the chain."""
return self._bijectors
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
for bijector in reversed(self._bijectors):
x = bijector.forward(x)
return x
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
for bijector in self._bijectors:
y = bijector.inverse(y)
return y
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
x, log_det = self._bijectors[-1].forward_and_log_det(x)
for bijector in reversed(self._bijectors[:-1]):
x, ld = bijector.forward_and_log_det(x)
log_det += ld
return x, log_det
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
y, log_det = self._bijectors[0].inverse_and_log_det(y)
for bijector in self._bijectors[1:]:
y, ld = bijector.inverse_and_log_det(y)
log_det += ld
return y, log_det
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Chain: # pylint: disable=unidiomatic-typecheck
if len(self.bijectors) != len(other.bijectors):
return False
for bij1, bij2 in zip(self.bijectors, other.bijectors):
if not bij1.same_as(bij2):
return False
return True
elif len(self.bijectors) == 1:
return self.bijectors[0].same_as(other)
return False
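# Illustrative sketch (not part of the original source); the function name is
# hypothetical. Per the class docstring, `Chain([f, g]).forward(x)` computes
# f(g(x)): the last bijector in the sequence is applied first.
def _example_shift_then_scale(scale: float = 2.0, shift: float = 3.0) -> "Chain":
  """Returns a bijector computing y = scale * (x + shift)."""
  # Plain callables are wrapped into `Lambda` bijectors by `as_bijector`.
  return Chain([lambda x: scale * x, lambda x: x + shift])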
| distrax-master | distrax/_src/bijectors/chain.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `bijector_from_tfp.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import bijector_from_tfp
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
class BijectorFromTFPTest(parameterized.TestCase):
def setUp(self):
super().setUp()
bjs = {}
bjs['BatchedChain'] = tfb.Chain([
tfb.Shift(jnp.zeros((4, 2, 3))),
tfb.ScaleMatvecDiag([[1., 2., 3.], [4., 5., 6.]])
])
bjs['Square'] = tfb.Square()
bjs['ScaleScalar'] = tfb.Scale(2.)
bjs['ScaleMatrix'] = tfb.Scale(2. * jnp.ones((3, 2)))
bjs['Reshape'] = tfb.Reshape((2, 3), (6,))
# To parallelize pytest runs.
# See https://github.com/pytest-dev/pytest-xdist/issues/432.
for name, bij in bjs.items():
bij.__repr__ = lambda _, name_=name: name_
self._test_bijectors = bjs
@chex.all_variants
@parameterized.parameters(
('Square', (), (), (), ()),
('Square', (2, 3), (), (2, 3), ()),
('ScaleScalar', (), (), (), ()),
('ScaleScalar', (2, 3), (), (2, 3), ()),
('ScaleMatrix', (), (), (3, 2), ()),
('ScaleMatrix', (2,), (), (3, 2), ()),
('ScaleMatrix', (1, 1), (), (3, 2), ()),
('ScaleMatrix', (4, 1, 1), (), (4, 3, 2), ()),
('ScaleMatrix', (4, 3, 2), (), (4, 3, 2), ()),
('Reshape', (), (6,), (), (2, 3)),
('Reshape', (10,), (6,), (10,), (2, 3)),
('BatchedChain', (), (3,), (4, 2), (3,)),
('BatchedChain', (2,), (3,), (4, 2), (3,)),
('BatchedChain', (4, 1), (3,), (4, 2), (3,)),
('BatchedChain', (5, 1, 2), (3,), (5, 4, 2), (3,)),
)
def test_forward_methods_are_correct(self, tfp_bij_name, batch_shape_in,
event_shape_in, batch_shape_out,
event_shape_out):
tfp_bij = self._test_bijectors[tfp_bij_name]
bij = bijector_from_tfp.BijectorFromTFP(tfp_bij)
key = jax.random.PRNGKey(42)
x = jax.random.uniform(key, batch_shape_in + event_shape_in)
y = self.variant(bij.forward)(x)
logdet = self.variant(bij.forward_log_det_jacobian)(x)
y_tfp = tfp_bij.forward(x)
logdet_tfp = tfp_bij.forward_log_det_jacobian(x, len(event_shape_in))
logdet_tfp = jnp.broadcast_to(logdet_tfp, batch_shape_out)
self.assertEqual(y.shape, batch_shape_out + event_shape_out)
self.assertEqual(logdet.shape, batch_shape_out)
np.testing.assert_allclose(y, y_tfp, atol=1e-8)
np.testing.assert_allclose(logdet, logdet_tfp, atol=1e-4)
@chex.all_variants
@parameterized.parameters(
('Square', (), (), (), ()),
('Square', (2, 3), (), (2, 3), ()),
('ScaleScalar', (), (), (), ()),
('ScaleScalar', (2, 3), (), (2, 3), ()),
('ScaleMatrix', (3, 2), (), (), ()),
('ScaleMatrix', (3, 2), (), (2,), ()),
('ScaleMatrix', (3, 2), (), (1, 1), ()),
('ScaleMatrix', (4, 3, 2), (), (4, 1, 1), ()),
('ScaleMatrix', (4, 3, 2), (), (4, 3, 2), ()),
('Reshape', (), (6,), (), (2, 3)),
('Reshape', (10,), (6,), (10,), (2, 3)),
('BatchedChain', (4, 2), (3,), (), (3,)),
('BatchedChain', (4, 2), (3,), (2,), (3,)),
('BatchedChain', (4, 2), (3,), (4, 1), (3,)),
('BatchedChain', (5, 4, 2), (3,), (5, 1, 2), (3,)),
)
def test_inverse_methods_are_correct(self, tfp_bij_name, batch_shape_in,
event_shape_in, batch_shape_out,
event_shape_out):
tfp_bij = self._test_bijectors[tfp_bij_name]
bij = bijector_from_tfp.BijectorFromTFP(tfp_bij)
key = jax.random.PRNGKey(42)
y = jax.random.uniform(key, batch_shape_out + event_shape_out)
x = self.variant(bij.inverse)(y)
logdet = self.variant(bij.inverse_log_det_jacobian)(y)
x_tfp = tfp_bij.inverse(y)
logdet_tfp = tfp_bij.inverse_log_det_jacobian(y, len(event_shape_out))
logdet_tfp = jnp.broadcast_to(logdet_tfp, batch_shape_in)
self.assertEqual(x.shape, batch_shape_in + event_shape_in)
self.assertEqual(logdet.shape, batch_shape_in)
np.testing.assert_allclose(x, x_tfp, atol=1e-8)
np.testing.assert_allclose(logdet, logdet_tfp, atol=1e-4)
@chex.all_variants
@parameterized.parameters(
('Square', (), (), (), ()),
('Square', (2, 3), (), (2, 3), ()),
('ScaleScalar', (), (), (), ()),
('ScaleScalar', (2, 3), (), (2, 3), ()),
('ScaleMatrix', (), (), (), ()),
('ScaleMatrix', (2,), (), (2,), ()),
('ScaleMatrix', (1, 1), (), (1, 1), ()),
('ScaleMatrix', (4, 1, 1), (), (4, 1, 1), ()),
('ScaleMatrix', (4, 3, 2), (), (4, 3, 2), ()),
('Reshape', (), (6,), (), (2, 3)),
('Reshape', (10,), (6,), (10,), (2, 3)),
('BatchedChain', (), (3,), (), (3,)),
('BatchedChain', (2,), (3,), (2,), (3,)),
('BatchedChain', (4, 1), (3,), (4, 1), (3,)),
('BatchedChain', (5, 1, 2), (3,), (5, 1, 2), (3,)),
)
def test_composite_methods_are_consistent(self, tfp_bij_name, batch_shape_in,
event_shape_in, batch_shape_out,
event_shape_out):
key1, key2 = jax.random.split(jax.random.PRNGKey(42))
tfp_bij = self._test_bijectors[tfp_bij_name]
bij = bijector_from_tfp.BijectorFromTFP(tfp_bij)
# Forward methods.
x = jax.random.uniform(key1, batch_shape_in + event_shape_in)
y1 = self.variant(bij.forward)(x)
logdet1 = self.variant(bij.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bij.forward_and_log_det)(x)
self.assertEqual(y1.shape, y2.shape)
self.assertEqual(logdet1.shape, logdet2.shape)
np.testing.assert_allclose(y1, y2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-8)
# Inverse methods.
y = jax.random.uniform(key2, batch_shape_out + event_shape_out)
x1 = self.variant(bij.inverse)(y)
logdet1 = self.variant(bij.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bij.inverse_and_log_det)(y)
self.assertEqual(x1.shape, x2.shape)
self.assertEqual(logdet1.shape, logdet2.shape)
np.testing.assert_allclose(x1, x2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-8)
@chex.all_variants
@parameterized.parameters(
('Square', (), (), (), ()),
('Square', (2, 3), (), (2, 3), ()),
('ScaleScalar', (), (), (), ()),
('ScaleScalar', (2, 3), (), (2, 3), ()),
('ScaleMatrix', (), (), (), ()),
('ScaleMatrix', (2,), (), (2,), ()),
('ScaleMatrix', (1, 1), (), (1, 1), ()),
('ScaleMatrix', (4, 1, 1), (), (4, 1, 1), ()),
('ScaleMatrix', (4, 3, 2), (), (4, 3, 2), ()),
('Reshape', (), (6,), (), (2, 3)),
('Reshape', (10,), (6,), (10,), (2, 3)),
('BatchedChain', (), (3,), (), (3,)),
('BatchedChain', (2,), (3,), (2,), (3,)),
('BatchedChain', (4, 1), (3,), (4, 1), (3,)),
('BatchedChain', (5, 1, 2), (3,), (5, 1, 2), (3,)),
)
def test_works_with_tfp_caching(self, tfp_bij_name, batch_shape_in,
event_shape_in, batch_shape_out,
event_shape_out):
tfp_bij = self._test_bijectors[tfp_bij_name]
bij = bijector_from_tfp.BijectorFromTFP(tfp_bij)
key1, key2 = jax.random.split(jax.random.PRNGKey(42))
# Forward caching.
x = jax.random.uniform(key1, batch_shape_in + event_shape_in)
y = self.variant(bij.forward)(x)
x1 = self.variant(bij.inverse)(y)
logdet1 = self.variant(bij.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bij.inverse_and_log_det)(y)
self.assertEqual(x1.shape, x2.shape)
self.assertEqual(logdet1.shape, logdet2.shape)
np.testing.assert_allclose(x1, x2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-8)
# Inverse caching.
y = jax.random.uniform(key2, batch_shape_out + event_shape_out)
x = self.variant(bij.inverse)(y)
y1 = self.variant(bij.forward)(x)
logdet1 = self.variant(bij.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bij.forward_and_log_det)(x)
self.assertEqual(y1.shape, y2.shape)
self.assertEqual(logdet1.shape, logdet2.shape)
np.testing.assert_allclose(y1, y2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-8)
def test_access_properties_tfp_bijector(self):
tfp_bij = self._test_bijectors['BatchedChain']
bij = bijector_from_tfp.BijectorFromTFP(tfp_bij)
# Access the attribute `bijectors`
np.testing.assert_allclose(
bij.bijectors[0].shift, tfp_bij.bijectors[0].shift, atol=1e-8)
np.testing.assert_allclose(
bij.bijectors[1].scale.diag, tfp_bij.bijectors[1].scale.diag, atol=1e-8)
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = bijector_from_tfp.BijectorFromTFP(tfb.Tanh())
x = np.zeros(())
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/bijector_from_tfp_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear bijector."""
import abc
from typing import Sequence, Tuple
from distrax._src.bijectors import bijector as base
import jax.numpy as jnp
Array = base.Array
class Linear(base.Bijector, metaclass=abc.ABCMeta):
"""Base class for linear bijectors.
This class provides a base class for bijectors defined as `f(x) = Ax`,
where `A` is a `DxD` matrix and `x` is a `D`-dimensional vector.
"""
def __init__(self,
event_dims: int,
batch_shape: Sequence[int],
dtype: jnp.dtype):
"""Initializes a `Linear` bijector.
Args:
event_dims: the dimensionality `D` of the event `x`. It is assumed that
`x` is a vector of length `event_dims`.
batch_shape: the batch shape of the bijector.
dtype: the data type of matrix `A`.
"""
super().__init__(event_ndims_in=1, is_constant_jacobian=True)
self._event_dims = event_dims
self._batch_shape = tuple(batch_shape)
self._dtype = dtype
@property
def matrix(self) -> Array:
"""The matrix `A` of the transformation.
To be optionally implemented in a subclass.
Returns:
An array of shape `batch_shape + (event_dims, event_dims)` and data type
`dtype`.
"""
raise NotImplementedError(
f"Linear bijector {self.name} does not implement `matrix`.")
@property
def event_dims(self) -> int:
"""The dimensionality `D` of the event `x`."""
return self._event_dims
@property
def batch_shape(self) -> Tuple[int, ...]:
"""The batch shape of the bijector."""
return self._batch_shape
@property
def dtype(self) -> jnp.dtype:
"""The data type of matrix `A`."""
return self._dtype
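# Illustrative sketch (not part of the original distrax source): a minimal
# `Linear` subclass whose matrix `A` is diagonal. The class name is
# hypothetical; distrax provides its own, more complete linear bijectors.
class _ExampleDiagLinear(Linear):
  """Linear bijector f(x) = diag(d) @ x for a fixed diagonal vector `d`."""

  def __init__(self, diag: Array):
    super().__init__(
        event_dims=diag.shape[-1],
        batch_shape=diag.shape[:-1],
        dtype=diag.dtype)
    self._diag = diag

  @property
  def matrix(self) -> Array:
    # Batched equivalent of `jnp.diag`: place `d` on the diagonal of a DxD matrix.
    return self._diag[..., None] * jnp.eye(self.event_dims, dtype=self.dtype)

  def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
    y = self._diag * x
    # For a diagonal matrix, log|det A| is the sum of log|d_i|.
    logdet = jnp.broadcast_to(
        jnp.sum(jnp.log(jnp.abs(self._diag)), axis=-1), y.shape[:-1])
    return y, logdet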
| distrax-master | distrax/_src/bijectors/linear.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Triangular linear bijector."""
import functools
from typing import Tuple
from distrax._src.bijectors import bijector as base
from distrax._src.bijectors import linear
import jax
import jax.numpy as jnp
Array = base.Array
def _triangular_logdet(matrix: Array) -> Array:
"""Computes the log absolute determinant of a triangular matrix."""
return jnp.sum(jnp.log(jnp.abs(jnp.diag(matrix))))
def _forward_unbatched(x: Array, matrix: Array) -> Array:
return matrix @ x
def _inverse_unbatched(y: Array, matrix: Array, is_lower: bool) -> Array:
return jax.scipy.linalg.solve_triangular(matrix, y, lower=is_lower)
class TriangularLinear(linear.Linear):
"""A linear bijector whose weight matrix is triangular.
The bijector is defined as `f(x) = Ax` where `A` is a DxD triangular matrix.
The Jacobian determinant can be computed in O(D) as follows:
log|det J(x)| = log|det A| = sum(log|diag(A)|)
The inverse is computed in O(D^2) by solving the triangular system `Ax = y`.
The bijector is invertible if and only if all diagonal elements of `A` are
non-zero. It is the responsibility of the user to make sure that this is the
case; the class will make no attempt to verify that the bijector is
invertible.
"""
def __init__(self, matrix: Array, is_lower: bool = True):
"""Initializes a `TriangularLinear` bijector.
Args:
matrix: a square matrix whose triangular part defines `A`. Can also be a
batch of matrices. Whether `A` is the lower or upper triangular part of
`matrix` is determined by `is_lower`.
is_lower: if True, `A` is set to the lower triangular part of `matrix`. If
False, `A` is set to the upper triangular part of `matrix`.
"""
if matrix.ndim < 2:
raise ValueError(f"`matrix` must have at least 2 dimensions, got"
f" {matrix.ndim}.")
if matrix.shape[-2] != matrix.shape[-1]:
raise ValueError(f"`matrix` must be square; instead, it has shape"
f" {matrix.shape[-2:]}.")
super().__init__(
event_dims=matrix.shape[-1],
batch_shape=matrix.shape[:-2],
dtype=matrix.dtype)
self._matrix = jnp.tril(matrix) if is_lower else jnp.triu(matrix)
self._is_lower = is_lower
triangular_logdet = jnp.vectorize(_triangular_logdet, signature="(m,m)->()")
self._logdet = triangular_logdet(self._matrix)
@property
def matrix(self) -> Array:
"""The triangular matrix `A` of the transformation."""
return self._matrix
@property
def is_lower(self) -> bool:
"""True if `A` is lower triangular, False if upper triangular."""
return self._is_lower
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
self._check_forward_input_shape(x)
batched = jnp.vectorize(_forward_unbatched, signature="(m),(m,m)->(m)")
return batched(x, self._matrix)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
self._check_forward_input_shape(x)
batch_shape = jax.lax.broadcast_shapes(self.batch_shape, x.shape[:-1])
return jnp.broadcast_to(self._logdet, batch_shape)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self.forward(x), self.forward_log_det_jacobian(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
self._check_inverse_input_shape(y)
batched = jnp.vectorize(
functools.partial(_inverse_unbatched, is_lower=self._is_lower),
signature="(m),(m,m)->(m)")
return batched(y, self._matrix)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
return -self.forward_log_det_jacobian(y)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self.inverse(y), self.inverse_log_det_jacobian(y)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is TriangularLinear: # pylint: disable=unidiomatic-typecheck
return all((
self.matrix is other.matrix,
self.is_lower is other.is_lower,
))
return False
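# Illustrative check (not part of the original source); the function name is
# hypothetical. It verifies the O(D) identity used above,
# log|det A| = sum(log|diag(A)|), against a dense `slogdet` on a 3x3 example.
def _example_triangular_logdet_check() -> Array:
  """Returns the discrepancy (expected ~0) between the two log-det formulas."""
  lower = jnp.array([[2., 0., 0.], [1., 3., 0.], [4., 5., 6.]])
  fast = _triangular_logdet(lower)       # log(2) + log(3) + log(6)
  _, dense = jnp.linalg.slogdet(lower)   # Dense O(D^3) reference.
  return jnp.abs(fast - dense)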
| distrax-master | distrax/_src/bijectors/triangular_linear.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `linear.py`."""
from absl.testing import absltest
from absl.testing import parameterized
from distrax._src.bijectors import linear
import jax.numpy as jnp
class MockLinear(linear.Linear):
def forward_and_log_det(self, x):
raise Exception # pylint:disable=broad-exception-raised
class LinearTest(parameterized.TestCase):
@parameterized.parameters(
{'event_dims': 1, 'batch_shape': (), 'dtype': jnp.float16},
{'event_dims': 10, 'batch_shape': (2, 3), 'dtype': jnp.float32})
def test_properties(self, event_dims, batch_shape, dtype):
bij = MockLinear(event_dims, batch_shape, dtype)
self.assertEqual(bij.event_ndims_in, 1)
self.assertEqual(bij.event_ndims_out, 1)
self.assertTrue(bij.is_constant_jacobian)
self.assertTrue(bij.is_constant_log_det)
self.assertEqual(bij.event_dims, event_dims)
self.assertEqual(bij.batch_shape, batch_shape)
self.assertEqual(bij.dtype, dtype)
with self.assertRaises(NotImplementedError):
bij.matrix # pylint: disable=pointless-statement
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/linear_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| distrax-master | distrax/_src/bijectors/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `block.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import bijector as base
from distrax._src.bijectors import block as block_bijector
from distrax._src.bijectors import scalar_affine
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
RTOL = 1e-6
class BlockTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = jax.random.PRNGKey(1234)
def test_properties(self):
bijct = conversion.as_bijector(jnp.tanh)
block = block_bijector.Block(bijct, 1)
assert block.ndims == 1
assert isinstance(block.bijector, base.Bijector)
def test_invalid_properties(self):
bijct = conversion.as_bijector(jnp.tanh)
with self.assertRaises(ValueError):
block_bijector.Block(bijct, -1)
@chex.all_variants
@parameterized.named_parameters(
('scale_0', lambda: tfb.Scale(2), 0),
('scale_1', lambda: tfb.Scale(2), 1),
('scale_2', lambda: tfb.Scale(2), 2),
('reshape_0', lambda: tfb.Reshape([120], [4, 5, 6]), 0),
('reshape_1', lambda: tfb.Reshape([120], [4, 5, 6]), 1),
('reshape_2', lambda: tfb.Reshape([120], [4, 5, 6]), 2),
)
def test_against_tfp_semantics(self, tfp_bijector_fn, ndims):
tfp_bijector = tfp_bijector_fn()
x = jax.random.normal(self.seed, [2, 3, 4, 5, 6])
y = tfp_bijector(x)
fwd_event_ndims = ndims + tfp_bijector.forward_min_event_ndims
inv_event_ndims = ndims + tfp_bijector.inverse_min_event_ndims
block = block_bijector.Block(tfp_bijector, ndims)
np.testing.assert_allclose(
tfp_bijector.forward_log_det_jacobian(x, fwd_event_ndims),
self.variant(block.forward_log_det_jacobian)(x), atol=2e-5)
np.testing.assert_allclose(
tfp_bijector.inverse_log_det_jacobian(y, inv_event_ndims),
self.variant(block.inverse_log_det_jacobian)(y), atol=2e-5)
@chex.all_variants
@parameterized.named_parameters(
('dx_tanh_0', lambda: jnp.tanh, 0),
('dx_tanh_1', lambda: jnp.tanh, 1),
('dx_tanh_2', lambda: jnp.tanh, 2),
('tfp_tanh_0', tfb.Tanh, 0),
('tfp_tanh_1', tfb.Tanh, 1),
('tfp_tanh_2', tfb.Tanh, 2),
)
def test_forward_inverse_work_as_expected(self, bijector_fn, ndims):
bijct = conversion.as_bijector(bijector_fn())
x = jax.random.normal(self.seed, [2, 3])
block = block_bijector.Block(bijct, ndims)
np.testing.assert_array_equal(
self.variant(bijct.forward)(x),
self.variant(block.forward)(x))
np.testing.assert_array_equal(
self.variant(bijct.inverse)(x),
self.variant(block.inverse)(x))
np.testing.assert_allclose(
self.variant(bijct.forward_and_log_det)(x)[0],
self.variant(block.forward_and_log_det)(x)[0], atol=2e-7)
np.testing.assert_array_equal(
self.variant(bijct.inverse_and_log_det)(x)[0],
self.variant(block.inverse_and_log_det)(x)[0])
@chex.all_variants
@parameterized.named_parameters(
('dx_tanh_0', lambda: jnp.tanh, 0),
('dx_tanh_1', lambda: jnp.tanh, 1),
('dx_tanh_2', lambda: jnp.tanh, 2),
('tfp_tanh_0', tfb.Tanh, 0),
('tfp_tanh_1', tfb.Tanh, 1),
('tfp_tanh_2', tfb.Tanh, 2),
)
def test_log_det_jacobian_works_as_expected(self, bijector_fn, ndims):
bijct = conversion.as_bijector(bijector_fn())
x = jax.random.normal(self.seed, [2, 3])
block = block_bijector.Block(bijct, ndims)
axes = tuple(range(-ndims, 0))
np.testing.assert_allclose(
self.variant(bijct.forward_log_det_jacobian)(x).sum(axes),
self.variant(block.forward_log_det_jacobian)(x), rtol=RTOL)
np.testing.assert_allclose(
self.variant(bijct.inverse_log_det_jacobian)(x).sum(axes),
self.variant(block.inverse_log_det_jacobian)(x), rtol=RTOL)
np.testing.assert_allclose(
self.variant(bijct.forward_and_log_det)(x)[1].sum(axes),
self.variant(block.forward_and_log_det)(x)[1], rtol=RTOL)
np.testing.assert_allclose(
self.variant(bijct.inverse_and_log_det)(x)[1].sum(axes),
self.variant(block.inverse_and_log_det)(x)[1], rtol=RTOL)
def test_raises_on_invalid_input_shape(self):
bij = block_bijector.Block(lambda x: x, 1)
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.array(0))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = block_bijector.Block(scalar_affine.ScalarAffine(0), 1)
x = np.zeros((2, 3))
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/block_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `rational_quadratic_spline.py`.
Float64 is enabled in these tests. We keep them separate from other tests to
avoid interfering with types elsewhere.
"""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import rational_quadratic_spline
from jax.config import config as jax_config
import jax.numpy as jnp
def setUpModule():
jax_config.update('jax_enable_x64', True)
class RationalQuadraticSplineFloat64Test(chex.TestCase):
"""Tests for rational quadratic spline that use float64."""
def _assert_dtypes(self, bij, x, dtype):
"""Asserts dtypes."""
# Sanity check to make sure float64 is enabled.
x_64 = jnp.zeros([])
self.assertEqual(jnp.float64, x_64.dtype)
y, logd = self.variant(bij.forward_and_log_det)(x)
self.assertEqual(dtype, y.dtype)
self.assertEqual(dtype, logd.dtype)
y, logd = self.variant(bij.inverse_and_log_det)(x)
self.assertEqual(dtype, y.dtype)
self.assertEqual(dtype, logd.dtype)
@chex.all_variants
@parameterized.product(
dtypes=[(jnp.float32, jnp.float32, jnp.float32),
(jnp.float32, jnp.float64, jnp.float64),
(jnp.float64, jnp.float32, jnp.float64),
(jnp.float64, jnp.float64, jnp.float64)],
boundary_slopes=['unconstrained', 'lower_identity', 'upper_identity',
'identity', 'circular'])
def test_dtypes(self, dtypes, boundary_slopes):
x_dtype, params_dtype, result_dtype = dtypes
x = jnp.zeros([3], x_dtype)
self.assertEqual(x_dtype, x.dtype)
spline = rational_quadratic_spline.RationalQuadraticSpline(
jnp.zeros([25], params_dtype), 0., 1., boundary_slopes=boundary_slopes)
self._assert_dtypes(spline, x, result_dtype)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/rational_quadratic_spline_float64_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `chain.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import bijector as base_bijector
from distrax._src.bijectors import block
from distrax._src.bijectors import chain
from distrax._src.bijectors import scalar_affine
from distrax._src.distributions import normal
from distrax._src.distributions import transformed
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
RTOL = 1e-2
def _with_additional_parameters(params, all_named_parameters):
"""Convenience function for appending a cartesian product of parameters."""
for name, param in params:
for named_params in all_named_parameters:
yield (f'{named_params[0]}; {name}',) + named_params[1:] + (param,)
def _with_base_dists(*all_named_parameters):
"""Partial of _with_additional_parameters to specify distrax and TFP base."""
base_dists = (
('tfp_base', tfd.Normal),
('distrax_base', normal.Normal),
)
return _with_additional_parameters(base_dists, all_named_parameters)
class ChainTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = jax.random.PRNGKey(1234)
def test_properties(self):
bijector = chain.Chain([tfb.Scale(2), tfb.Shift(3), jnp.tanh])
for bij in bijector.bijectors:
assert isinstance(bij, base_bijector.Bijector)
@parameterized.named_parameters(_with_base_dists(
('1d std normal', 0, 1),
('2d std normal', np.zeros(2), np.ones(2)),
('broadcasted loc', 0, np.ones(3)),
('broadcasted scale', np.ones(3), 1),
))
def test_event_shape(self, mu, sigma, base_dist):
base = base_dist(mu, sigma)
bijector = chain.Chain([tfb.Scale(2), tfb.Shift(3), tfb.Tanh()])
dist = transformed.Transformed(base, bijector)
tfp_bijector = tfb.Chain([tfb.Scale(2), tfb.Shift(3), tfb.Tanh()])
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
assert dist.event_shape == tfp_dist.event_shape
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d std normal, no shape', 0, 1, ()),
('1d std normal, int shape', 0, 1, 1),
('1d std normal, 1-tuple shape', 0, 1, (1,)),
('1d std normal, 2-tuple shape', 0, 1, (2, 2)),
('2d std normal, no shape', np.zeros(2), np.ones(2), ()),
('2d std normal, int shape', [0, 0], [1, 1], 1),
('2d std normal, 1-tuple shape', np.zeros(2), np.ones(2), (1,)),
('2d std normal, 2-tuple shape', [0, 0], [1, 1], (2, 2)),
('rank 2 std normal, 2-tuple shape', np.zeros(
(3, 2)), np.ones((3, 2)), (2, 2)),
('broadcasted loc', 0, np.ones(3), (2, 2)),
('broadcasted scale', np.ones(3), 1, ()),
))
def test_sample_shape(self, mu, sigma, sample_shape, base_dist):
base = base_dist(mu, sigma)
bijector = chain.Chain([tfb.Scale(2), tfb.Shift(3), tfb.Tanh()])
dist = transformed.Transformed(base, bijector)
def sample_fn(seed, sample_shape):
return dist.sample(seed=seed, sample_shape=sample_shape)
samples = self.variant(sample_fn, ignore_argnums=(1,), static_argnums=1)(
self.seed, sample_shape)
tfp_bijector = tfb.Chain([tfb.Scale(2), tfb.Shift(3), tfb.Tanh()])
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
tfp_samples = tfp_dist.sample(sample_shape=sample_shape, seed=self.seed)
chex.assert_equal_shape([samples, tfp_samples])
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d dist, 1d value', 0, 1, 1.),
('1d dist, 2d value', 0., 1., np.array([1., 2.])),
('2d dist, 1d value', np.zeros(2), np.ones(2), 1.),
('2d broadcasted dist, 1d value', np.zeros(2), 1, 1.),
('2d dist, 2d value', np.zeros(2), np.ones(2), np.array([1., 2.])),
('1d dist, 1d value, edge case', 0, 1, 200.),
))
def test_log_prob(self, mu, sigma, value, base_dist):
base = base_dist(mu, sigma)
bijector = chain.Chain([tfb.Scale(2), tfb.Shift(3), tfb.Tanh()])
dist = transformed.Transformed(base, bijector)
actual = self.variant(dist.log_prob)(value)
tfp_bijector = tfb.Chain([tfb.Scale(2), tfb.Shift(3), tfb.Tanh()])
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
expected = tfp_dist.log_prob(value)
np.testing.assert_array_equal(actual, expected)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d dist, 1d value', 0, 1, 1.),
('1d dist, 2d value', 0., 1., np.array([1., 2.])),
('2d dist, 1d value', np.zeros(2), np.ones(2), 1.),
('2d broadcasted dist, 1d value', np.zeros(2), 1, 1.),
('2d dist, 2d value', np.zeros(2), np.ones(2), np.array([1., 2.])),
('1d dist, 1d value, edge case', 0, 1, 200.),
))
def test_prob(self, mu, sigma, value, base_dist):
base = base_dist(mu, sigma)
bijector = chain.Chain([tfb.Scale(2), tfb.Shift(3), tfb.Tanh()])
dist = transformed.Transformed(base, bijector)
actual = self.variant(dist.prob)(value)
tfp_bijector = tfb.Chain([tfb.Scale(2), tfb.Shift(3), tfb.Tanh()])
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
expected = tfp_dist.prob(value)
np.testing.assert_array_equal(actual, expected)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d std normal, no shape', 0, 1, ()),
('1d std normal, int shape', 0, 1, 1),
('1d std normal, 1-tuple shape', 0, 1, (1,)),
('1d std normal, 2-tuple shape', 0, 1, (2, 2)),
('2d std normal, no shape', np.zeros(2), np.ones(2), ()),
('2d std normal, int shape', [0, 0], [1, 1], 1),
('2d std normal, 1-tuple shape', np.zeros(2), np.ones(2), (1,)),
('2d std normal, 2-tuple shape', [0, 0], [1, 1], (2, 2)),
('rank 2 std normal, 2-tuple shape', np.zeros(
(3, 2)), np.ones((3, 2)), (2, 2)),
('broadcasted loc', 0, np.ones(3), (2, 2)),
('broadcasted scale', np.ones(3), 1, ()),
))
def test_sample_and_log_prob(self, mu, sigma, sample_shape, base_dist):
base = base_dist(mu, sigma)
bijector = chain.Chain([tfb.Scale(10), tfb.Tanh(), tfb.Scale(0.1)])
dist = transformed.Transformed(base, bijector)
def sample_and_log_prob_fn(seed, sample_shape):
return dist.sample_and_log_prob(seed=seed, sample_shape=sample_shape)
samples, log_prob = self.variant(
sample_and_log_prob_fn, ignore_argnums=(1,), static_argnums=(1,))(
self.seed, sample_shape)
expected_samples = bijector.forward(
base.sample(seed=self.seed, sample_shape=sample_shape))
tfp_bijector = tfb.Chain([tfb.Scale(10), tfb.Tanh(), tfb.Scale(0.1)])
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
tfp_samples = tfp_dist.sample(seed=self.seed, sample_shape=sample_shape)
tfp_log_prob = tfp_dist.log_prob(samples)
chex.assert_equal_shape([samples, tfp_samples])
np.testing.assert_allclose(log_prob, tfp_log_prob, rtol=RTOL)
np.testing.assert_allclose(samples, expected_samples, rtol=RTOL)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(_with_base_dists(
('entropy', 'entropy', 0., 1.),
('mean', 'mean', 0, 1),
('mean from list params', 'mean', [-1, 1], [1, 2]),
('mode', 'mode', 0, 1),
))
def test_method(self, function_string, mu, sigma, base_dist):
base = base_dist(mu, sigma)
bijector = chain.Chain([tfb.Identity(), tfb.Scale(2), tfb.Shift(3)])
dist = transformed.Transformed(base, bijector)
tfp_bijector = tfb.Chain([tfb.Identity(), tfb.Scale(2), tfb.Shift(3)])
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
np.testing.assert_allclose(
self.variant(getattr(dist, function_string))(),
getattr(tfp_dist, function_string)())
@chex.all_variants
@parameterized.named_parameters(
('scalar outer, scalar inner, batched input', (), (), (2, 3)),
('scalar outer, batched inner, scalar input', (), (2, 3), ()),
('scalar outer, batched inner, batched input', (), (2, 3), (2, 3)),
('batched outer, scalar inner, scalar input', (2, 3), (), ()),
('batched outer, scalar inner, batched input', (2, 3), (), (2, 3)),
('batched outer, batched inner, scalar input', (2, 3), (2, 3), ()),
('batched outer, batched inner, batched input', (2, 3), (2, 3), (2, 3)),
)
def test_batched_bijectors(self, outer_shape, inner_shape, input_shape):
outer_bij = tfb.Shift(5. * jnp.ones(outer_shape))
inner_bij = tfb.Shift(7. * jnp.ones(inner_shape))
dx_bij = chain.Chain([outer_bij, inner_bij])
tfb_bij = tfb.Chain([outer_bij, inner_bij])
x = jnp.zeros(input_shape)
dx_y = self.variant(dx_bij.forward)(x)
tfb_y = self.variant(tfb_bij.forward)(x)
chex.assert_equal_shape([dx_y, tfb_y])
np.testing.assert_allclose(dx_y, tfb_y, rtol=RTOL)
def test_raises_on_empty_list(self):
with self.assertRaises(ValueError):
chain.Chain([])
def test_raises_on_incompatible_dimensions(self):
with self.assertRaises(ValueError):
chain.Chain([jnp.log, block.Block(jnp.exp, 1)])
@chex.all_variants
@parameterized.named_parameters(
('int16', np.array([1, 2], dtype=np.int16)),
('int32', np.array([1, 2], dtype=np.int32)),
('int64', np.array([1, 2], dtype=np.int64)),
)
def test_integer_inputs(self, inputs):
bijector = chain.Chain([scalar_affine.ScalarAffine(shift=1.0)])
output, log_det = self.variant(bijector.forward_and_log_det)(inputs)
expected_out = jnp.array(inputs, dtype=jnp.float32) + 1.0
expected_log_det = jnp.zeros_like(inputs, dtype=jnp.float32)
np.testing.assert_array_equal(output, expected_out)
np.testing.assert_array_equal(log_det, expected_log_det)
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = chain.Chain([scalar_affine.ScalarAffine(0, 1)])
x = np.zeros(())
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/chain_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `sigmoid.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import sigmoid
from distrax._src.bijectors import tanh
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
RTOL = 1e-5
class SigmoidTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = jax.random.PRNGKey(1234)
def test_properties(self):
bijector = sigmoid.Sigmoid()
self.assertEqual(bijector.event_ndims_in, 0)
self.assertEqual(bijector.event_ndims_out, 0)
self.assertFalse(bijector.is_constant_jacobian)
self.assertFalse(bijector.is_constant_log_det)
@chex.all_variants
@parameterized.parameters(
{'x_shape': (2,)},
{'x_shape': (2, 3)},
{'x_shape': (2, 3, 4)})
def test_forward_shapes(self, x_shape):
x = jnp.zeros(x_shape)
bijector = sigmoid.Sigmoid()
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
self.assertEqual(y1.shape, x_shape)
self.assertEqual(y2.shape, x_shape)
self.assertEqual(logdet1.shape, x_shape)
self.assertEqual(logdet2.shape, x_shape)
@chex.all_variants
@parameterized.parameters(
{'y_shape': (2,)},
{'y_shape': (2, 3)},
{'y_shape': (2, 3, 4)})
def test_inverse_shapes(self, y_shape):
y = jnp.zeros(y_shape)
bijector = sigmoid.Sigmoid()
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
self.assertEqual(x1.shape, y_shape)
self.assertEqual(x2.shape, y_shape)
self.assertEqual(logdet1.shape, y_shape)
self.assertEqual(logdet2.shape, y_shape)
@chex.all_variants
def test_forward(self):
x = jax.random.normal(self.seed, (100,))
bijector = sigmoid.Sigmoid()
y = self.variant(bijector.forward)(x)
np.testing.assert_allclose(y, jax.nn.sigmoid(x), rtol=RTOL)
@chex.all_variants
def test_forward_log_det_jacobian(self):
x = jax.random.normal(self.seed, (100,))
bijector = sigmoid.Sigmoid()
fwd_logdet = self.variant(bijector.forward_log_det_jacobian)(x)
actual = jnp.log(jax.vmap(jax.grad(bijector.forward))(x))
np.testing.assert_allclose(fwd_logdet, actual, rtol=1e-3)
@chex.all_variants
def test_forward_and_log_det(self):
x = jax.random.normal(self.seed, (100,))
bijector = sigmoid.Sigmoid()
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y1, y2, rtol=RTOL)
np.testing.assert_allclose(logdet1, logdet2, rtol=RTOL)
@chex.all_variants
def test_inverse(self):
x = jax.random.normal(self.seed, (100,))
bijector = sigmoid.Sigmoid()
y = self.variant(bijector.forward)(x)
x_rec = self.variant(bijector.inverse)(y)
np.testing.assert_allclose(x_rec, x, rtol=1e-3)
@chex.all_variants
def test_inverse_log_det_jacobian(self):
x = jax.random.normal(self.seed, (100,))
bijector = sigmoid.Sigmoid()
y = self.variant(bijector.forward)(x)
fwd_logdet = self.variant(bijector.forward_log_det_jacobian)(x)
inv_logdet = self.variant(bijector.inverse_log_det_jacobian)(y)
np.testing.assert_allclose(inv_logdet, -fwd_logdet, rtol=1e-4)
@chex.all_variants
def test_inverse_and_log_det(self):
y = jax.random.normal(self.seed, (100,))
bijector = sigmoid.Sigmoid()
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x1, x2, rtol=RTOL)
np.testing.assert_allclose(logdet1, logdet2, rtol=RTOL)
@chex.all_variants
def test_stability(self):
bijector = sigmoid.Sigmoid()
tfp_bijector = tfb.Sigmoid()
x = np.array([-10.0, -3.3, 0.0, 3.3, 10.0], dtype=np.float32)
fldj = tfp_bijector.forward_log_det_jacobian(x, event_ndims=0)
fldj_ = self.variant(bijector.forward_log_det_jacobian)(x)
np.testing.assert_allclose(fldj_, fldj, rtol=RTOL)
y = bijector.forward(x) # pytype: disable=wrong-arg-types # jax-ndarray
ildj = tfp_bijector.inverse_log_det_jacobian(y, event_ndims=0)
ildj_ = self.variant(bijector.inverse_log_det_jacobian)(y)
np.testing.assert_allclose(ildj_, ildj, rtol=RTOL)
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = sigmoid.Sigmoid()
x = np.zeros(())
f(x, bijector)
def test_same_as(self):
bijector = sigmoid.Sigmoid()
self.assertTrue(bijector.same_as(bijector))
self.assertTrue(bijector.same_as(sigmoid.Sigmoid()))
self.assertFalse(bijector.same_as(tanh.Tanh()))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/sigmoid_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tanh.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import sigmoid
from distrax._src.bijectors import tanh
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
RTOL = 1e-5
class TanhTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = jax.random.PRNGKey(1234)
def test_properties(self):
bijector = tanh.Tanh()
self.assertEqual(bijector.event_ndims_in, 0)
self.assertEqual(bijector.event_ndims_out, 0)
self.assertFalse(bijector.is_constant_jacobian)
self.assertFalse(bijector.is_constant_log_det)
@chex.all_variants
@parameterized.parameters(
{'x_shape': (2,)},
{'x_shape': (2, 3)},
{'x_shape': (2, 3, 4)})
def test_forward_shapes(self, x_shape):
x = jnp.zeros(x_shape)
bijector = tanh.Tanh()
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
self.assertEqual(y1.shape, x_shape)
self.assertEqual(y2.shape, x_shape)
self.assertEqual(logdet1.shape, x_shape)
self.assertEqual(logdet2.shape, x_shape)
@chex.all_variants
@parameterized.parameters(
{'y_shape': (2,)},
{'y_shape': (2, 3)},
{'y_shape': (2, 3, 4)})
def test_inverse_shapes(self, y_shape):
y = jnp.zeros(y_shape)
bijector = tanh.Tanh()
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
self.assertEqual(x1.shape, y_shape)
self.assertEqual(x2.shape, y_shape)
self.assertEqual(logdet1.shape, y_shape)
self.assertEqual(logdet2.shape, y_shape)
@chex.all_variants
def test_forward(self):
x = jax.random.normal(self.seed, (100,))
bijector = tanh.Tanh()
y = self.variant(bijector.forward)(x)
np.testing.assert_allclose(y, jnp.tanh(x), rtol=RTOL)
@chex.all_variants
def test_forward_log_det_jacobian(self):
x = jax.random.normal(self.seed, (100,))
bijector = tanh.Tanh()
fwd_logdet = self.variant(bijector.forward_log_det_jacobian)(x)
actual = jnp.log(jax.vmap(jax.grad(bijector.forward))(x))
np.testing.assert_allclose(fwd_logdet, actual, rtol=1e-2)
@chex.all_variants
def test_forward_and_log_det(self):
x = jax.random.normal(self.seed, (100,))
bijector = tanh.Tanh()
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y1, y2, rtol=RTOL)
np.testing.assert_allclose(logdet1, logdet2, rtol=RTOL)
@chex.all_variants
def test_inverse(self):
x = jax.random.normal(self.seed, (100,))
bijector = tanh.Tanh()
y = self.variant(bijector.forward)(x)
x_rec = self.variant(bijector.inverse)(y)
np.testing.assert_allclose(x_rec, x, rtol=1e-3)
@chex.all_variants
def test_inverse_log_det_jacobian(self):
x = jax.random.normal(self.seed, (100,))
bijector = tanh.Tanh()
y = self.variant(bijector.forward)(x)
fwd_logdet = self.variant(bijector.forward_log_det_jacobian)(x)
inv_logdet = self.variant(bijector.inverse_log_det_jacobian)(y)
np.testing.assert_allclose(inv_logdet, -fwd_logdet, rtol=1e-3)
@chex.all_variants
def test_inverse_and_log_det(self):
y = jax.random.normal(self.seed, (100,))
bijector = tanh.Tanh()
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x1, x2, rtol=RTOL)
np.testing.assert_allclose(logdet1, logdet2, rtol=RTOL)
@chex.all_variants
def test_stability(self):
bijector = tanh.Tanh()
tfp_bijector = tfb.Tanh()
x = np.array([-10.0, -3.3, 0.0, 3.3, 10.0], dtype=np.float32)
fldj = tfp_bijector.forward_log_det_jacobian(x, event_ndims=0)
fldj_ = self.variant(bijector.forward_log_det_jacobian)(x)
np.testing.assert_allclose(fldj_, fldj, rtol=RTOL)
y = bijector.forward(x) # pytype: disable=wrong-arg-types # jax-ndarray
ildj = tfp_bijector.inverse_log_det_jacobian(y, event_ndims=0)
ildj_ = self.variant(bijector.inverse_log_det_jacobian)(y)
np.testing.assert_allclose(ildj_, ildj, rtol=RTOL)
@chex.all_variants
@parameterized.named_parameters(
('int16', np.array([0, 0], dtype=np.int16)),
('int32', np.array([0, 0], dtype=np.int32)),
('int64', np.array([0, 0], dtype=np.int64)),
)
def test_integer_inputs(self, inputs):
bijector = tanh.Tanh()
output, log_det = self.variant(bijector.forward_and_log_det)(inputs)
expected_out = jnp.tanh(inputs).astype(jnp.float32)
expected_log_det = jnp.zeros_like(inputs, dtype=jnp.float32)
np.testing.assert_array_equal(output, expected_out)
np.testing.assert_array_equal(log_det, expected_log_det)
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = tanh.Tanh()
x = np.zeros(())
f(x, bijector)
def test_same_as(self):
bijector = tanh.Tanh()
self.assertTrue(bijector.same_as(bijector))
self.assertTrue(bijector.same_as(tanh.Tanh()))
self.assertFalse(bijector.same_as(sigmoid.Sigmoid()))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/tanh_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Rational-quadratic spline bijector."""
from typing import Tuple
from distrax._src.bijectors import bijector as base
import jax
import jax.numpy as jnp
Array = base.Array
def _normalize_bin_sizes(unnormalized_bin_sizes: Array,
total_size: float,
min_bin_size: float) -> Array:
"""Make bin sizes sum to `total_size` and be no less than `min_bin_size`."""
num_bins = unnormalized_bin_sizes.shape[-1]
if num_bins * min_bin_size > total_size:
raise ValueError(
f'The number of bins ({num_bins}) times the minimum bin size'
f' ({min_bin_size}) cannot be greater than the total bin size'
f' ({total_size}).')
bin_sizes = jax.nn.softmax(unnormalized_bin_sizes, axis=-1)
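  # The softmax outputs sum to 1, so the rescaled sizes below sum to
  # (total_size - num_bins * min_bin_size) + num_bins * min_bin_size
  # == total_size, and each size is at least min_bin_size.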
return bin_sizes * (total_size - num_bins * min_bin_size) + min_bin_size
def _normalize_knot_slopes(unnormalized_knot_slopes: Array,
min_knot_slope: float) -> Array:
"""Make knot slopes be no less than `min_knot_slope`."""
# The offset is such that the normalized knot slope will be equal to 1
# whenever the unnormalized knot slope is equal to 0.
if min_knot_slope >= 1.:
raise ValueError(f'The minimum knot slope must be less than 1; got'
f' {min_knot_slope}.')
min_knot_slope = jnp.array(
min_knot_slope, dtype=unnormalized_knot_slopes.dtype)
offset = jnp.log(jnp.exp(1. - min_knot_slope) - 1.)
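  # With this offset, softplus(offset) = log(1 + exp(1 - min_knot_slope) - 1)
  # = 1 - min_knot_slope, so a zero unnormalized slope maps to a slope of
  # exactly 1 after adding min_knot_slope back.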
return jax.nn.softplus(unnormalized_knot_slopes + offset) + min_knot_slope
def _rational_quadratic_spline_fwd(x: Array,
x_pos: Array,
y_pos: Array,
knot_slopes: Array) -> Tuple[Array, Array]:
"""Applies a rational-quadratic spline to a scalar.
Args:
x: a scalar (0-dimensional array). The scalar `x` can be any real number; it
will be transformed by the spline if it's in the closed interval
`[x_pos[0], x_pos[-1]]`, and it will be transformed linearly if it's
outside that interval.
x_pos: array of shape [num_bins + 1], the bin boundaries on the x axis.
y_pos: array of shape [num_bins + 1], the bin boundaries on the y axis.
knot_slopes: array of shape [num_bins + 1], the slopes at the knot points.
Returns:
A tuple of two scalars: the output of the transformation and the log of the
absolute first derivative at `x`.
"""
# Search to find the right bin. NOTE: The bins are sorted, so we could use
# binary search, but this is more GPU/TPU friendly.
# The following implementation avoids indexing for faster TPU computation.
below_range = x <= x_pos[0]
above_range = x >= x_pos[-1]
correct_bin = jnp.logical_and(x >= x_pos[:-1], x < x_pos[1:])
any_bin_in_range = jnp.any(correct_bin)
first_bin = jnp.concatenate([jnp.array([1]),
jnp.zeros(len(correct_bin)-1)]).astype(bool)
  # If x does not fall into any bin, we use the first spline in the following
  # computations to avoid numerical issues.
correct_bin = jnp.where(any_bin_in_range, correct_bin, first_bin)
# Dot product of each parameter with the correct bin mask.
params = jnp.stack([x_pos, y_pos, knot_slopes], axis=1)
params_bin_left = jnp.sum(correct_bin[:, None] * params[:-1], axis=0)
params_bin_right = jnp.sum(correct_bin[:, None] * params[1:], axis=0)
x_pos_bin = (params_bin_left[0], params_bin_right[0])
y_pos_bin = (params_bin_left[1], params_bin_right[1])
knot_slopes_bin = (params_bin_left[2], params_bin_right[2])
bin_width = x_pos_bin[1] - x_pos_bin[0]
bin_height = y_pos_bin[1] - y_pos_bin[0]
bin_slope = bin_height / bin_width
z = (x - x_pos_bin[0]) / bin_width
  # `z` should lie in [0, 1], but it can fall slightly outside due to small
  # floating-point errors or when x is outside the range of the bins, which
  # would produce NaNs later. To avoid this, we clip z to [0, 1].
z = jnp.clip(z, 0., 1.)
sq_z = z * z
z1mz = z - sq_z # z(1-z)
sq_1mz = (1. - z) ** 2
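  # The transform below is the rational-quadratic expression of Durkan et al.
  # (2019): with s = bin_slope and (d_k, d_{k+1}) the knot slopes of the bin,
  #   y = y_k + bin_height * [s z^2 + d_k z(1-z)]
  #                        / [s + (d_{k+1} + d_k - 2 s) z(1-z)].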
slopes_term = knot_slopes_bin[1] + knot_slopes_bin[0] - 2. * bin_slope
numerator = bin_height * (bin_slope * sq_z + knot_slopes_bin[0] * z1mz)
denominator = bin_slope + slopes_term * z1mz
y = y_pos_bin[0] + numerator / denominator
# Compute log det Jacobian.
# The logdet is a sum of 3 logs. It is easy to see that the inputs of the
# first two logs are guaranteed to be positive because we ensured that z is in
# [0, 1]. This is also true of the log(denominator) because:
# denominator
# == bin_slope + (knot_slopes_bin[1] + knot_slopes_bin[0] - 2 * bin_slope) *
# z*(1-z)
# >= bin_slope - 2 * bin_slope * z * (1-z)
# >= bin_slope - 2 * bin_slope * (1/4)
# == bin_slope / 2
logdet = 2. * jnp.log(bin_slope) + jnp.log(
knot_slopes_bin[1] * sq_z + 2. * bin_slope * z1mz +
knot_slopes_bin[0] * sq_1mz) - 2. * jnp.log(denominator)
# If x is outside the spline range, we default to a linear transformation.
y = jnp.where(below_range, (x - x_pos[0]) * knot_slopes[0] + y_pos[0], y)
y = jnp.where(above_range, (x - x_pos[-1]) * knot_slopes[-1] + y_pos[-1], y)
logdet = jnp.where(below_range, jnp.log(knot_slopes[0]), logdet)
logdet = jnp.where(above_range, jnp.log(knot_slopes[-1]), logdet)
return y, logdet
def _safe_quadratic_root(a: Array, b: Array, c: Array) -> Array:
"""Implement a numerically stable version of the quadratic formula."""
# This is not a general solution to the quadratic equation, as it assumes
# b ** 2 - 4. * a * c is known a priori to be positive (and which of the two
# roots is to be used, see https://arxiv.org/abs/1906.04032).
# There are two sources of instability:
# (a) When b ** 2 - 4. * a * c -> 0, sqrt gives NaNs in gradient.
  # We clip `sqrt_diff` from below at the smallest positive float so that the
  # gradient of the square root stays finite.
sqrt_diff = b ** 2 - 4. * a * c
safe_sqrt = jnp.sqrt(jnp.clip(sqrt_diff, jnp.finfo(sqrt_diff.dtype).tiny))
  # If sqrt_diff is non-positive, we set the square root to 0, since the
  # discriminant is non-negative in exact arithmetic.
safe_sqrt = jnp.where(sqrt_diff > 0., safe_sqrt, 0.)
# (b) When 4. * a * c -> 0. We use the more stable quadratic solution
# depending on the sign of b.
# See https://people.csail.mit.edu/bkph/articles/Quadratics.pdf (eq 7 and 8).
# Solution when b >= 0
numerator_1 = 2. * c
denominator_1 = -b - safe_sqrt
# Solution when b < 0
numerator_2 = - b + safe_sqrt
denominator_2 = 2 * a
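  # Both expressions yield the same root: multiplying the numerator and
  # denominator of 2c / (-b - sqrt(D)) by (-b + sqrt(D)) and using
  # b^2 - D = 4ac gives (-b + sqrt(D)) / (2a).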
# Choose the numerically stable solution.
numerator = jnp.where(b >= 0, numerator_1, numerator_2)
denominator = jnp.where(b >= 0, denominator_1, denominator_2)
return numerator / denominator
def _rational_quadratic_spline_inv(y: Array,
x_pos: Array,
y_pos: Array,
knot_slopes: Array) -> Tuple[Array, Array]:
"""Applies the inverse of a rational-quadratic spline to a scalar.
Args:
y: a scalar (0-dimensional array). The scalar `y` can be any real number; it
will be transformed by the spline if it's in the closed interval
`[y_pos[0], y_pos[-1]]`, and it will be transformed linearly if it's
outside that interval.
x_pos: array of shape [num_bins + 1], the bin boundaries on the x axis.
y_pos: array of shape [num_bins + 1], the bin boundaries on the y axis.
knot_slopes: array of shape [num_bins + 1], the slopes at the knot points.
Returns:
A tuple of two scalars: the output of the inverse transformation and the log
of the absolute first derivative of the inverse at `y`.
"""
# Search to find the right bin. NOTE: The bins are sorted, so we could use
# binary search, but this is more GPU/TPU friendly.
# The following implementation avoids indexing for faster TPU computation.
below_range = y <= y_pos[0]
above_range = y >= y_pos[-1]
correct_bin = jnp.logical_and(y >= y_pos[:-1], y < y_pos[1:])
any_bin_in_range = jnp.any(correct_bin)
first_bin = jnp.concatenate([jnp.array([1]),
jnp.zeros(len(correct_bin)-1)]).astype(bool)
# If y does not fall into any bin, we use the first spline in the following
# computations to avoid numerical issues.
correct_bin = jnp.where(any_bin_in_range, correct_bin, first_bin)
# Dot product of each parameter with the correct bin mask.
params = jnp.stack([x_pos, y_pos, knot_slopes], axis=1)
params_bin_left = jnp.sum(correct_bin[:, None] * params[:-1], axis=0)
params_bin_right = jnp.sum(correct_bin[:, None] * params[1:], axis=0)
# These are the parameters for the corresponding bin.
x_pos_bin = (params_bin_left[0], params_bin_right[0])
y_pos_bin = (params_bin_left[1], params_bin_right[1])
knot_slopes_bin = (params_bin_left[2], params_bin_right[2])
bin_width = x_pos_bin[1] - x_pos_bin[0]
bin_height = y_pos_bin[1] - y_pos_bin[0]
bin_slope = bin_height / bin_width
w = (y - y_pos_bin[0]) / bin_height
w = jnp.clip(w, 0., 1.) # Ensure w is in [0, 1].
# Compute quadratic coefficients: az^2 + bz + c = 0
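  # The coefficients follow from rearranging the forward transform
  #   w * [s + T z(1-z)] = s z^2 + d_k z(1-z),
  # with s = bin_slope, d_k the left knot slope and T = d_{k+1} + d_k - 2 s,
  # which gives a = s - d_k + T w, b = d_k - T w and c = -s w.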
slopes_term = knot_slopes_bin[1] + knot_slopes_bin[0] - 2. * bin_slope
c = - bin_slope * w
b = knot_slopes_bin[0] - slopes_term * w
a = bin_slope - b
# Solve quadratic to obtain z and then x.
z = _safe_quadratic_root(a, b, c)
z = jnp.clip(z, 0., 1.) # Ensure z is in [0, 1].
x = bin_width * z + x_pos_bin[0]
# Compute log det Jacobian.
sq_z = z * z
z1mz = z - sq_z # z(1-z)
sq_1mz = (1. - z) ** 2
denominator = bin_slope + slopes_term * z1mz
logdet = - 2. * jnp.log(bin_slope) - jnp.log(
knot_slopes_bin[1] * sq_z + 2. * bin_slope * z1mz +
knot_slopes_bin[0] * sq_1mz) + 2. * jnp.log(denominator)
# If y is outside the spline range, we default to a linear transformation.
x = jnp.where(below_range, (y - y_pos[0]) / knot_slopes[0] + x_pos[0], x)
x = jnp.where(above_range, (y - y_pos[-1]) / knot_slopes[-1] + x_pos[-1], x)
logdet = jnp.where(below_range, - jnp.log(knot_slopes[0]), logdet)
logdet = jnp.where(above_range, - jnp.log(knot_slopes[-1]), logdet)
return x, logdet
class RationalQuadraticSpline(base.Bijector):
"""A rational-quadratic spline bijector.
Implements the spline bijector introduced by:
> Durkan et al., Neural Spline Flows, https://arxiv.org/abs/1906.04032, 2019.
This bijector is a monotonically increasing spline operating on an interval
[a, b], such that f(a) = a and f(b) = b. Outside the interval [a, b], the
bijector defaults to a linear transformation whose slope matches that of the
spline at the nearest boundary (either a or b). The range boundaries a and b
are hyperparameters passed to the constructor.
The spline on the interval [a, b] consists of `num_bins` segments, on each of
which the spline takes the form of a rational quadratic (ratio of two
quadratic polynomials). The first derivative of the bijector is guaranteed to
be continuous on the whole real line. The second derivative is generally not
continuous at the knot points (bin boundaries).
The spline is parameterized by the bin sizes on the x and y axis, and by the
slopes at the knot points. All spline parameters are passed to the constructor
as an unconstrained array `params` of shape `[..., 3 * num_bins + 1]`. The
spline parameters are extracted from `params`, and are reparameterized
internally as appropriate. The number of bins is a hyperparameter, and is
implicitly defined by the last dimension of `params`.
This bijector is applied elementwise. Given some input `x`, the parameters
`params` and the input `x` are broadcast against each other. For example,
suppose `x` is of shape `[N, D]`. Then:
- If `params` is of shape `[3 * num_bins + 1]`, the same spline is identically
applied to each element of `x`.
- If `params` is of shape `[D, 3 * num_bins + 1]`, the same spline is applied
along the first axis of `x` but a different spline is applied along the
second axis of `x`.
- If `params` is of shape `[N, D, 3 * num_bins + 1]`, a different spline is
applied to each element of `x`.
- If `params` is of shape `[M, N, D, 3 * num_bins + 1]`, `M` different splines
are applied to each element of `x`, and the output is of shape `[M, N, D]`.
"""
def __init__(self,
params: Array,
range_min: float,
range_max: float,
boundary_slopes: str = 'unconstrained',
min_bin_size: float = 1e-4,
min_knot_slope: float = 1e-4):
"""Initializes a RationalQuadraticSpline bijector.
Args:
params: array of shape `[..., 3 * num_bins + 1]`, the unconstrained
parameters of the bijector. The number of bins is implicitly defined by
the last dimension of `params`. The parameters can take arbitrary
unconstrained values; the bijector will reparameterize them internally
and make sure they obey appropriate constraints. If `params` is the
all-zeros array, the bijector becomes the identity function everywhere
on the real line.
range_min: the lower bound of the spline's range. Below `range_min`, the
bijector defaults to a linear transformation.
range_max: the upper bound of the spline's range. Above `range_max`, the
bijector defaults to a linear transformation.
boundary_slopes: controls the behaviour of the slope of the spline at the
range boundaries (`range_min` and `range_max`). It is used to enforce
certain boundary conditions on the spline. Available options are:
- 'unconstrained': no boundary conditions are imposed; the slopes at the
boundaries can vary freely.
- 'lower_identity': the slope of the spline is set equal to 1 at the
lower boundary (`range_min`). This makes the bijector equal to the
identity function for values less than `range_min`.
- 'upper_identity': similar to `lower_identity`, but now the slope of
the spline is set equal to 1 at the upper boundary (`range_max`). This
makes the bijector equal to the identity function for values greater
than `range_max`.
- 'identity': combines the effects of 'lower_identity' and
'upper_identity' together. The slope of the spline is set equal to 1
at both boundaries (`range_min` and `range_max`). This makes the
bijector equal to the identity function outside the interval
`[range_min, range_max]`.
- 'circular': makes the slope at `range_min` and `range_max` be the
same. This implements the "circular spline" introduced by:
> Rezende et al., Normalizing Flows on Tori and Spheres,
> https://arxiv.org/abs/2002.02428, 2020.
This option should be used when the spline operates on a circle
parameterized by an angle in the interval `[range_min, range_max]`,
where `range_min` and `range_max` correspond to the same point on the
circle.
min_bin_size: The minimum bin size, in either the x or the y axis. Should
be a small positive number, chosen for numerical stability. Guarantees
that no bin in either the x or the y axis will be less than this value.
min_knot_slope: The minimum slope at each knot point. Should be a small
        positive number, chosen for numerical stability. Guarantees that no
        knot will have a slope less than this value.
"""
super().__init__(event_ndims_in=0)
if params.shape[-1] % 3 != 1 or params.shape[-1] < 4:
raise ValueError(f'The last dimension of `params` must have size'
f' `3 * num_bins + 1` and `num_bins` must be at least 1.'
f' Got size {params.shape[-1]}.')
if range_min >= range_max:
raise ValueError(f'`range_min` must be less than `range_max`. Got'
f' `range_min={range_min}` and `range_max={range_max}`.')
if min_bin_size <= 0.:
raise ValueError(f'The minimum bin size must be positive; got'
f' {min_bin_size}.')
if min_knot_slope <= 0.:
raise ValueError(f'The minimum knot slope must be positive; got'
f' {min_knot_slope}.')
self._dtype = params.dtype
self._num_bins = (params.shape[-1] - 1) // 3
# Extract unnormalized parameters.
unnormalized_bin_widths = params[..., :self._num_bins]
unnormalized_bin_heights = params[..., self._num_bins : 2 * self._num_bins]
unnormalized_knot_slopes = params[..., 2 * self._num_bins:]
# Normalize bin sizes and compute bin positions on the x and y axis.
range_size = range_max - range_min
bin_widths = _normalize_bin_sizes(unnormalized_bin_widths, range_size,
min_bin_size)
bin_heights = _normalize_bin_sizes(unnormalized_bin_heights, range_size,
min_bin_size)
x_pos = range_min + jnp.cumsum(bin_widths[..., :-1], axis=-1)
y_pos = range_min + jnp.cumsum(bin_heights[..., :-1], axis=-1)
pad_shape = params.shape[:-1] + (1,)
pad_below = jnp.full(pad_shape, range_min, dtype=self._dtype)
pad_above = jnp.full(pad_shape, range_max, dtype=self._dtype)
self._x_pos = jnp.concatenate([pad_below, x_pos, pad_above], axis=-1)
self._y_pos = jnp.concatenate([pad_below, y_pos, pad_above], axis=-1)
# Normalize knot slopes and enforce requested boundary conditions.
knot_slopes = _normalize_knot_slopes(unnormalized_knot_slopes,
min_knot_slope)
if boundary_slopes == 'unconstrained':
self._knot_slopes = knot_slopes
elif boundary_slopes == 'lower_identity':
ones = jnp.ones(pad_shape, self._dtype)
self._knot_slopes = jnp.concatenate([ones, knot_slopes[..., 1:]], axis=-1)
elif boundary_slopes == 'upper_identity':
ones = jnp.ones(pad_shape, self._dtype)
self._knot_slopes = jnp.concatenate(
[knot_slopes[..., :-1], ones], axis=-1)
elif boundary_slopes == 'identity':
ones = jnp.ones(pad_shape, self._dtype)
self._knot_slopes = jnp.concatenate(
[ones, knot_slopes[..., 1:-1], ones], axis=-1)
elif boundary_slopes == 'circular':
self._knot_slopes = jnp.concatenate(
[knot_slopes[..., :-1], knot_slopes[..., :1]], axis=-1)
else:
raise ValueError(f'Unknown option for boundary slopes:'
f' `{boundary_slopes}`.')
@property
def num_bins(self) -> int:
"""The number of segments on the interval."""
return self._num_bins
@property
def knot_slopes(self) -> Array:
"""The slopes at the knot points."""
return self._knot_slopes
@property
def x_pos(self) -> Array:
"""The bin boundaries on the `x`-axis."""
return self._x_pos
@property
def y_pos(self) -> Array:
"""The bin boundaries on the `y`-axis."""
return self._y_pos
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
fn = jnp.vectorize(
_rational_quadratic_spline_fwd, signature='(),(n),(n),(n)->(),()')
y, logdet = fn(x, self._x_pos, self._y_pos, self._knot_slopes)
return y, logdet
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
fn = jnp.vectorize(
_rational_quadratic_spline_inv, signature='(),(n),(n),(n)->(),()')
x, logdet = fn(y, self._x_pos, self._y_pos, self._knot_slopes)
return x, logdet
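# Illustrative usage sketch (not part of the library): as stated in the class
# docstring, an all-zeros `params` array yields the identity function, and the
# parameters broadcast against the input. For example, a 4-bin spline applied
# elementwise to an input of shape [N, D]:
#
#   params = jnp.zeros(3 * 4 + 1)  # 4 bins -> 13 unconstrained parameters.
#   bijector = RationalQuadraticSpline(params, range_min=-3., range_max=3.)
#   y, logdet = bijector.forward_and_log_det(jnp.zeros((2, 5)))
#   # y equals the input and logdet == 0 for the all-zeros parameterization.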
| distrax-master | distrax/_src/bijectors/rational_quadratic_spline.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tfp_compatible_bijector.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors.block import Block
from distrax._src.bijectors.chain import Chain
from distrax._src.bijectors.lambda_bijector import Lambda
from distrax._src.bijectors.scalar_affine import ScalarAffine
from distrax._src.bijectors.tanh import Tanh
from distrax._src.bijectors.tfp_compatible_bijector import tfp_compatible_bijector
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
RTOL = 3e-3
class TFPCompatibleBijectorTest(parameterized.TestCase):
@parameterized.named_parameters(
('lambda identity, 0d sample', lambda: Block(lambda x: x, 1),
tfb.Identity, ()),
('lambda identity, 1d sample', lambda: Block(lambda x: x, 1),
tfb.Identity, (5,)),
('lambda identity, 2d sample', lambda: Block(lambda x: x, 1),
tfb.Identity, (7, 5)),
('ScalarAffine, 0d sample',
lambda: Block(ScalarAffine(3, 2), 1),
lambda: tfb.Chain([tfb.Shift(3), tfb.Scale(2)]), ()),
('ScalarAffine, 1d sample',
lambda: Block(ScalarAffine(3, 2), 1),
lambda: tfb.Chain([tfb.Shift(3), tfb.Scale(2)]), (5,)),
('ScalarAffine, 2d sample',
lambda: Block(ScalarAffine(3, 2), 1),
lambda: tfb.Chain([tfb.Shift(3), tfb.Scale(2)]), (7, 5)),
('Tanh, 0d sample', lambda: Block(Tanh(), 1), tfb.Tanh, ()),
('Tanh, 1d sample', lambda: Block(Tanh(), 1), tfb.Tanh, (5,)),
('Tanh, 2d sample',
lambda: Block(Tanh(), 1), tfb.Tanh, (7, 5)),
('Chain(Tanh, ScalarAffine), 0d sample',
lambda: Block(Chain([Tanh(), ScalarAffine(0.2, 1.2)]), 1),
lambda: tfb.Chain([tfb.Tanh(), tfb.Shift(0.2), tfb.Scale(1.2)]), ()),
('Chain(Tanh, ScalarAffine), 1d sample',
lambda: Block(Chain([Tanh(), ScalarAffine(0.2, 1.2)]), 1),
lambda: tfb.Chain([tfb.Tanh(), tfb.Shift(0.2), tfb.Scale(1.2)]), (5,)),
('Chain(Tanh, ScalarAffine), 2d sample',
lambda: Block(Chain([Tanh(), ScalarAffine(0.2, 1.2)]), 1),
lambda: tfb.Chain([tfb.Tanh(), tfb.Shift(0.2), tfb.Scale(1.2)]), (3, 5)),
)
def test_transformed_distribution(
self, dx_bijector_fn, tfp_bijector_fn, sample_shape):
base_dist = tfd.MultivariateNormalDiag(np.zeros((3, 2)), np.ones((3, 2)))
dx_bijector = dx_bijector_fn()
wrapped_bijector = tfp_compatible_bijector(dx_bijector)
tfp_bijector = tfp_bijector_fn()
dist_with_wrapped = tfd.TransformedDistribution(base_dist, wrapped_bijector)
dist_tfp_only = tfd.TransformedDistribution(base_dist, tfp_bijector)
with self.subTest('sample'):
dist_with_wrapped.sample(
seed=jax.random.PRNGKey(0), sample_shape=sample_shape)
with self.subTest('log_prob'):
y = dist_tfp_only.sample(
seed=jax.random.PRNGKey(0), sample_shape=sample_shape)
log_prob_wrapped = dist_with_wrapped.log_prob(y)
log_prob_tfp_only = dist_tfp_only.log_prob(y)
np.testing.assert_allclose(log_prob_wrapped, log_prob_tfp_only, rtol=RTOL)
@parameterized.named_parameters(
('identity', lambda: Lambda(lambda x: x, is_constant_jacobian=True),
tfb.Identity, np.array([1, 1.5, 2], dtype=np.float32)),
('affine', lambda: ScalarAffine(np.ones(3, dtype=np.float32), # pylint: disable=g-long-lambda
np.full(3, 5.5, dtype=np.float32)),
lambda: tfb.Chain([tfb.Shift(np.ones(3, dtype=np.float32)), # pylint: disable=g-long-lambda
tfb.Scale(np.full(3, 5.5, dtype=np.float32))]),
np.array([1, 1.5, 2], dtype=np.float32)),
('tanh', Tanh, tfb.Tanh, np.array([-0.1, 0.01, 0.1], dtype=np.float32)),
('chain(tanh, affine)', lambda: Chain([Tanh(), ScalarAffine(0.2, 1.2)]),
lambda: tfb.Chain([tfb.Tanh(), tfb.Shift(0.2), tfb.Scale(1.2)]),
np.array([-0.1, 0.01, 0.1], dtype=np.float32))
)
def test_chain(self, dx_bijector_fn, tfb_bijector_fn, event):
dx_bij = tfp_compatible_bijector(dx_bijector_fn())
tfp_bij = tfb_bijector_fn()
chain_with_dx = tfb.Chain([tfb.Shift(1.0), tfb.Scale(3.0), dx_bij])
chain_with_tfp = tfb.Chain([tfb.Shift(1.0), tfb.Scale(3.0), tfp_bij])
with self.subTest('forward'):
y_dx = chain_with_dx.forward(event)
y_tfp = chain_with_tfp.forward(event)
np.testing.assert_allclose(y_dx, y_tfp, rtol=RTOL)
with self.subTest('inverse'):
y = chain_with_tfp.forward(event)
x_dx = chain_with_dx.inverse(y)
np.testing.assert_allclose(x_dx, event, rtol=RTOL)
@parameterized.named_parameters(
('identity', lambda: Lambda(lambda x: x, is_constant_jacobian=True),
tfb.Identity, np.array([1, 1.5, 2], dtype=np.float32)),
('affine', lambda: ScalarAffine(np.ones(3, dtype=np.float32), # pylint: disable=g-long-lambda
np.full(3, 5.5, dtype=np.float32)),
lambda: tfb.Chain([tfb.Shift(np.ones(3, dtype=np.float32)), # pylint: disable=g-long-lambda
tfb.Scale(np.full(3, 5.5, dtype=np.float32))]),
np.array([1, 1.5, 2], dtype=np.float32)),
('tanh', Tanh, tfb.Tanh, np.array([-0.1, 0.01, 0.1], dtype=np.float32)),
('chain(tanh, affine)', lambda: Chain([Tanh(), ScalarAffine(0.2, 1.2)]),
lambda: tfb.Chain([tfb.Tanh(), tfb.Shift(0.2), tfb.Scale(1.2)]),
np.array([-0.1, 0.01, 0.1], dtype=np.float32)),
)
def test_invert(self, dx_bijector_fn, tfb_bijector_fn, event):
dx_bij = tfp_compatible_bijector(dx_bijector_fn())
tfp_bij = tfb_bijector_fn()
invert_with_dx = tfb.Invert(dx_bij)
invert_with_tfp = tfb.Invert(tfp_bij)
with self.subTest('forward'):
y_dx = invert_with_dx.forward(event)
y_tfp = invert_with_tfp.forward(event)
np.testing.assert_allclose(y_dx, y_tfp, rtol=RTOL)
with self.subTest('inverse'):
y = invert_with_tfp.forward(event)
x_dx = invert_with_dx.inverse(y)
np.testing.assert_allclose(x_dx, event, rtol=RTOL)
@parameterized.named_parameters(
('identity', lambda: Lambda(lambda x: x, is_constant_jacobian=True),
tfb.Identity, np.array([1, 1.5, 2], dtype=np.float32)),
('affine', lambda: ScalarAffine(np.ones(3, dtype=np.float32), # pylint: disable=g-long-lambda
np.full(3, 5.5, dtype=np.float32)),
lambda: tfb.Chain([tfb.Shift(np.ones(3, dtype=np.float32)), # pylint: disable=g-long-lambda
tfb.Scale(np.full(3, 5.5, dtype=np.float32))]),
np.array([1, 1.5, 2], dtype=np.float32)),
('tanh', Tanh, tfb.Tanh, np.array([-0.1, 0.01, 0.1], dtype=np.float32)),
('chain(tanh, affine)', lambda: Chain([Tanh(), ScalarAffine(0.2, 1.2)]),
lambda: tfb.Chain([tfb.Tanh(), tfb.Shift(0.2), tfb.Scale(1.2)]),
np.array([-0.1, 0.01, 0.1], dtype=np.float32))
)
def test_forward_and_inverse(
self, dx_bijector_fn, tfp_bijector_fn, event):
dx_bij = tfp_compatible_bijector(dx_bijector_fn())
tfp_bij = tfp_bijector_fn()
with self.subTest('forward'):
dx_out = dx_bij.forward(event)
tfp_out = tfp_bij.forward(event)
np.testing.assert_allclose(dx_out, tfp_out, rtol=RTOL)
with self.subTest('inverse'):
y = tfp_bij.forward(event)
dx_out = dx_bij.inverse(y)
tfp_out = tfp_bij.inverse(y)
np.testing.assert_allclose(dx_out, tfp_out, rtol=RTOL)
@parameterized.named_parameters(
('identity', lambda: Lambda(lambda x: x, is_constant_jacobian=True),
tfb.Identity, np.array([1, 1.5, 2], dtype=np.float32)),
('affine', lambda: ScalarAffine(np.ones(3, dtype=np.float32), # pylint: disable=g-long-lambda
np.full(3, 5.5, dtype=np.float32)),
lambda: tfb.Chain([tfb.Shift(np.ones(3, dtype=np.float32)), # pylint: disable=g-long-lambda
tfb.Scale(np.full(3, 5.5, dtype=np.float32))]),
np.array([1, 1.5, 2], dtype=np.float32)),
('tanh', Tanh, tfb.Tanh, np.array([-0.1, 0.01, 0.1], dtype=np.float32)),
('chain(tanh, affine)', lambda: Chain([Tanh(), ScalarAffine(0.2, 1.2)]),
lambda: tfb.Chain([tfb.Tanh(), tfb.Shift(0.2), tfb.Scale(1.2)]),
np.array([-0.1, 0.01, 0.1], dtype=np.float32))
)
def test_log_det_jacobian(self, dx_bijector_fn, tfp_bijector_fn, event):
base_bij = dx_bijector_fn()
dx_bij = tfp_compatible_bijector(base_bij)
tfp_bij = tfp_bijector_fn()
with self.subTest('forward'):
dx_out = dx_bij.forward_log_det_jacobian(
event, event_ndims=base_bij.event_ndims_in)
tfp_out = tfp_bij.forward_log_det_jacobian(
event, event_ndims=base_bij.event_ndims_in)
np.testing.assert_allclose(dx_out, tfp_out, rtol=RTOL)
with self.subTest('inverse'):
y = tfp_bij.forward(event)
dx_out = dx_bij.inverse_log_det_jacobian(
y, event_ndims=base_bij.event_ndims_out)
tfp_out = tfp_bij.inverse_log_det_jacobian(
y, event_ndims=base_bij.event_ndims_out)
np.testing.assert_allclose(dx_out, tfp_out, rtol=RTOL)
with self.subTest('experimental_compute_density_correction'):
dx_out = dx_bij.forward_log_det_jacobian(
event, event_ndims=base_bij.event_ndims_in)
dx_dcorr_out, space = dx_bij.experimental_compute_density_correction(
event, tangent_space=tfp.experimental.tangent_spaces.FullSpace(),
event_ndims=base_bij.event_ndims_in)
np.testing.assert_allclose(dx_out, dx_dcorr_out, rtol=RTOL)
self.assertIsInstance(space, tfp.experimental.tangent_spaces.FullSpace)
@parameterized.named_parameters(
('identity unbatched',
lambda: Lambda(lambda x: x, is_constant_jacobian=True), ()),
('identity 1d-batch',
lambda: Lambda(lambda x: x, is_constant_jacobian=True), (3,)),
('identity 2d-batch',
lambda: Lambda(lambda x: x, is_constant_jacobian=True), (5, 3)),
('affine unbatched', lambda: ScalarAffine(1.0, 5.5), ()),
('affine 1d-batch', lambda: ScalarAffine(1.0, 5.5), (3,)),
('affine 2d-batch', lambda: ScalarAffine(1.0, 5.5), (5, 3)),
('tanh unbatched', Tanh, ()),
('tanh 1d-batch', Tanh, (3,)),
('tanh 2d-batch', Tanh, (5, 3)),
('chain(tanh, affine) unbatched',
lambda: Chain([Tanh(), ScalarAffine(0.2, 1.2)]), ()),
('chain(tanh, affine) 1d-batch',
lambda: Chain([Tanh(), ScalarAffine(0.2, 1.2)]), (3,)),
('chain(tanh, affine) 2d-batch',
lambda: Chain([Tanh(), ScalarAffine(0.2, 1.2)]), (5, 3)),
)
def test_batched_events(self, bij_fn, batch_shape):
base = tfd.MultivariateNormalDiag(np.zeros(batch_shape + (3,)),
np.ones(batch_shape + (3,)))
bij = tfp_compatible_bijector(bij_fn())
dist = tfd.TransformedDistribution(base, bij)
with self.subTest('sample'):
sample = dist.sample(seed=jax.random.PRNGKey(0))
chex.assert_shape(sample, batch_shape + (3,))
with self.subTest('log_prob'):
sample = dist.sample(seed=jax.random.PRNGKey(0))
log_prob = dist.log_prob(sample)
chex.assert_shape(log_prob, batch_shape)
def test_with_different_event_ndims(self):
dx_bij = Lambda(forward=lambda x: x.reshape(x.shape[:-1] + (2, 3)),
inverse=lambda y: y.reshape(y.shape[:-2] + (6,)),
forward_log_det_jacobian=lambda _: 0,
inverse_log_det_jacobian=lambda _: 0,
is_constant_jacobian=True,
event_ndims_in=1, event_ndims_out=2)
tfp_bij = tfp_compatible_bijector(dx_bij)
with self.subTest('forward_event_ndims'):
assert tfp_bij.forward_event_ndims(1) == 2
assert tfp_bij.forward_event_ndims(2) == 3
with self.subTest('inverse_event_ndims'):
assert tfp_bij.inverse_event_ndims(2) == 1
assert tfp_bij.inverse_event_ndims(3) == 2
with self.subTest('forward_event_ndims with incorrect input'):
with self.assertRaises(ValueError):
tfp_bij.forward_event_ndims(0)
with self.subTest('inverse_event_ndims with incorrect input'):
with self.assertRaises(ValueError):
tfp_bij.inverse_event_ndims(0)
with self.assertRaises(ValueError):
tfp_bij.inverse_event_ndims(1)
with self.subTest('forward_event_shape'):
y_shape = tfp_bij.forward_event_shape((6,))
y_shape_tensor = tfp_bij.forward_event_shape_tensor((6,))
self.assertEqual(y_shape, (2, 3))
np.testing.assert_array_equal(y_shape_tensor, jnp.array((2, 3)))
with self.subTest('inverse_event_shape'):
x_shape = tfp_bij.inverse_event_shape((2, 3))
x_shape_tensor = tfp_bij.inverse_event_shape_tensor((2, 3))
self.assertEqual(x_shape, (6,))
np.testing.assert_array_equal(x_shape_tensor, jnp.array((6,)))
with self.subTest('TransformedDistribution with correct event_ndims'):
base = tfd.MultivariateNormalDiag(np.zeros(6), np.ones(6))
dist = tfd.TransformedDistribution(base, tfp_bij)
chex.assert_equal(dist.event_shape, (2, 3))
sample = dist.sample(seed=jax.random.PRNGKey(0))
chex.assert_shape(sample, (2, 3))
log_prob = dist.log_prob(sample)
chex.assert_shape(log_prob, ())
with self.subTest('TransformedDistribution with incorrect event_ndims'):
base = tfd.Normal(np.zeros(6), np.ones(6))
dist = tfd.TransformedDistribution(base, tfp_bij)
with self.assertRaises(ValueError):
_ = dist.event_shape
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/tfp_compatible_bijector_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `diag_linear.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors.diag_linear import DiagLinear
from distrax._src.bijectors.tanh import Tanh
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class DiagLinearTest(parameterized.TestCase):
def test_static_properties(self):
bij = DiagLinear(diag=jnp.ones((4,)))
self.assertTrue(bij.is_constant_jacobian)
self.assertTrue(bij.is_constant_log_det)
self.assertEqual(bij.event_ndims_in, 1)
self.assertEqual(bij.event_ndims_out, 1)
@parameterized.parameters(
{'batch_shape': (), 'dtype': jnp.float16},
{'batch_shape': (2, 3), 'dtype': jnp.float32},
)
def test_properties(self, batch_shape, dtype):
bij = DiagLinear(diag=jnp.ones(batch_shape + (4,), dtype))
self.assertEqual(bij.event_dims, 4)
self.assertEqual(bij.batch_shape, batch_shape)
self.assertEqual(bij.dtype, dtype)
self.assertEqual(bij.diag.shape, batch_shape + (4,))
self.assertEqual(bij.matrix.shape, batch_shape + (4, 4))
self.assertEqual(bij.diag.dtype, dtype)
self.assertEqual(bij.matrix.dtype, dtype)
np.testing.assert_allclose(bij.diag, 1., atol=1e-6)
np.testing.assert_allclose(
bij.matrix, np.tile(np.eye(4), batch_shape + (1, 1)), atol=1e-6)
def test_raises_with_invalid_parameters(self):
with self.assertRaises(ValueError):
DiagLinear(diag=np.ones(()))
@chex.all_variants
@parameterized.parameters(
((5,), (5,)),
((5,), ()),
((), (5,)),
)
def test_batched_parameters(self, diag_batch_shape, input_batch_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), diag_batch_shape + (4,)) + 0.5
bij = DiagLinear(diag)
x = jax.random.normal(next(prng), input_batch_shape + (4,))
y, logdet_fwd = self.variant(bij.forward_and_log_det)(x)
z, logdet_inv = self.variant(bij.inverse_and_log_det)(x)
output_batch_shape = jnp.broadcast_shapes(
diag_batch_shape, input_batch_shape)
self.assertEqual(y.shape, output_batch_shape + (4,))
self.assertEqual(z.shape, output_batch_shape + (4,))
self.assertEqual(logdet_fwd.shape, output_batch_shape)
self.assertEqual(logdet_inv.shape, output_batch_shape)
diag = jnp.broadcast_to(diag, output_batch_shape + (4,)).reshape((-1, 4))
x = jnp.broadcast_to(x, output_batch_shape + (4,)).reshape((-1, 4))
y = y.reshape((-1, 4))
z = z.reshape((-1, 4))
logdet_fwd = logdet_fwd.flatten()
logdet_inv = logdet_inv.flatten()
for i in range(np.prod(output_batch_shape)):
bij = DiagLinear(diag=diag[i])
this_y, this_logdet_fwd = self.variant(bij.forward_and_log_det)(x[i])
this_z, this_logdet_inv = self.variant(bij.inverse_and_log_det)(x[i])
np.testing.assert_allclose(this_y, y[i], atol=1e-6)
np.testing.assert_allclose(this_z, z[i], atol=1e-6)
np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-6)
np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=1e-6)
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (2, 3), 'param_shape': (3,)},
)
def test_identity_initialization(self, batch_shape, param_shape):
bij = DiagLinear(diag=jnp.ones(param_shape + (4,)))
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
x = jax.random.normal(next(prng), batch_shape + (4,))
# Forward methods.
y, logdet = self.variant(bij.forward_and_log_det)(x)
np.testing.assert_array_equal(y, x)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
# Inverse methods.
x_rec, logdet = self.variant(bij.inverse_and_log_det)(y)
np.testing.assert_array_equal(x_rec, y)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (2, 3), 'param_shape': (3,)}
)
def test_inverse_methods(self, batch_shape, param_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), param_shape + (4,)) + 0.5
bij = DiagLinear(diag)
x = jax.random.normal(next(prng), batch_shape + (4,))
y, logdet_fwd = self.variant(bij.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bij.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=1e-6)
np.testing.assert_allclose(logdet_fwd, -logdet_inv, atol=1e-6)
@chex.all_variants
def test_forward_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), (4,)) + 0.5
bij = DiagLinear(diag)
batched_x = jax.random.normal(next(prng), (10, 4))
single_x = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bij.forward_log_det_jacobian)(batched_x)
jacobian_fn = jax.jacfwd(bij.forward)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_x))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-4)
@chex.all_variants
def test_inverse_jacobian_det(self):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
diag = jax.random.uniform(next(prng), (4,)) + 0.5
bij = DiagLinear(diag)
batched_y = jax.random.normal(next(prng), (10, 4))
single_y = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bij.inverse_log_det_jacobian)(batched_y)
jacobian_fn = jax.jacfwd(bij.inverse)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_y))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-4)
def test_raises_on_invalid_input_shape(self):
bij = DiagLinear(diag=jnp.ones((4,)))
for fn_name, fn in [
('forward', bij.forward),
('inverse', bij.inverse),
('forward_log_det_jacobian', bij.forward_log_det_jacobian),
('inverse_log_det_jacobian', bij.inverse_log_det_jacobian),
('forward_and_log_det', bij.forward_and_log_det),
('inverse_and_log_det', bij.inverse_and_log_det)
]:
with self.subTest(fn_name=fn_name):
with self.assertRaises(ValueError):
fn(jnp.array(0))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bij = DiagLinear(diag=jnp.ones((4,)))
x = np.zeros((4,))
f(x, bij)
def test_same_as_itself(self):
bij = DiagLinear(diag=jnp.ones((4,)))
self.assertTrue(bij.same_as(bij))
def test_not_same_as_others(self):
bij = DiagLinear(diag=jnp.ones((4,)))
other = DiagLinear(diag=2. * jnp.ones((4,)))
self.assertFalse(bij.same_as(other))
self.assertFalse(bij.same_as(Tanh()))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/diag_linear_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `masked_coupling.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import bijector as distrax_bijector
from distrax._src.bijectors import block
from distrax._src.bijectors import masked_coupling
import jax
import jax.numpy as jnp
import numpy as np
def _create_masked_coupling_bijector(event_shape, event_ndims=None):
key = jax.random.PRNGKey(101)
return masked_coupling.MaskedCoupling(
mask=jax.random.choice(key, jnp.array([True, False]), event_shape),
conditioner=lambda x: x**2,
bijector=lambda _: lambda x: 2. * x + 3.,
event_ndims=event_ndims)
class MaskedCouplingTest(parameterized.TestCase):
def test_properties(self):
bijector = _create_masked_coupling_bijector((4, 5), None)
ones = jnp.ones((4, 5))
np.testing.assert_allclose(bijector.conditioner(2 * ones), 4 * ones)
assert callable(bijector.bijector(ones))
self.assertEqual(bijector.mask.shape, (4, 5))
@parameterized.named_parameters(
('jnp.float32', jnp.float32),
('jnp.int32', jnp.int32),
('jnp.float64', jnp.float64),
('jnp.int64', jnp.int64),
('jnp.complex64', jnp.complex64),
('jnp.complex128', jnp.complex128),
)
def test_raises_on_invalid_mask_dtype(self, dtype):
with self.assertRaises(ValueError):
masked_coupling.MaskedCoupling(
mask=jnp.zeros((4,), dtype=dtype),
conditioner=lambda x: x,
bijector=lambda _: lambda x: x
)
@chex.all_variants
@parameterized.parameters(
{'event_ndims': None, 'batch_shape': (2, 3)},
{'event_ndims': 0, 'batch_shape': (2, 3, 4, 5)},
{'event_ndims': 1, 'batch_shape': (2, 3, 4)},
{'event_ndims': 2, 'batch_shape': (2, 3)},
{'event_ndims': 3, 'batch_shape': (2,)},
{'event_ndims': 4, 'batch_shape': ()},
)
def test_shapes_are_correct(self, event_ndims, batch_shape):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _create_masked_coupling_bijector((4, 5), event_ndims)
# Forward methods.
y, logdet = self.variant(bijector.forward_and_log_det)(x)
self.assertEqual(y.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, batch_shape)
# Inverse methods.
x, logdet = self.variant(bijector.inverse_and_log_det)(y)
self.assertEqual(x.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, batch_shape)
def test_non_default_inner_event_ndims(self):
batch = 2
event_shape = (7, 5, 3)
inner_event_ndims = 1
multipliers = jnp.array([4., 1., 0.5])
class InnerBijector(distrax_bijector.Bijector):
"""A simple inner bijector."""
def __init__(self):
super().__init__(event_ndims_in=inner_event_ndims)
def forward_and_log_det(self, x):
return x * multipliers, jnp.full(x.shape[:-inner_event_ndims],
jnp.sum(jnp.log(multipliers)))
def inverse_and_log_det(self, y):
        return y / multipliers, jnp.full(y.shape[:-inner_event_ndims],
-jnp.sum(jnp.log(multipliers)))
bijector = masked_coupling.MaskedCoupling(
mask=jnp.full(event_shape[:-inner_event_ndims], False),
conditioner=lambda x: x,
bijector=lambda _: InnerBijector(),
inner_event_ndims=inner_event_ndims,
event_ndims=len(event_shape))
x = jnp.ones((batch,) + event_shape)
# Test forward.
y, ldj_y = bijector.forward_and_log_det(x)
np.testing.assert_allclose(
y, jnp.tile(multipliers[None, None, None, :], [batch, 7, 5, 1]))
np.testing.assert_allclose(
ldj_y,
np.full([batch],
np.prod(event_shape[:-inner_event_ndims]) *
jnp.sum(jnp.log(multipliers))),
rtol=1e-6)
# Test inverse
z, ldj_z = bijector.inverse_and_log_det(y)
np.testing.assert_allclose(z, x)
np.testing.assert_allclose(ldj_z, -ldj_y)
@chex.all_variants
def test_masking_works(self):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _create_masked_coupling_bijector((4, 5))
mask = bijector.mask
y = self.variant(bijector.forward)(x)
np.testing.assert_array_equal(mask * y, mask * x)
@chex.all_variants
@parameterized.parameters(
{'event_ndims': None},
{'event_ndims': 0},
{'event_ndims': 1},
{'event_ndims': 2},
{'event_ndims': 3},
{'event_ndims': 4},
)
def test_inverse_methods_are_correct(self, event_ndims):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _create_masked_coupling_bijector((4, 5), event_ndims)
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=1e-6)
np.testing.assert_allclose(logdet_fwd, -logdet_inv, atol=1e-6)
@chex.all_variants
@parameterized.parameters(
{'event_ndims': None},
{'event_ndims': 0},
{'event_ndims': 1},
{'event_ndims': 2},
{'event_ndims': 3},
{'event_ndims': 4},
)
def test_composite_methods_are_consistent(self, event_ndims):
key = jax.random.PRNGKey(42)
bijector = _create_masked_coupling_bijector((4, 5), event_ndims)
# Forward methods.
x = jax.random.normal(key, (2, 3, 4, 5))
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y1, y2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=5e-6)
# Inverse methods.
y = jax.random.normal(key, (2, 3, 4, 5))
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x1, x2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=5e-6)
def test_raises_if_inner_bijector_is_not_scalar(self):
key = jax.random.PRNGKey(101)
event_shape = (2, 3)
bijector = masked_coupling.MaskedCoupling(
mask=jax.random.choice(key, jnp.array([True, False]), event_shape),
conditioner=lambda x: x,
bijector=lambda _: block.Block(lambda x: x, 1))
with self.assertRaisesRegex(ValueError, r'match.*inner_event_ndims'):
bijector.forward_and_log_det(jnp.zeros(event_shape))
with self.assertRaisesRegex(ValueError, r'match.*inner_event_ndims'):
bijector.inverse_and_log_det(jnp.zeros(event_shape))
def test_raises_if_inner_bijector_has_wrong_inner_ndims(self):
key = jax.random.PRNGKey(101)
event_shape = (2, 3, 5)
inner_event_ndims = 2
bijector = masked_coupling.MaskedCoupling(
mask=jax.random.choice(key, jnp.array([True, False]),
event_shape[:-inner_event_ndims]),
event_ndims=len(event_shape),
inner_event_ndims=inner_event_ndims,
conditioner=lambda x: x,
bijector=lambda _: block.Block(lambda x: x, 0))
with self.assertRaisesRegex(ValueError, r'match.*inner_event_ndims'):
bijector.forward_and_log_det(jnp.zeros(event_shape))
with self.assertRaisesRegex(ValueError, r'match.*inner_event_ndims'):
bijector.inverse_and_log_det(jnp.zeros(event_shape))
def test_raises_on_invalid_input_shape(self):
bij = _create_masked_coupling_bijector(event_shape=(2, 3))
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.zeros((3,)))
@chex.all_variants
@parameterized.parameters(
((3, 4), (3, 4)),
((3, 4), (3, 1)),
((3, 4), (4,)),
((3, 4), ()),
((3, 1), (3, 4)),
((4,), (3, 4)),
((), (3, 4)),
)
def test_batched_mask(self, mask_batch_shape, input_batch_shape):
def create_bijector(mask):
return masked_coupling.MaskedCoupling(
mask=mask,
conditioner=lambda x: x**2,
bijector=lambda _: lambda x: 2. * x + 3.,
event_ndims=2)
k1, k2 = jax.random.split(jax.random.PRNGKey(42))
mask = jax.random.choice(
k1, jnp.array([True, False]), mask_batch_shape + (5, 6))
bijector = create_bijector(mask)
x = jax.random.uniform(k2, input_batch_shape + (5, 6))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
z, logdet_inv = self.variant(bijector.inverse_and_log_det)(x)
output_batch_shape = jnp.broadcast_arrays(
mask[..., 0, 0], x[..., 0, 0])[0].shape
self.assertEqual(y.shape, output_batch_shape + (5, 6))
self.assertEqual(z.shape, output_batch_shape + (5, 6))
self.assertEqual(logdet_fwd.shape, output_batch_shape)
self.assertEqual(logdet_inv.shape, output_batch_shape)
mask = jnp.broadcast_to(
mask, output_batch_shape + (5, 6)).reshape((-1, 5, 6))
x = jnp.broadcast_to(x, output_batch_shape + (5, 6)).reshape((-1, 5, 6))
y = y.reshape((-1, 5, 6))
z = z.reshape((-1, 5, 6))
logdet_fwd = logdet_fwd.flatten()
logdet_inv = logdet_inv.flatten()
for i in range(np.prod(output_batch_shape)):
bijector = create_bijector(mask[i])
this_y, this_logdet_fwd = self.variant(bijector.forward_and_log_det)(x[i])
this_z, this_logdet_inv = self.variant(bijector.inverse_and_log_det)(x[i])
np.testing.assert_allclose(this_y, y[i], atol=1e-7)
np.testing.assert_allclose(this_z, z[i], atol=1e-7)
np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-5)
np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=1e-5)
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = _create_masked_coupling_bijector((4, 5), event_ndims=None)
x = np.zeros((2, 3, 4, 5))
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/masked_coupling_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `scalar_affine.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import scalar_affine
import jax
import jax.numpy as jnp
import numpy as np
class ScalarAffineTest(parameterized.TestCase):
def test_properties(self):
bij = scalar_affine.ScalarAffine(shift=0., scale=1.)
self.assertTrue(bij.is_constant_jacobian)
self.assertTrue(bij.is_constant_log_det)
np.testing.assert_allclose(bij.shift, 0.)
np.testing.assert_allclose(bij.scale, 1.)
np.testing.assert_allclose(bij.log_scale, 0.)
def test_raises_if_both_scale_and_log_scale_are_specified(self):
with self.assertRaises(ValueError):
scalar_affine.ScalarAffine(shift=0., scale=1., log_scale=0.)
@chex.all_variants
def test_shapes_are_correct(self):
k1, k2, k3, k4 = jax.random.split(jax.random.PRNGKey(42), 4)
x = jax.random.normal(k1, (2, 3, 4, 5))
shift = jax.random.normal(k2, (4, 5))
scale = jax.random.uniform(k3, (3, 4, 5)) + 0.1
log_scale = jax.random.normal(k4, (3, 4, 5))
bij_no_scale = scalar_affine.ScalarAffine(shift)
bij_with_scale = scalar_affine.ScalarAffine(shift, scale=scale)
bij_with_log_scale = scalar_affine.ScalarAffine(shift, log_scale=log_scale)
for bij in [bij_no_scale, bij_with_scale, bij_with_log_scale]:
# Forward methods.
y, logdet = self.variant(bij.forward_and_log_det)(x)
self.assertEqual(y.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, (2, 3, 4, 5))
# Inverse methods.
x, logdet = self.variant(bij.inverse_and_log_det)(y)
self.assertEqual(x.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, (2, 3, 4, 5))
@chex.all_variants
def test_forward_methods_are_correct(self):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bij_no_scale = scalar_affine.ScalarAffine(shift=3.)
bij_with_scale = scalar_affine.ScalarAffine(shift=3., scale=1.)
bij_with_log_scale = scalar_affine.ScalarAffine(shift=3., log_scale=0.)
for bij in [bij_no_scale, bij_with_scale, bij_with_log_scale]:
y, logdet = self.variant(bij.forward_and_log_det)(x)
np.testing.assert_allclose(y, x + 3., atol=1e-8)
np.testing.assert_allclose(logdet, 0., atol=1e-8)
@chex.all_variants
def test_inverse_methods_are_correct(self):
k1, k2, k3, k4 = jax.random.split(jax.random.PRNGKey(42), 4)
x = jax.random.normal(k1, (2, 3, 4, 5))
shift = jax.random.normal(k2, (4, 5))
scale = jax.random.uniform(k3, (3, 4, 5)) + 0.1
log_scale = jax.random.normal(k4, (3, 4, 5))
bij_no_scale = scalar_affine.ScalarAffine(shift)
bij_with_scale = scalar_affine.ScalarAffine(shift, scale=scale)
bij_with_log_scale = scalar_affine.ScalarAffine(shift, log_scale=log_scale)
for bij in [bij_no_scale, bij_with_scale, bij_with_log_scale]:
y, logdet_fwd = self.variant(bij.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bij.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=1e-5)
np.testing.assert_allclose(logdet_fwd, -logdet_inv, atol=3e-6)
@chex.all_variants
def test_composite_methods_are_consistent(self):
k1, k2, k3, k4 = jax.random.split(jax.random.PRNGKey(42), 4)
bij = scalar_affine.ScalarAffine(
shift=jax.random.normal(k1, (4, 5)),
log_scale=jax.random.normal(k2, (4, 5)))
# Forward methods.
x = jax.random.normal(k3, (2, 3, 4, 5))
y1 = self.variant(bij.forward)(x)
logdet1 = self.variant(bij.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bij.forward_and_log_det)(x)
np.testing.assert_allclose(y1, y2, atol=1e-12)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-12)
# Inverse methods.
y = jax.random.normal(k4, (2, 3, 4, 5))
x1 = self.variant(bij.inverse)(y)
logdet1 = self.variant(bij.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bij.inverse_and_log_det)(y)
np.testing.assert_allclose(x1, x2, atol=1e-12)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-12)
@chex.all_variants
@parameterized.parameters(
((5,), (5,), (5,)),
((5,), (5,), ()),
((5,), (), (5,)),
((), (5,), (5,)),
((), (), (5,)),
((), (5,), ()),
((5,), (), ()),
)
def test_batched_parameters(self, scale_batch_shape, shift_batch_shape,
input_batch_shape):
k1, k2, k3 = jax.random.split(jax.random.PRNGKey(42), 3)
log_scale = jax.random.normal(k1, scale_batch_shape)
shift = jax.random.normal(k2, shift_batch_shape)
bijector = scalar_affine.ScalarAffine(shift, log_scale=log_scale)
x = jax.random.normal(k3, input_batch_shape)
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
z, logdet_inv = self.variant(bijector.inverse_and_log_det)(x)
output_batch_shape = jnp.broadcast_arrays(log_scale, shift, x)[0].shape
self.assertEqual(y.shape, output_batch_shape)
self.assertEqual(z.shape, output_batch_shape)
self.assertEqual(logdet_fwd.shape, output_batch_shape)
self.assertEqual(logdet_inv.shape, output_batch_shape)
log_scale = jnp.broadcast_to(log_scale, output_batch_shape).flatten()
shift = jnp.broadcast_to(shift, output_batch_shape).flatten()
x = jnp.broadcast_to(x, output_batch_shape).flatten()
y = y.flatten()
z = z.flatten()
logdet_fwd = logdet_fwd.flatten()
logdet_inv = logdet_inv.flatten()
for i in range(np.prod(output_batch_shape)):
bijector = scalar_affine.ScalarAffine(shift[i], jnp.exp(log_scale[i]))
this_y, this_logdet_fwd = self.variant(bijector.forward_and_log_det)(x[i])
this_z, this_logdet_inv = self.variant(bijector.inverse_and_log_det)(x[i])
np.testing.assert_allclose(this_y, y[i], atol=1e-7)
np.testing.assert_allclose(this_z, z[i], atol=1e-5)
np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-4)
np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=1e-4)
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = scalar_affine.ScalarAffine(0, 1)
x = np.zeros(())
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/scalar_affine_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LU-decomposed affine bijector."""
from distrax._src.bijectors import bijector as base
from distrax._src.bijectors import block
from distrax._src.bijectors import chain
from distrax._src.bijectors import shift
from distrax._src.bijectors import triangular_linear
from distrax._src.bijectors import unconstrained_affine
import jax.numpy as jnp
Array = base.Array
class LowerUpperTriangularAffine(chain.Chain):
"""An affine bijector whose weight matrix is parameterized as A = LU.
This bijector is defined as `f(x) = Ax + b` where:
* A = LU is a DxD matrix.
* L is a lower-triangular matrix with ones on the diagonal.
* U is an upper-triangular matrix.
The Jacobian determinant can be computed in O(D) as follows:
log|det J(x)| = log|det A| = sum(log|diag(U)|)
The inverse can be computed in O(D^2) by solving two triangular systems:
* Lz = y - b
* Ux = z
The bijector is invertible if and only if all diagonal elements of U are
non-zero. It is the responsibility of the user to make sure that this is the
case; the class will make no attempt to verify that the bijector is
invertible.
L and U are parameterized using a square matrix M as follows:
* The lower-triangular part of M (excluding the diagonal) becomes L.
* The upper-triangular part of M (including the diagonal) becomes U.
The parameterization is such that if M is the identity, LU is also the
identity. Note however that M is not generally equal to LU.
"""
def __init__(self, matrix: Array, bias: Array):
"""Initializes a `LowerUpperTriangularAffine` bijector.
Args:
matrix: a square matrix parameterizing `L` and `U` as described in the
class docstring. Can also be a batch of matrices. If `matrix` is the
identity, `LU` is also the identity. Note however that `matrix` is
generally not equal to the product `LU`.
bias: the vector `b` in `LUx + b`. Can also be a batch of vectors.
"""
unconstrained_affine.check_affine_parameters(matrix, bias)
self._upper = triangular_linear.TriangularLinear(matrix, is_lower=False)
dim = matrix.shape[-1]
lower = jnp.eye(dim) + jnp.tril(matrix, -1) # Replace diagonal with ones.
self._lower = triangular_linear.TriangularLinear(lower, is_lower=True)
self._shift = block.Block(shift.Shift(bias), 1)
self._bias = bias
super().__init__([self._shift, self._lower, self._upper])
@property
def lower(self) -> Array:
"""The lower triangular matrix `L` with ones in the diagonal."""
return self._lower.matrix
@property
def upper(self) -> Array:
"""The upper triangular matrix `U`."""
return self._upper.matrix
@property
def matrix(self) -> Array:
"""The matrix `A = LU` of the transformation."""
return self.lower @ self.upper
@property
def bias(self) -> Array:
"""The shift `b` of the transformation."""
return self._bias
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is LowerUpperTriangularAffine: # pylint: disable=unidiomatic-typecheck
return all((
self.lower is other.lower,
self.upper is other.upper,
self.bias is other.bias,
))
return False
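if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; not part of the original
  # module). With `matrix` equal to the identity, A = LU is also the identity,
  # so the bijector reduces to a pure shift and its log-det is zero.
  example_bij = LowerUpperTriangularAffine(matrix=jnp.eye(2), bias=jnp.ones(2))
  example_y, example_logdet = example_bij.forward_and_log_det(jnp.zeros(2))
  assert jnp.allclose(example_y, jnp.ones(2))
  assert jnp.allclose(example_logdet, 0.)
  assert jnp.allclose(example_bij.matrix, jnp.eye(2))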
| distrax-master | distrax/_src/bijectors/lower_upper_triangular_affine.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shift bijector."""
from typing import Tuple, Union
from distrax._src.bijectors import bijector as base
import jax
import jax.numpy as jnp
Array = base.Array
Numeric = Union[Array, float]
class Shift(base.Bijector):
"""Bijector that translates its input elementwise.
The bijector is defined as follows:
- Forward: `y = x + shift`
- Forward Jacobian determinant: `log|det J(x)| = 0`
- Inverse: `x = y - shift`
- Inverse Jacobian determinant: `log|det J(y)| = 0`
where `shift` parameterizes the bijector.
"""
def __init__(self, shift: Numeric):
"""Initializes a `Shift` bijector.
Args:
shift: the bijector's shift parameter. Can also be batched.
"""
super().__init__(event_ndims_in=0, is_constant_jacobian=True)
self._shift = shift
self._batch_shape = jnp.shape(self._shift)
@property
def shift(self) -> Numeric:
"""The bijector's shift."""
return self._shift
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
return x + self._shift
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape)
return jnp.zeros(batch_shape, dtype=x.dtype)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self.forward(x), self.forward_log_det_jacobian(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
return y - self._shift
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
return self.forward_log_det_jacobian(y)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self.inverse(y), self.inverse_log_det_jacobian(y)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Shift: # pylint: disable=unidiomatic-typecheck
return self.shift is other.shift
return False
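if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; not part of the original
  # module). Forward adds the shift elementwise; the log-det Jacobian is zero.
  example_bij = Shift(jnp.array([1., 2., 3.]))
  example_y, example_logdet = example_bij.forward_and_log_det(jnp.zeros(3))
  assert jnp.allclose(example_y, jnp.array([1., 2., 3.]))
  assert jnp.allclose(example_logdet, 0.)
  assert jnp.allclose(example_bij.inverse(example_y), 0.)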
| distrax-master | distrax/_src/bijectors/shift.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `split_coupling.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import bijector as base_bijector
from distrax._src.bijectors import block
from distrax._src.bijectors import split_coupling
import jax
import jax.numpy as jnp
import numpy as np
def _create_split_coupling_bijector(split_index,
split_axis=-1,
swap=False,
event_ndims=2):
return split_coupling.SplitCoupling(
split_index=split_index,
split_axis=split_axis,
event_ndims=event_ndims,
swap=swap,
conditioner=lambda x: x**2,
bijector=lambda _: lambda x: 2. * x + 3.)
class DummyBijector(base_bijector.Bijector):
def forward_and_log_det(self, x):
super()._check_forward_input_shape(x)
return x, jnp.zeros((x.shape[:-self.event_ndims_in]), dtype=jnp.float_)
class SplitCouplingTest(parameterized.TestCase):
def test_properties(self):
bijector = _create_split_coupling_bijector(
split_index=0, swap=False, split_axis=-1, event_ndims=2)
ones = jnp.ones((4, 5))
self.assertEqual(bijector.split_index, 0)
self.assertEqual(bijector.split_axis, -1)
self.assertFalse(bijector.swap)
np.testing.assert_allclose(
bijector.conditioner(2 * ones), 4 * ones, atol=1e-4)
assert callable(bijector.bijector(ones))
@parameterized.named_parameters(
('negative split_index', {'split_index': -1, 'event_ndims': 0}),
('positive split_axis',
{'split_index': 0, 'event_ndims': 0, 'split_axis': 3}),
('negative event_ndims', {'split_index': 0, 'event_ndims': -1}),
('invalid split_axis',
{'split_index': 0, 'event_ndims': 1, 'split_axis': -2}),
)
def test_invalid_properties(self, bij_params):
bij_params.update(
{'conditioner': lambda x: x, 'bijector': lambda _: lambda x: x})
with self.assertRaises(ValueError):
split_coupling.SplitCoupling(**bij_params)
def test_raises_on_bijector_with_different_event_ndims(self):
inner_bij = lambda _: DummyBijector(1, 0, False, False)
bij_params = {'split_index': 0, 'event_ndims': 1,
'conditioner': lambda x: x, 'bijector': inner_bij}
bij = split_coupling.SplitCoupling(**bij_params)
with self.assertRaises(ValueError):
bij.forward_and_log_det(jnp.zeros((4, 3)))
def test_raises_on_bijector_with_extra_event_ndims(self):
inner_bij = lambda _: DummyBijector(2, 2, False, False)
bij_params = {'split_index': 0, 'event_ndims': 1,
'conditioner': lambda x: x, 'bijector': inner_bij}
bij = split_coupling.SplitCoupling(**bij_params)
with self.assertRaises(ValueError):
bij.forward_and_log_det(jnp.zeros((4, 3)))
@chex.all_variants
@parameterized.parameters(
{'split_index': 0, 'split_axis': -1, 'swap': False},
{'split_index': 3, 'split_axis': -1, 'swap': False},
{'split_index': 5, 'split_axis': -1, 'swap': False},
{'split_index': 0, 'split_axis': -2, 'swap': False},
{'split_index': 2, 'split_axis': -2, 'swap': False},
{'split_index': 4, 'split_axis': -2, 'swap': False},
{'split_index': 0, 'split_axis': -1, 'swap': True},
{'split_index': 3, 'split_axis': -1, 'swap': True},
{'split_index': 5, 'split_axis': -1, 'swap': True},
{'split_index': 0, 'split_axis': -2, 'swap': True},
{'split_index': 2, 'split_axis': -2, 'swap': True},
{'split_index': 4, 'split_axis': -2, 'swap': True},
)
def test_shapes_are_correct(self, split_index, split_axis, swap):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _create_split_coupling_bijector(
split_index, split_axis, swap, event_ndims=2)
# Forward methods.
y, logdet = self.variant(bijector.forward_and_log_det)(x)
self.assertEqual(y.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, (2, 3))
# Inverse methods.
x, logdet = self.variant(bijector.inverse_and_log_det)(y)
self.assertEqual(x.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, (2, 3))
@chex.all_variants
def test_swapping_works(self):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
# Don't swap.
bijector = _create_split_coupling_bijector(
split_index=3, split_axis=-1, swap=False)
y = self.variant(bijector.forward)(x)
np.testing.assert_array_equal(y[..., :3], x[..., :3])
# Swap.
bijector = _create_split_coupling_bijector(
split_index=3, split_axis=-1, swap=True)
y = self.variant(bijector.forward)(x)
np.testing.assert_array_equal(y[..., 3:], x[..., 3:])
# Don't swap.
bijector = _create_split_coupling_bijector(
split_index=3, split_axis=-2, swap=False)
y = self.variant(bijector.forward)(x)
np.testing.assert_array_equal(y[..., :3, :], x[..., :3, :])
# Swap.
bijector = _create_split_coupling_bijector(
split_index=3, split_axis=-2, swap=True)
y = self.variant(bijector.forward)(x)
np.testing.assert_array_equal(y[..., 3:, :], x[..., 3:, :])
@chex.all_variants
@parameterized.parameters(
{'split_index': 0, 'split_axis': -1, 'swap': False},
{'split_index': 3, 'split_axis': -1, 'swap': False},
{'split_index': 5, 'split_axis': -1, 'swap': False},
{'split_index': 0, 'split_axis': -2, 'swap': False},
{'split_index': 2, 'split_axis': -2, 'swap': False},
{'split_index': 4, 'split_axis': -2, 'swap': False},
{'split_index': 0, 'split_axis': -1, 'swap': True},
{'split_index': 3, 'split_axis': -1, 'swap': True},
{'split_index': 5, 'split_axis': -1, 'swap': True},
{'split_index': 0, 'split_axis': -2, 'swap': True},
{'split_index': 2, 'split_axis': -2, 'swap': True},
{'split_index': 4, 'split_axis': -2, 'swap': True},
)
def test_inverse_methods_are_correct(self, split_index, split_axis, swap):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _create_split_coupling_bijector(
split_index, split_axis, swap, event_ndims=2)
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=1e-6)
np.testing.assert_allclose(logdet_fwd, -logdet_inv, atol=1e-6)
@chex.all_variants
@parameterized.parameters(
{'split_index': 0, 'split_axis': -1, 'swap': False},
{'split_index': 3, 'split_axis': -1, 'swap': False},
{'split_index': 5, 'split_axis': -1, 'swap': False},
{'split_index': 0, 'split_axis': -2, 'swap': False},
{'split_index': 2, 'split_axis': -2, 'swap': False},
{'split_index': 4, 'split_axis': -2, 'swap': False},
{'split_index': 0, 'split_axis': -1, 'swap': True},
{'split_index': 3, 'split_axis': -1, 'swap': True},
{'split_index': 5, 'split_axis': -1, 'swap': True},
{'split_index': 0, 'split_axis': -2, 'swap': True},
{'split_index': 2, 'split_axis': -2, 'swap': True},
{'split_index': 4, 'split_axis': -2, 'swap': True},
)
def test_composite_methods_are_consistent(self, split_index, split_axis,
swap):
key = jax.random.PRNGKey(42)
bijector = _create_split_coupling_bijector(
split_index, split_axis, swap, event_ndims=2)
# Forward methods.
x = jax.random.normal(key, (2, 3, 4, 5))
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y1, y2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-8)
# Inverse methods.
y = jax.random.normal(key, (2, 3, 4, 5))
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x1, x2, atol=1e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=1e-8)
def test_raises_on_invalid_input_shape(self):
event_shape = (2, 3)
bij = split_coupling.SplitCoupling(
split_index=event_shape[-1] // 2,
event_ndims=len(event_shape),
conditioner=lambda x: x,
bijector=lambda _: lambda x: x)
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.zeros((3,)))
def test_raises_on_invalid_inner_bijector(self):
event_shape = (2, 3)
bij = split_coupling.SplitCoupling(
split_index=event_shape[-1] // 2,
event_ndims=len(event_shape),
conditioner=lambda x: x,
bijector=lambda _: block.Block(lambda x: x, len(event_shape) + 1))
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.zeros(event_shape))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = _create_split_coupling_bijector(0, -1, False, event_ndims=2)
x = np.zeros((2, 3, 4, 5))
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/split_coupling_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `shift.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors.shift import Shift
from distrax._src.bijectors.tanh import Tanh
import jax
import jax.numpy as jnp
import numpy as np
class ShiftTest(parameterized.TestCase):
def test_jacobian_is_constant_property(self):
bijector = Shift(jnp.ones((4,)))
self.assertTrue(bijector.is_constant_jacobian)
self.assertTrue(bijector.is_constant_log_det)
def test_properties(self):
bijector = Shift(jnp.array([1., 2., 3.]))
np.testing.assert_array_equal(bijector.shift, np.array([1., 2., 3.]))
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (3,), 'param_shape': ()},
{'batch_shape': (), 'param_shape': (3,)},
{'batch_shape': (2, 3), 'param_shape': (2, 3)},
)
def test_forward_methods(self, batch_shape, param_shape):
bijector = Shift(jnp.ones(param_shape))
prng = jax.random.PRNGKey(42)
x = jax.random.normal(prng, batch_shape)
output_shape = jnp.broadcast_shapes(batch_shape, param_shape)
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
self.assertEqual(y1.shape, output_shape)
self.assertEqual(y2.shape, output_shape)
self.assertEqual(logdet1.shape, output_shape)
self.assertEqual(logdet2.shape, output_shape)
np.testing.assert_allclose(y1, x + 1., 1e-6)
np.testing.assert_allclose(y2, x + 1., 1e-6)
np.testing.assert_allclose(logdet1, 0., 1e-6)
np.testing.assert_allclose(logdet2, 0., 1e-6)
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': ()},
{'batch_shape': (3,), 'param_shape': ()},
{'batch_shape': (), 'param_shape': (3,)},
{'batch_shape': (2, 3), 'param_shape': (2, 3)},
)
def test_inverse_methods(self, batch_shape, param_shape):
bijector = Shift(jnp.ones(param_shape))
prng = jax.random.PRNGKey(42)
y = jax.random.normal(prng, batch_shape)
output_shape = jnp.broadcast_shapes(batch_shape, param_shape)
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
self.assertEqual(x1.shape, output_shape)
self.assertEqual(x2.shape, output_shape)
self.assertEqual(logdet1.shape, output_shape)
self.assertEqual(logdet2.shape, output_shape)
np.testing.assert_allclose(x1, y - 1., 1e-6)
np.testing.assert_allclose(x2, y - 1., 1e-6)
np.testing.assert_allclose(logdet1, 0., 1e-6)
np.testing.assert_allclose(logdet2, 0., 1e-6)
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bij = Shift(jnp.ones((4,)))
x = np.zeros((4,))
f(x, bij)
def test_same_as_itself(self):
bij = Shift(jnp.ones((4,)))
self.assertTrue(bij.same_as(bij))
def test_not_same_as_others(self):
bij = Shift(jnp.ones((4,)))
other = Shift(jnp.zeros((4,)))
self.assertFalse(bij.same_as(other))
self.assertFalse(bij.same_as(Tanh()))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/shift_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `lambda_bijector.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import lambda_bijector
from distrax._src.distributions import normal
from distrax._src.distributions import transformed
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
RTOL = 1e-2
def _with_additional_parameters(params, all_named_parameters):
"""Convenience function for appending a cartesian product of parameters."""
for name, param in params:
for named_params in all_named_parameters:
yield (f'{named_params[0]}; {name}',) + named_params[1:] + (param,)
def _with_base_dists(*all_named_parameters):
"""Partial of _with_additional_parameters to specify distrax and TFP base."""
base_dists = (
('tfp_base', tfd.Normal),
('distrax_base', normal.Normal),
)
return _with_additional_parameters(base_dists, all_named_parameters)
class LambdaTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = jax.random.PRNGKey(1234)
@parameterized.named_parameters(_with_base_dists(
('1d std normal', 0, 1),
('2d std normal', np.zeros(2), np.ones(2)),
('broadcasted loc', 0, np.ones(3)),
('broadcasted scale', np.ones(3), 1),
))
def test_event_shape(self, mu, sigma, base_dist):
base = base_dist(mu, sigma)
bijector = lambda_bijector.Lambda(jnp.tanh)
dist = transformed.Transformed(base, bijector)
tfp_bijector = tfb.Tanh()
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
assert dist.event_shape == tfp_dist.event_shape
def test_raises_on_both_none(self):
with self.assertRaises(ValueError):
lambda_bijector.Lambda(forward=None, inverse=None)
def test_raises_on_log_det_without_event_ndims(self):
with self.assertRaises(ValueError):
lambda_bijector.Lambda(
forward=lambda x: x,
forward_log_det_jacobian=lambda x: jnp.zeros_like(x[:-1]),
event_ndims_in=None)
@parameterized.named_parameters(
('event_ndims_in', 0, None),
('event_ndims_out', None, 0),
('event_ndims_in and event_ndims_out', 0, 0),
)
def test_raises_on_event_ndims_without_log_det(self, ndims_in, ndims_out):
with self.assertRaises(ValueError):
lambda_bijector.Lambda(
forward=lambda x: x,
event_ndims_in=ndims_in,
event_ndims_out=ndims_out)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d std normal, no shape', 0, 1, ()),
('1d std normal, int shape', 0, 1, 1),
('1d std normal, 1-tuple shape', 0, 1, (1,)),
('1d std normal, 2-tuple shape', 0, 1, (2, 2)),
('2d std normal, no shape', np.zeros(2), np.ones(2), ()),
('2d std normal, int shape', [0, 0], [1, 1], 1),
('2d std normal, 1-tuple shape', np.zeros(2), np.ones(2), (1,)),
('2d std normal, 2-tuple shape', [0, 0], [1, 1], (2, 2)),
('rank 2 std normal, 2-tuple shape', np.zeros(
(3, 2)), np.ones((3, 2)), (2, 2)),
('broadcasted loc', 0, np.ones(3), (2, 2)),
('broadcasted scale', np.ones(3), 1, ()),
))
def test_sample_shape(self, mu, sigma, sample_shape, base_dist):
base = base_dist(mu, sigma)
bijector = lambda_bijector.Lambda(jnp.tanh)
dist = transformed.Transformed(base, bijector)
def sample_fn(seed, sample_shape):
return dist.sample(seed=seed, sample_shape=sample_shape)
samples = self.variant(sample_fn, ignore_argnums=(1,), static_argnums=1)(
self.seed, sample_shape)
tfp_bijector = tfb.Tanh()
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
tfp_samples = tfp_dist.sample(sample_shape=sample_shape,
seed=self.seed)
chex.assert_equal_shape([samples, tfp_samples])
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d dist, 1d value', 0, 1, 0.5),
('1d dist, 2d value', 0., 1., np.array([0.25, 0.5])),
('2d dist, 1d value', np.zeros(2), np.ones(2), 0.5),
('2d broadcasted dist, 1d value', np.zeros(2), 1, 0.5),
('2d dist, 2d value', np.zeros(2), np.ones(2), np.array([0.25, 0.5])),
('1d dist, 1d value, edge case', 0, 1, 0.99),
))
def test_log_prob(self, mu, sigma, value, base_dist):
base = base_dist(mu, sigma)
bijector = lambda_bijector.Lambda(jnp.tanh)
dist = transformed.Transformed(base, bijector)
actual = self.variant(dist.log_prob)(value)
tfp_bijector = tfb.Tanh()
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
expected = tfp_dist.log_prob(value)
np.testing.assert_allclose(actual, expected, rtol=RTOL)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d dist, 1d value', 0, 1, 0.5),
('1d dist, 2d value', 0., 1., np.array([0.25, 0.5])),
('2d dist, 1d value', np.zeros(2), np.ones(2), 0.5),
('2d broadcasted dist, 1d value', np.zeros(2), 1, 0.5),
('2d dist, 2d value', np.zeros(2), np.ones(2), np.array([0.25, 0.5])),
('1d dist, 1d value, edge case', 0, 1, 0.99),
))
def test_prob(self, mu, sigma, value, base_dist):
base = base_dist(mu, sigma)
bijector = lambda_bijector.Lambda(jnp.tanh)
dist = transformed.Transformed(base, bijector)
actual = self.variant(dist.prob)(value)
tfp_bijector = tfb.Tanh()
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
expected = tfp_dist.prob(value)
np.testing.assert_allclose(actual, expected, rtol=RTOL)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('1d std normal, no shape', 0, 1, ()),
('1d std normal, int shape', 0, 1, 1),
('1d std normal, 1-tuple shape', 0, 1, (1,)),
('1d std normal, 2-tuple shape', 0, 1, (2, 2)),
('2d std normal, no shape', np.zeros(2), np.ones(2), ()),
('2d std normal, int shape', [0, 0], [1, 1], 1),
('2d std normal, 1-tuple shape', np.zeros(2), np.ones(2), (1,)),
('2d std normal, 2-tuple shape', [0, 0], [1, 1], (2, 2)),
('rank 2 std normal, 2-tuple shape', np.zeros(
(3, 2)), np.ones((3, 2)), (2, 2)),
('broadcasted loc', 0, np.ones(3), (2, 2)),
('broadcasted scale', np.ones(3), 1, ()),
))
def test_sample_and_log_prob(self, mu, sigma, sample_shape, base_dist):
base = base_dist(mu, sigma)
bijector = lambda_bijector.Lambda(lambda x: 10 * jnp.tanh(0.1 * x))
dist = transformed.Transformed(base, bijector)
def sample_and_log_prob_fn(seed, sample_shape):
return dist.sample_and_log_prob(seed=seed, sample_shape=sample_shape)
samples, log_prob = self.variant(
sample_and_log_prob_fn, ignore_argnums=(1,), static_argnums=(1,))(
self.seed, sample_shape)
expected_samples = bijector.forward(
base.sample(seed=self.seed, sample_shape=sample_shape))
tfp_bijector = tfb.Chain([tfb.Scale(10), tfb.Tanh(), tfb.Scale(0.1)])
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
tfp_samples = tfp_dist.sample(seed=self.seed, sample_shape=sample_shape)
tfp_log_prob = tfp_dist.log_prob(samples)
chex.assert_equal_shape([samples, tfp_samples])
np.testing.assert_allclose(log_prob, tfp_log_prob, rtol=RTOL)
np.testing.assert_allclose(samples, expected_samples, rtol=RTOL)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(_with_base_dists(
('entropy', 'entropy', 0., 1.),
('mean', 'mean', 0, 1),
('mean from list params', 'mean', [-1, 1], [1, 2]),
('mode', 'mode', 0, 1),
))
def test_method(self, function_string, mu, sigma, base_dist):
base = base_dist(mu, sigma)
bijector = lambda_bijector.Lambda(lambda x: x + 3)
dist = transformed.Transformed(base, bijector)
tfp_bijector = tfb.Shift(3)
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector)
np.testing.assert_allclose(self.variant(getattr(dist, function_string))(),
getattr(tfp_dist, function_string)(), rtol=RTOL)
@chex.all_variants(with_jit=False) # no need to jit function transformations
@parameterized.named_parameters(
('identity', lambda x: x, tfb.Identity),
('tanh', jnp.tanh, tfb.Tanh),
('scale', lambda x: 3.0 * x, lambda: tfb.Scale(3.0)),
('shift', lambda x: x + 2.0, lambda: tfb.Shift(2.0)),
('exp', jnp.exp, tfb.Exp),
('softplus', lambda x: jnp.log1p(jnp.exp(x)), tfb.Softplus),
('square', jnp.square, tfb.Square),
)
def test_log_dets(self, lambda_bjct, tfp_bijector_fn):
bijector = lambda_bijector.Lambda(lambda_bjct)
tfp_bijector = tfp_bijector_fn()
x = np.array([0.05, 0.3, 0.45], dtype=np.float32)
fldj = tfp_bijector.forward_log_det_jacobian(x, event_ndims=0)
fldj_ = self.variant(bijector.forward_log_det_jacobian)(x)
np.testing.assert_allclose(fldj_, fldj, rtol=RTOL)
y = bijector.forward(x) # pytype: disable=wrong-arg-types # jax-ndarray
ildj = tfp_bijector.inverse_log_det_jacobian(y, event_ndims=0)
ildj_ = self.variant(bijector.inverse_log_det_jacobian)(y)
np.testing.assert_allclose(ildj_, ildj, rtol=RTOL)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('identity', lambda x: x, tfb.Identity),
('tanh', jnp.tanh, tfb.Tanh),
('scale', lambda x: 3.0 * x, lambda: tfb.Scale(3.0)),
('shift', lambda x: x + 2.0, lambda: tfb.Shift(2.0)),
('exp', jnp.exp, tfb.Exp),
('softplus', lambda x: jnp.log1p(jnp.exp(x)), tfb.Softplus),
('square', jnp.square, tfb.Square),
))
def test_against_tfp_bijectors(
self, lambda_bjct, tfp_bijector, base_dist):
mu = np.array([-1.0, 0.0, 1.0], dtype=np.float32)
sigma = np.array([0.5, 1.0, 2.5], dtype=np.float32)
base = base_dist(mu, sigma)
bijector = lambda_bijector.Lambda(lambda_bjct)
dist = transformed.Transformed(base, bijector)
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector())
y = np.array([0.05, 0.3, 0.95], dtype=np.float32)
lp_y = tfp_dist.log_prob(y)
lp_y_ = self.variant(dist.log_prob)(y)
np.testing.assert_allclose(lp_y_, lp_y, rtol=RTOL)
p_y = tfp_dist.prob(y)
p_y_ = self.variant(dist.prob)(y)
np.testing.assert_allclose(p_y_, p_y, rtol=RTOL)
@chex.all_variants
@parameterized.named_parameters(_with_base_dists(
('identity', lambda x: x, tfb.Identity),
('tanh', jnp.tanh, tfb.Tanh),
('scale', lambda x: 3.0 * x, lambda: tfb.Scale(3.0)),
))
def test_auto_lambda(
self, forward_fn, tfp_bijector, base_dist):
mu = np.array([-1.0, 0.0, 1.0], dtype=np.float32)
sigma = np.array([0.5, 1.0, 2.5], dtype=np.float32)
base = base_dist(mu, sigma)
dist = transformed.Transformed(base, forward_fn)
tfp_dist = tfd.TransformedDistribution(
conversion.to_tfp(base), tfp_bijector())
y = np.array([0.05, 0.3, 0.95], dtype=np.float32)
lp_y = tfp_dist.log_prob(y)
lp_y_ = self.variant(dist.log_prob)(y)
np.testing.assert_allclose(lp_y_, lp_y, rtol=RTOL)
p_y = tfp_dist.prob(y)
p_y_ = self.variant(dist.prob)(y)
np.testing.assert_allclose(p_y_, p_y, rtol=RTOL)
def test_raises_on_invalid_input_shape(self):
bij = lambda_bijector.Lambda(
forward=lambda x: x,
inverse=lambda y: y,
forward_log_det_jacobian=lambda x: jnp.zeros_like(x[:-1]),
inverse_log_det_jacobian=lambda y: jnp.zeros_like(y[:-1]),
event_ndims_in=1)
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.array(0))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = lambda_bijector.Lambda(lambda x: x)
x = np.zeros(())
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/lambda_bijector_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distrax adapter for Bijectors from TensorFlow Probability."""
from typing import Callable, Tuple
from distrax._src.bijectors import bijector as base
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
Array = base.Array
class BijectorFromTFP(base.Bijector):
"""Wrapper around a TFP bijector that turns it into a Distrax bijector.
TFP bijectors and Distrax bijectors have similar but not identical semantics,
which makes them not directly compatible. This wrapper guarantees that the
  wrapped TFP bijector fully satisfies the semantics of Distrax, which enables
any TFP bijector to be used by Distrax without modification.
"""
def __init__(self, tfp_bijector: tfb.Bijector):
"""Initializes a BijectorFromTFP.
Args:
tfp_bijector: TFP bijector to convert to Distrax bijector.
"""
self._tfp_bijector = tfp_bijector
super().__init__(
event_ndims_in=tfp_bijector.forward_min_event_ndims,
event_ndims_out=tfp_bijector.inverse_min_event_ndims,
is_constant_jacobian=tfp_bijector.is_constant_jacobian)
def __getattr__(self, name: str):
return getattr(self._tfp_bijector, name)
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
return self._tfp_bijector.forward(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
return self._tfp_bijector.inverse(y)
def _ensure_batch_shape(self,
logdet: Array,
event_ndims_out: int,
forward_fn: Callable[[Array], Array],
x: Array) -> Array:
"""Broadcasts logdet to the batch shape as required."""
if self._tfp_bijector.is_constant_jacobian:
# If the Jacobian is constant, TFP may return a log det that doesn't have
# full batch shape, but is broadcastable to it. Distrax assumes that the
# log det is always batch-shaped, so we broadcast.
y_shape = jax.eval_shape(forward_fn, x).shape
if event_ndims_out == 0:
batch_shape = y_shape
else:
batch_shape = y_shape[:-event_ndims_out]
logdet = jnp.broadcast_to(logdet, batch_shape)
return logdet
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
logdet = self._tfp_bijector.forward_log_det_jacobian(x, self.event_ndims_in)
logdet = self._ensure_batch_shape(
logdet, self.event_ndims_out, self._tfp_bijector.forward, x)
return logdet
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
logdet = self._tfp_bijector.inverse_log_det_jacobian(
y, self.event_ndims_out)
logdet = self._ensure_batch_shape(
logdet, self.event_ndims_in, self._tfp_bijector.inverse, y)
return logdet
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
y = self._tfp_bijector.forward(x)
logdet = self._tfp_bijector.forward_log_det_jacobian(x, self.event_ndims_in)
logdet = self._ensure_batch_shape(
logdet, self.event_ndims_out, self._tfp_bijector.forward, x)
return y, logdet
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
x = self._tfp_bijector.inverse(y)
logdet = self._tfp_bijector.inverse_log_det_jacobian(
y, self.event_ndims_out)
logdet = self._ensure_batch_shape(
logdet, self.event_ndims_in, self._tfp_bijector.inverse, y)
return x, logdet
@property
def name(self) -> str:
"""Name of the bijector."""
return self._tfp_bijector.name
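if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; not part of the original
  # module). Wrapping a TFP bijector yields an object with Distrax semantics,
  # e.g. `forward_and_log_det` returning both the output and the log-det.
  example_bij = BijectorFromTFP(tfb.Tanh())
  example_x = jnp.array([0.5, -0.5])
  example_y, example_logdet = example_bij.forward_and_log_det(example_x)
  assert jnp.allclose(example_y, jnp.tanh(example_x))
  assert example_logdet.shape == example_x.shape  # elementwise, event_ndims 0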
| distrax-master | distrax/_src/bijectors/bijector_from_tfp.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Split coupling bijector."""
from typing import Any, Callable, Tuple
from distrax._src.bijectors import bijector as base
from distrax._src.bijectors import block
from distrax._src.utils import conversion
import jax.numpy as jnp
Array = base.Array
BijectorParams = Any
class SplitCoupling(base.Bijector):
"""Split coupling bijector, with arbitrary conditioner & inner bijector.
This coupling bijector splits the input array into two parts along a specified
axis. One part remains unchanged, whereas the other part is transformed by an
inner bijector conditioned on the unchanged part.
Let `f` be a conditional bijector (the inner bijector) and `g` be a function
(the conditioner). For `swap=False`, the split coupling bijector is defined as
follows:
- Forward:
```
x = [x1, x2]
y1 = x1
y2 = f(x2; g(x1))
y = [y1, y2]
```
- Forward Jacobian log determinant:
```
x = [x1, x2]
log|det J(x)| = log|det df/dx2(x2; g(x1))|
```
- Inverse:
```
y = [y1, y2]
x1 = y1
x2 = f^{-1}(y2; g(y1))
x = [x1, x2]
```
- Inverse Jacobian log determinant:
```
y = [y1, y2]
log|det J(y)| = log|det df^{-1}/dy2(y2; g(y1))|
```
Here, `[x1, x2]` is a partition of `x` along some axis. By default, `x1`
remains unchanged and `x2` is transformed. If `swap=True`, `x2` will remain
unchanged and `x1` will be transformed.
"""
def __init__(self,
split_index: int,
event_ndims: int,
conditioner: Callable[[Array], BijectorParams],
bijector: Callable[[BijectorParams], base.BijectorLike],
swap: bool = False,
split_axis: int = -1):
"""Initializes a SplitCoupling bijector.
Args:
split_index: the index used to split the input. The input array will be
split along the axis specified by `split_axis` into two parts. The first
part will correspond to indices up to `split_index` (non-inclusive),
whereas the second part will correspond to indices starting from
`split_index` (inclusive).
event_ndims: the number of event dimensions the bijector operates on. The
`event_ndims_in` and `event_ndims_out` of the coupling bijector are both
equal to `event_ndims`.
conditioner: a function that computes the parameters of the inner bijector
as a function of the unchanged part of the input. The output of the
conditioner will be passed to `bijector` in order to obtain the inner
bijector.
bijector: a callable that returns the inner bijector that will be used to
transform one of the two parts. The input to `bijector` is a set of
parameters that can be used to configure the inner bijector. The
`event_ndims_in` and `event_ndims_out` of the inner bijector must be
equal, and less than or equal to `event_ndims`. If they are less than
`event_ndims`, the remaining dimensions will be converted to event
dimensions using `distrax.Block`.
swap: by default, the part of the input up to `split_index` is the one
that remains unchanged. If `swap` is True, then the other part remains
unchanged and the first one is transformed instead.
split_axis: the axis along which to split the input. Must be negative,
that is, it must index from the end. By default, it's the last axis.
"""
if split_index < 0:
raise ValueError(
f'The split index must be non-negative; got {split_index}.')
if split_axis >= 0:
raise ValueError(f'The split axis must be negative; got {split_axis}.')
if event_ndims < 0:
raise ValueError(
f'`event_ndims` must be non-negative; got {event_ndims}.')
if split_axis < -event_ndims:
raise ValueError(
f'The split axis points to an axis outside the event. With '
f'`event_ndims == {event_ndims}`, the split axis must be between -1 '
f'and {-event_ndims}. Got `split_axis == {split_axis}`.')
self._split_index = split_index
self._conditioner = conditioner
self._bijector = bijector
self._swap = swap
self._split_axis = split_axis
super().__init__(event_ndims_in=event_ndims)
@property
def bijector(self) -> Callable[[BijectorParams], base.BijectorLike]:
"""The callable that returns the inner bijector of `SplitCoupling`."""
return self._bijector
@property
def conditioner(self) -> Callable[[Array], BijectorParams]:
"""The conditioner function."""
return self._conditioner
@property
def split_index(self) -> int:
"""The index used to split the input."""
return self._split_index
@property
def swap(self) -> bool:
"""The flag that determines which part of the input remains unchanged."""
return self._swap
@property
def split_axis(self) -> int:
"""The axis along which to split the input."""
return self._split_axis
def _split(self, x: Array) -> Tuple[Array, Array]:
x1, x2 = jnp.split(x, [self._split_index], self._split_axis)
if self._swap:
x1, x2 = x2, x1
return x1, x2
def _recombine(self, x1: Array, x2: Array) -> Array:
if self._swap:
x1, x2 = x2, x1
return jnp.concatenate([x1, x2], self._split_axis)
def _inner_bijector(self, params: BijectorParams) -> base.Bijector:
"""Returns an inner bijector for the passed params."""
bijector = conversion.as_bijector(self._bijector(params))
if bijector.event_ndims_in != bijector.event_ndims_out:
raise ValueError(
f'The inner bijector must have `event_ndims_in==event_ndims_out`. '
f'Instead, it has `event_ndims_in=={bijector.event_ndims_in}` and '
f'`event_ndims_out=={bijector.event_ndims_out}`.')
extra_ndims = self.event_ndims_in - bijector.event_ndims_in
if extra_ndims < 0:
raise ValueError(
f'The inner bijector can\'t have more event dimensions than the '
f'coupling bijector. Got {bijector.event_ndims_in} for the inner '
f'bijector and {self.event_ndims_in} for the coupling bijector.')
elif extra_ndims > 0:
bijector = block.Block(bijector, extra_ndims)
return bijector
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
self._check_forward_input_shape(x)
x1, x2 = self._split(x)
params = self._conditioner(x1)
y2, logdet = self._inner_bijector(params).forward_and_log_det(x2)
return self._recombine(x1, y2), logdet
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
y1, y2 = self._split(y)
params = self._conditioner(y1)
x2, logdet = self._inner_bijector(params).inverse_and_log_det(y2)
return self._recombine(y1, x2), logdet
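if __name__ == '__main__':
  # Minimal usage sketch (added for illustration; not part of the original
  # module). The part before `split_index` passes through unchanged; the rest
  # is transformed by an inner bijector whose shift is produced by the
  # conditioner from the unchanged part.
  example_bij = SplitCoupling(
      split_index=2,
      event_ndims=1,
      conditioner=lambda x1: jnp.sum(x1),
      bijector=lambda params: lambda x2: x2 + params)
  example_x = jnp.array([1., 2., 3., 4.])
  example_y, _ = example_bij.forward_and_log_det(example_x)
  assert jnp.allclose(example_y[:2], example_x[:2])       # unchanged part
  assert jnp.allclose(example_y[2:], example_x[2:] + 3.)  # shifted by sum(x1)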
| distrax-master | distrax/_src/bijectors/split_coupling.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper to adapt a Distrax bijector for use in TFP."""
from typing import Any, Optional, Tuple
import chex
from distrax._src.bijectors import bijector
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
Array = chex.Array
Bijector = bijector.Bijector
TangentSpace = tfp.experimental.tangent_spaces.TangentSpace
def tfp_compatible_bijector(
base_bijector: Bijector,
name: Optional[str] = None):
"""Create a TFP-compatible bijector from a Distrax bijector.
Given a Distrax bijector, return a wrapped bijector that behaves as a TFP
bijector, to be used in TFP meta-bijectors and the TransformedDistribution.
In particular, the wrapped bijector implements the methods
`[forward|inverse]_event_ndims`, `[forward|inverse]_event_shape`,
`[forward|inverse]_event_shape_tensor`, `[forward|inverse]_log_det_jacobian`,
and the properties `[forward|inverse]_min_event_ndims`. Other attributes are
delegated to the `base_bijector`.
The methods of the resulting object do not take a `name` argument,
unlike their TFP equivalents.
The `shape` methods are implemented by tracing the `forward` and `inverse`
methods of the bijector, applied to a zero tensor of the requested dtype. If
the `forward` or `inverse` methods are not traceable or cannot be applied to a
zero tensor, then we cannot guarantee the correctness of the result.
Args:
base_bijector: A Distrax bijector.
name: The bijector name.
Returns:
An object that behaves like a TFP bijector.
"""
name_ = name
class TFPCompatibleBijector(base_bijector.__class__):
"""Class to wrap a Distrax bijector."""
def __init__(self):
self._is_injective = True
self._is_permutation = False
self._parts_interact = False
self.dtype = None
self.has_static_min_event_ndims = True
self.forward_min_event_ndims = base_bijector.event_ndims_in
self.inverse_min_event_ndims = base_bijector.event_ndims_out
def __getattr__(self, name: str):
return getattr(base_bijector, name)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""See `Bijector.forward_and_log_det`."""
return base_bijector.forward_and_log_det(x)
@property
def name(self) -> str:
"""The name of the wrapped bijector."""
return name_ or f"TFPCompatible{base_bijector.name}"
def experimental_batch_shape(self, x_event_ndims=None, y_event_ndims=None):
raise NotImplementedError()
def experimental_batch_shape_tensor(
self, x_event_ndims=None, y_event_ndims=None):
raise NotImplementedError()
def forward_dtype(self, _: jnp.dtype) -> None:
"""Returns None, making no promise regarding dtypes."""
return None
def inverse_dtype(self, _: jnp.dtype) -> None:
"""Returns None, making no promise regarding dtypes."""
return None
def forward_event_ndims(self, event_ndims: int) -> int:
"""Returns the number of event dimensions of the output of `forward`."""
extra_event_ndims = self._check_ndims(
"Forward", event_ndims, base_bijector.event_ndims_in)
return base_bijector.event_ndims_out + extra_event_ndims
def inverse_event_ndims(self, event_ndims: int) -> int:
"""Returns the number of event dimensions of the output of `inverse`."""
extra_event_ndims = self._check_ndims(
"Inverse", event_ndims, base_bijector.event_ndims_out)
return base_bijector.event_ndims_in + extra_event_ndims
def forward_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape:
"""Returns the shape of the output of `forward` as a `TensorShape`."""
self._check_shape("Forward", event_shape, base_bijector.event_ndims_in)
forward_event_shape = jax.eval_shape(
base_bijector.forward, jnp.zeros(event_shape)).shape
return tfp.tf2jax.TensorShape(forward_event_shape)
def inverse_event_shape(self, event_shape) -> tfp.tf2jax.TensorShape:
"""Returns the shape of the output of `inverse` as a `TensorShape`."""
self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out)
inverse_event_shape = jax.eval_shape(
base_bijector.inverse, jnp.zeros(event_shape)).shape
return tfp.tf2jax.TensorShape(inverse_event_shape)
def forward_event_shape_tensor(self, event_shape) -> Array:
"""Returns the shape of the output of `forward` as a `jnp.array`."""
self._check_shape("Forward", event_shape, base_bijector.event_ndims_in)
forward_event_shape = jax.eval_shape(
base_bijector.forward, jnp.zeros(event_shape)).shape
return jnp.array(forward_event_shape, dtype=jnp.int32)
def inverse_event_shape_tensor(self, event_shape) -> Array:
"""Returns the shape of the output of `inverse` as a `jnp.array`."""
self._check_shape("Inverse", event_shape, base_bijector.event_ndims_out)
inverse_event_shape = jax.eval_shape(
base_bijector.inverse, jnp.zeros(event_shape)).shape
return jnp.array(inverse_event_shape, dtype=jnp.int32)
def forward_log_det_jacobian(
self, x: Array, event_ndims: Optional[int] = None) -> Array:
"""See `Bijector.forward_log_det_jacobian`."""
extra_event_ndims = self._check_ndims(
"Forward", event_ndims, base_bijector.event_ndims_in)
fldj = base_bijector.forward_log_det_jacobian(x)
return math.sum_last(fldj, extra_event_ndims)
def inverse_log_det_jacobian(
self, y: Array, event_ndims: Optional[int] = None) -> Array:
"""See `Bijector.inverse_log_det_jacobian`."""
extra_event_ndims = self._check_ndims(
"Inverse", event_ndims, base_bijector.event_ndims_out)
ildj = base_bijector.inverse_log_det_jacobian(y)
return math.sum_last(ildj, extra_event_ndims)
def _check_ndims(
self, direction: str, event_ndims: int, expected_ndims: int) -> int:
"""Checks that `event_ndims` are correct and returns any extra ndims."""
if event_ndims is not None and event_ndims < expected_ndims:
raise ValueError(f"{direction} `event_ndims` of {self.name} must be at "
f"least {expected_ndims} but was passed {event_ndims} "
f"instead.")
return 0 if event_ndims is None else event_ndims - expected_ndims
def _check_shape(
self, direction: str, event_shape: Any, expected_ndims: int):
"""Checks that `event_shape` is correct, raising ValueError otherwise."""
if len(event_shape) < expected_ndims:
raise ValueError(f"{direction} `event_shape` of {self.name} must have "
f"at least {expected_ndims} dimensions, but was "
f"{event_shape} which has only {len(event_shape)} "
f"dimensions instead.")
def experimental_compute_density_correction(
self,
x: Array,
tangent_space: TangentSpace,
backward_compat: bool = True,
**kwargs) -> Tuple[Array, TangentSpace]:
"""Density correction for this transform wrt the tangent space, at x.
See `tfp.bijectors.experimental_compute_density_correction`, and
Radul and Alexeev, AISTATS 2021, “The Base Measure Problem and its
Solution”, https://arxiv.org/abs/2010.09647.
Args:
x: `float` or `double` `Array`.
tangent_space: `TangentSpace` or one of its subclasses. The tangent to
the support manifold at `x`.
backward_compat: unused
**kwargs: Optional keyword arguments forwarded to tangent space methods.
Returns:
density_correction: `Array` representing the density correction---in log
space---under the transformation that this Bijector denotes. Assumes
the Bijector is dimension-preserving.
space: `TangentSpace` representing the new tangent to the support
manifold, at `x`.
"""
del backward_compat
# We ignore the `backward_compat` flag and always act as though it's
# true because Distrax bijectors and distributions need not follow the
# base measure protocol from TFP. This implies that we expect to return
# the `FullSpace` tangent space.
return tangent_space.transform_dimension_preserving(x, self, **kwargs)
return TFPCompatibleBijector()
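if __name__ == '__main__':
  # Illustrative sketch, not part of the original distrax source: wrap a Distrax
  # Tanh bijector so it can be used inside a TFP TransformedDistribution.
  from distrax._src.bijectors import tanh
  wrapped = tfp_compatible_bijector(tanh.Tanh())
  dist = tfd.TransformedDistribution(
      distribution=tfd.Normal(loc=0., scale=1.), bijector=wrapped)
  samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=3)
  log_probs = dist.log_prob(samples)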
| distrax-master | distrax/_src/bijectors/tfp_compatible_bijector.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `bijector.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import bijector
import jax
import jax.numpy as jnp
import numpy as np
class DummyBijector(bijector.Bijector):
def forward_and_log_det(self, x):
super()._check_forward_input_shape(x)
return x, jnp.zeros(x.shape[:-1], jnp.float_)
def inverse_and_log_det(self, y):
super()._check_inverse_input_shape(y)
return y, jnp.zeros(y.shape[:-1], jnp.float_)
class BijectorTest(parameterized.TestCase):
@parameterized.named_parameters(
('negative ndims_in', -1, 1, False, False),
('negative ndims_out', 1, -1, False, False),
('non-consistent constant properties', 1, 1, True, False),
)
def test_invalid_parameters(self, ndims_in, ndims_out, cnst_jac, cnst_logdet):
with self.assertRaises(ValueError):
DummyBijector(ndims_in, ndims_out, cnst_jac, cnst_logdet)
@chex.all_variants
@parameterized.parameters('forward', 'inverse')
def test_invalid_inputs(self, method_str):
bij = DummyBijector(1, 1, True, True)
fn = self.variant(getattr(bij, method_str))
with self.assertRaises(ValueError):
fn(jnp.zeros(()))
def test_jittable(self):
@jax.jit
def forward(bij, x):
return bij.forward(x)
bij = DummyBijector(1, 1, True, True)
x = jnp.zeros((4,))
np.testing.assert_allclose(forward(bij, x), x)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/bijector_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper to turn independent Bijectors into block Bijectors."""
from typing import Tuple
from distrax._src.bijectors import bijector as base
from distrax._src.utils import conversion
from distrax._src.utils import math
Array = base.Array
BijectorLike = base.BijectorLike
BijectorT = base.BijectorT
class Block(base.Bijector):
"""A wrapper that promotes a bijector to a block bijector.
A block bijector applies a bijector to a k-dimensional array of events, but
considers that array of events to be a single event. In practical terms, this
means that the log det Jacobian will be summed over its last k dimensions.
For example, consider a scalar bijector (such as `Tanh`) that operates on
scalar events. We may want to apply this bijector identically to a 4D array of
shape [N, H, W, C] representing a sequence of N images. Doing so naively will
produce a log det Jacobian of shape [N, H, W, C], because the scalar bijector
will assume scalar events and so all 4 dimensions will be considered as batch.
  Promoting the scalar bijector to a "block scalar" that operates on the 3D
  arrays can be done with `Block(bijector, ndims=3)`. Then, applying the block
bijector will produce a log det Jacobian of shape [N] as desired.
In general, suppose `bijector` operates on n-dimensional events. Then,
`Block(bijector, k)` will promote `bijector` to a block bijector that
operates on (k + n)-dimensional events, summing the log det Jacobian over its
last k dimensions. In practice, this means that the last k batch dimensions
will be turned into event dimensions.
"""
def __init__(self, bijector: BijectorLike, ndims: int):
"""Initializes a Block.
Args:
bijector: the bijector to be promoted to a block bijector. It can be a
distrax bijector, a TFP bijector, or a callable to be wrapped by
`Lambda`.
ndims: number of batch dimensions to promote to event dimensions.
"""
if ndims < 0:
raise ValueError(f"`ndims` must be non-negative; got {ndims}.")
self._bijector = conversion.as_bijector(bijector)
self._ndims = ndims
super().__init__(
event_ndims_in=ndims + self._bijector.event_ndims_in,
event_ndims_out=ndims + self._bijector.event_ndims_out,
is_constant_jacobian=self._bijector.is_constant_jacobian,
is_constant_log_det=self._bijector.is_constant_log_det)
@property
def bijector(self) -> BijectorT:
"""The base bijector, without promoting to a block bijector."""
return self._bijector
@property
def ndims(self) -> int:
"""The number of batch dimensions promoted to event dimensions."""
return self._ndims
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
self._check_forward_input_shape(x)
return self._bijector.forward(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
self._check_inverse_input_shape(y)
return self._bijector.inverse(y)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
self._check_forward_input_shape(x)
log_det = self._bijector.forward_log_det_jacobian(x)
return math.sum_last(log_det, self._ndims)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
log_det = self._bijector.inverse_log_det_jacobian(y)
return math.sum_last(log_det, self._ndims)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
self._check_forward_input_shape(x)
y, log_det = self._bijector.forward_and_log_det(x)
return y, math.sum_last(log_det, self._ndims)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
x, log_det = self._bijector.inverse_and_log_det(y)
return x, math.sum_last(log_det, self._ndims)
@property
def name(self) -> str:
"""Name of the bijector."""
return self.__class__.__name__ + self._bijector.name
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Block: # pylint: disable=unidiomatic-typecheck
return self.bijector.same_as(other.bijector) and self.ndims == other.ndims
return False
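if __name__ == '__main__':
  # Illustrative sketch, not part of the original distrax source: promote a
  # scalar Tanh bijector to act on [H, W, C] image events, as described in the
  # class docstring, so the log det Jacobian is summed over the last 3 axes.
  import jax
  from distrax._src.bijectors import tanh
  block_tanh = Block(tanh.Tanh(), ndims=3)
  x = jax.random.normal(jax.random.PRNGKey(0), (2, 8, 8, 3))  # batch of 2 images
  y, log_det = block_tanh.forward_and_log_det(x)
  assert log_det.shape == (2,)  # one scalar log det per image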
| distrax-master | distrax/_src/bijectors/block.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `triangular_linear.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors.tanh import Tanh
from distrax._src.bijectors.triangular_linear import TriangularLinear
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class TriangularLinearTest(parameterized.TestCase):
def test_static_properties(self):
bij = TriangularLinear(matrix=jnp.eye(4))
self.assertTrue(bij.is_constant_jacobian)
self.assertTrue(bij.is_constant_log_det)
self.assertEqual(bij.event_ndims_in, 1)
self.assertEqual(bij.event_ndims_out, 1)
@parameterized.parameters(
{'batch_shape': (), 'dtype': jnp.float16, 'is_lower': True},
{'batch_shape': (2, 3), 'dtype': jnp.float32, 'is_lower': False},
)
def test_properties(self, batch_shape, dtype, is_lower):
bij = TriangularLinear(
matrix=jnp.ones(batch_shape + (4, 4), dtype), is_lower=is_lower)
self.assertEqual(bij.event_dims, 4)
self.assertEqual(bij.batch_shape, batch_shape)
self.assertEqual(bij.dtype, dtype)
self.assertEqual(bij.matrix.shape, batch_shape + (4, 4))
self.assertEqual(bij.matrix.dtype, dtype)
tri = np.tril if is_lower else np.triu
np.testing.assert_allclose(
bij.matrix, np.tile(tri(np.ones((4, 4))), batch_shape + (1, 1)),
atol=1e-6)
self.assertEqual(bij.is_lower, is_lower)
@parameterized.named_parameters(
('matrix is 0d', {'matrix': np.zeros(())}),
('matrix is 1d', {'matrix': np.zeros((4,))}),
('matrix is not square', {'matrix': np.zeros((3, 4))}),
)
def test_raises_with_invalid_parameters(self, bij_params):
with self.assertRaises(ValueError):
TriangularLinear(**bij_params)
@chex.all_variants
@parameterized.parameters(
((5,), (5,)),
((5,), ()),
((), (5,)),
)
def test_batched_parameters(self, matrix_batch_shape, input_batch_shape):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(
next(prng), matrix_batch_shape + (4, 4)) + jnp.eye(4)
bijector = TriangularLinear(matrix)
x = jax.random.normal(next(prng), input_batch_shape + (4,))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
z, logdet_inv = self.variant(bijector.inverse_and_log_det)(x)
output_batch_shape = jnp.broadcast_arrays(
matrix[..., 0, 0], x[..., 0])[0].shape
self.assertEqual(y.shape, output_batch_shape + (4,))
self.assertEqual(z.shape, output_batch_shape + (4,))
self.assertEqual(logdet_fwd.shape, output_batch_shape)
self.assertEqual(logdet_inv.shape, output_batch_shape)
matrix = jnp.broadcast_to(
matrix, output_batch_shape + (4, 4)).reshape((-1, 4, 4))
x = jnp.broadcast_to(x, output_batch_shape + (4,)).reshape((-1, 4))
y = y.reshape((-1, 4))
z = z.reshape((-1, 4))
logdet_fwd = logdet_fwd.flatten()
logdet_inv = logdet_inv.flatten()
for i in range(np.prod(output_batch_shape)):
bijector = TriangularLinear(matrix[i])
this_y, this_logdet_fwd = self.variant(bijector.forward_and_log_det)(x[i])
this_z, this_logdet_inv = self.variant(bijector.inverse_and_log_det)(x[i])
np.testing.assert_allclose(this_y, y[i], rtol=8e-3)
np.testing.assert_allclose(this_z, z[i], atol=7e-6)
np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-7)
np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=7e-6)
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'is_lower': True},
{'batch_shape': (3,), 'is_lower': True},
{'batch_shape': (2, 3), 'is_lower': False},
)
def test_identity_initialization(self, batch_shape, is_lower):
bijector = TriangularLinear(matrix=jnp.eye(4), is_lower=is_lower)
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
x = jax.random.normal(next(prng), batch_shape + (4,))
# Forward methods.
y, logdet = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y, x, 8e-3)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
# Inverse methods.
x_rec, logdet = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_array_equal(x_rec, y)
np.testing.assert_array_equal(logdet, jnp.zeros(batch_shape))
@chex.all_variants
@parameterized.parameters(
{'batch_shape': (), 'param_shape': (), 'is_lower': True},
{'batch_shape': (3,), 'param_shape': (3,), 'is_lower': True},
{'batch_shape': (2, 3), 'param_shape': (3,), 'is_lower': False}
)
def test_inverse_methods(self, batch_shape, param_shape, is_lower):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(next(prng), param_shape + (4, 4)) + jnp.eye(4)
bijector = TriangularLinear(matrix, is_lower)
x = jax.random.normal(next(prng), batch_shape + (4,))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=9e-3)
np.testing.assert_array_equal(logdet_fwd, -logdet_inv)
@chex.all_variants
@parameterized.parameters(True, False)
def test_forward_jacobian_det(self, is_lower):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(next(prng), (4, 4)) + jnp.eye(4)
bijector = TriangularLinear(matrix, is_lower)
batched_x = jax.random.normal(next(prng), (10, 4))
single_x = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bijector.forward_log_det_jacobian)(batched_x)
jacobian_fn = jax.jacfwd(bijector.forward)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_x))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-3)
@chex.all_variants
@parameterized.parameters(True, False)
def test_inverse_jacobian_det(self, is_lower):
prng = hk.PRNGSequence(jax.random.PRNGKey(42))
matrix = jax.random.uniform(next(prng), (4, 4)) + jnp.eye(4)
bijector = TriangularLinear(matrix, is_lower)
batched_y = jax.random.normal(next(prng), (10, 4))
single_y = jax.random.normal(next(prng), (4,))
batched_logdet = self.variant(bijector.inverse_log_det_jacobian)(batched_y)
jacobian_fn = jax.jacfwd(bijector.inverse)
logdet_numerical = jnp.linalg.slogdet(jacobian_fn(single_y))[1]
for logdet in batched_logdet:
np.testing.assert_allclose(logdet, logdet_numerical, atol=5e-5)
def test_raises_on_invalid_input_shape(self):
bij = TriangularLinear(matrix=jnp.eye(4))
for fn in [bij.forward, bij.inverse,
bij.forward_log_det_jacobian, bij.inverse_log_det_jacobian,
bij.forward_and_log_det, bij.inverse_and_log_det]:
with self.assertRaises(ValueError):
fn(jnp.array(0))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bij = TriangularLinear(matrix=jnp.eye(4))
x = np.zeros((4,))
f(x, bij)
def test_same_as_itself(self):
bij = TriangularLinear(matrix=jnp.eye(4))
self.assertTrue(bij.same_as(bij))
def test_not_same_as_others(self):
bij = TriangularLinear(matrix=jnp.eye(4))
other = TriangularLinear(matrix=jnp.ones((4, 4)))
self.assertFalse(bij.same_as(other))
self.assertFalse(bij.same_as(Tanh()))
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/triangular_linear_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `rational_quadratic_spline.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.bijectors import rational_quadratic_spline
import jax
import jax.numpy as jnp
import numpy as np
def _make_bijector(params_shape,
zero_params=False,
num_bins=8,
range_min=0.,
range_max=1.,
boundary_slopes='unconstrained'):
"""Returns RationalQuadraticSpline bijector."""
params_shape += (3 * num_bins + 1,)
if zero_params:
params = jnp.zeros(params_shape)
else:
key = jax.random.PRNGKey(101)
params = jax.random.normal(key, params_shape)
return rational_quadratic_spline.RationalQuadraticSpline(
params,
range_min=range_min,
range_max=range_max,
boundary_slopes=boundary_slopes)
class RationalQuadraticSplineTest(parameterized.TestCase):
"""Tests for rational quadratic spline."""
def test_properties(self):
bijector = _make_bijector(params_shape=(4, 5), num_bins=8)
self.assertEqual(bijector.is_constant_jacobian, False)
self.assertEqual(bijector.is_constant_log_det, False)
assert bijector.num_bins == 8
self.assertEqual(bijector.knot_slopes.shape, (4, 5, 9))
self.assertEqual(bijector.x_pos.shape, (4, 5, 9))
self.assertEqual(bijector.y_pos.shape, (4, 5, 9))
@parameterized.named_parameters(
('params.shape[-1] < 4',
{'params': np.zeros((3,)), 'range_min': 0., 'range_max': 1.}),
('params.shape[-1] % 3 is not 1',
{'params': np.zeros((8,)), 'range_min': 0., 'range_max': 1.}),
('inconsistent range min and max',
{'params': np.zeros((10,)), 'range_min': 1., 'range_max': 0.9}),
('negative min_bin_size',
{'params': np.zeros((10,)), 'range_min': 0., 'range_max': 1.,
'min_bin_size': -0.1}),
('negative min_knot_slope',
{'params': np.zeros((10,)), 'range_min': 0., 'range_max': 1.,
'min_knot_slope': -0.1}),
('min_knot_slope above 1',
{'params': np.zeros((10,)), 'range_min': 0., 'range_max': 1.,
'min_knot_slope': 1.3}),
('invalid boundary_slopes',
{'params': np.zeros((10,)), 'range_min': 0., 'range_max': 1.,
'boundary_slopes': 'invalid_value'}),
('num_bins * min_bin_size greater than total_size',
{'params': np.zeros((10,)), 'range_min': 0., 'range_max': 1.,
'min_bin_size': 0.9}),
)
def test_invalid_properties(self, bij_params):
with self.assertRaises(ValueError):
rational_quadratic_spline.RationalQuadraticSpline(**bij_params)
@chex.all_variants
def test_shapes_are_correct(self):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _make_bijector(params_shape=(4, 5))
# Forward methods.
y, logdet = self.variant(bijector.forward_and_log_det)(x)
self.assertEqual(y.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, (2, 3, 4, 5))
# Inverse methods.
x, logdet = self.variant(bijector.inverse_and_log_det)(y)
self.assertEqual(x.shape, (2, 3, 4, 5))
self.assertEqual(logdet.shape, (2, 3, 4, 5))
@chex.all_variants
def test_broadcasting_is_correct(self):
z = 0.5 * jnp.ones((2, 2))
# Broadcast along first axis.
bijector = _make_bijector(params_shape=(2,))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(z)
x, logdet_inv = self.variant(bijector.inverse_and_log_det)(z)
np.testing.assert_array_equal(y[0], y[1])
np.testing.assert_array_equal(x[0], x[1])
np.testing.assert_array_equal(logdet_fwd[0], logdet_fwd[1])
np.testing.assert_array_equal(logdet_inv[0], logdet_inv[1])
self.assertFalse(jnp.allclose(y[:, 0], y[:, 1]))
self.assertFalse(jnp.allclose(x[:, 0], x[:, 1]))
self.assertFalse(jnp.allclose(logdet_fwd[:, 0], logdet_fwd[:, 1]))
self.assertFalse(jnp.allclose(logdet_inv[:, 0], logdet_inv[:, 1]))
# Broadcast along second axis.
bijector = _make_bijector(params_shape=(2, 1))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(z)
x, logdet_inv = self.variant(bijector.inverse_and_log_det)(z)
np.testing.assert_array_equal(y[:, 0], y[:, 1])
np.testing.assert_array_equal(x[:, 0], x[:, 1])
np.testing.assert_array_equal(logdet_fwd[:, 0], logdet_fwd[:, 1])
np.testing.assert_array_equal(logdet_inv[:, 0], logdet_inv[:, 1])
self.assertFalse(jnp.allclose(y[0], y[1]))
self.assertFalse(jnp.allclose(x[0], x[1]))
self.assertFalse(jnp.allclose(logdet_fwd[0], logdet_fwd[1]))
self.assertFalse(jnp.allclose(logdet_inv[0], logdet_inv[1]))
@chex.all_variants
def test_is_identity_for_zero_params(self):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _make_bijector(params_shape=(4, 5), zero_params=True)
# Forward methods.
y, logdet = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y, x, atol=5e-5)
np.testing.assert_allclose(logdet, jnp.zeros((2, 3, 4, 5)), atol=5e-5)
# Inverse methods.
x, logdet = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(y, x, atol=5e-5)
np.testing.assert_allclose(logdet, jnp.zeros((2, 3, 4, 5)), atol=5e-5)
@chex.all_variants
def test_inverse_methods_are_correct(self):
key = jax.random.PRNGKey(42)
x = jax.random.normal(key, (2, 3, 4, 5))
bijector = _make_bijector(params_shape=(4, 5))
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
x_rec, logdet_inv = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x_rec, x, atol=7e-4)
np.testing.assert_allclose(logdet_fwd, -logdet_inv, atol=7e-4)
@chex.all_variants
def test_is_monotonically_increasing(self):
z = jnp.linspace(start=-2, stop=2, num=100)
bijector = _make_bijector(params_shape=())
y = self.variant(bijector.forward)(z)
x = self.variant(bijector.inverse)(z)
np.testing.assert_array_less(y[:-1], y[1:])
np.testing.assert_array_less(x[:-1], x[1:])
@chex.all_variants
def test_composite_methods_are_consistent(self):
key = jax.random.PRNGKey(42)
bijector = _make_bijector(params_shape=(4, 5))
# Forward methods.
x = jax.random.normal(key, (2, 3, 4, 5))
y1 = self.variant(bijector.forward)(x)
logdet1 = self.variant(bijector.forward_log_det_jacobian)(x)
y2, logdet2 = self.variant(bijector.forward_and_log_det)(x)
np.testing.assert_allclose(y1, y2, atol=7e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=7e-8)
# Inverse methods.
y = jax.random.normal(key, (2, 3, 4, 5))
x1 = self.variant(bijector.inverse)(y)
logdet1 = self.variant(bijector.inverse_log_det_jacobian)(y)
x2, logdet2 = self.variant(bijector.inverse_and_log_det)(y)
np.testing.assert_allclose(x1, x2, atol=7e-8)
np.testing.assert_allclose(logdet1, logdet2, atol=7e-8)
@chex.all_variants
def test_boundary_conditions(self):
a = jnp.array(0.)
b = jnp.array(1.)
# Unconstrained boundary slopes.
bijector = _make_bijector(
params_shape=(),
range_min=float(a),
range_max=float(b),
boundary_slopes='unconstrained')
log_slope_a = self.variant(bijector.forward_log_det_jacobian)(a)
log_slope_b = self.variant(bijector.forward_log_det_jacobian)(b)
self.assertEqual(self.variant(bijector.forward)(a), a)
self.assertEqual(self.variant(bijector.forward)(b), b)
self.assertFalse(jnp.allclose(log_slope_a, 0.))
self.assertFalse(jnp.allclose(log_slope_b, 0.))
# Lower boundary slope equal to 1.
bijector = _make_bijector(
params_shape=(),
range_min=float(a),
range_max=float(b),
boundary_slopes='lower_identity')
log_slope_a = self.variant(bijector.forward_log_det_jacobian)(a)
log_slope_b = self.variant(bijector.forward_log_det_jacobian)(b)
self.assertEqual(self.variant(bijector.forward)(a), a)
self.assertEqual(self.variant(bijector.forward)(b), b)
self.assertEqual(log_slope_a, 0.)
self.assertFalse(jnp.allclose(log_slope_b, 0.))
# Upper boundary slope equal to 1.
bijector = _make_bijector(
params_shape=(),
range_min=float(a),
range_max=float(b),
boundary_slopes='upper_identity')
log_slope_a = self.variant(bijector.forward_log_det_jacobian)(a)
log_slope_b = self.variant(bijector.forward_log_det_jacobian)(b)
self.assertEqual(self.variant(bijector.forward)(a), a)
self.assertEqual(self.variant(bijector.forward)(b), b)
self.assertFalse(jnp.allclose(log_slope_a, 0.))
self.assertEqual(log_slope_b, 0.)
# Both boundary slopes equal to 1.
bijector = _make_bijector(
params_shape=(),
range_min=float(a),
range_max=float(b),
boundary_slopes='identity')
log_slope_a = self.variant(bijector.forward_log_det_jacobian)(a)
log_slope_b = self.variant(bijector.forward_log_det_jacobian)(b)
self.assertEqual(self.variant(bijector.forward)(a), a)
self.assertEqual(self.variant(bijector.forward)(b), b)
self.assertEqual(log_slope_a, 0.)
self.assertEqual(log_slope_b, 0.)
# Circular spline (periodic slope).
bijector = _make_bijector(
params_shape=(),
range_min=float(a),
range_max=float(b),
boundary_slopes='circular')
log_slope_a = self.variant(bijector.forward_log_det_jacobian)(a)
log_slope_b = self.variant(bijector.forward_log_det_jacobian)(b)
self.assertEqual(self.variant(bijector.forward)(a), a)
self.assertEqual(self.variant(bijector.forward)(b), b)
self.assertEqual(log_slope_a, log_slope_b)
self.assertFalse(jnp.allclose(log_slope_b, 0.))
@chex.all_variants
@parameterized.parameters(
((3, 4), (3, 4)),
((3, 4), (3, 1)),
((3, 4), (4,)),
((3, 4), ()),
((3, 1), (3, 4)),
((4,), (3, 4)),
((), (3, 4)),
)
def test_batched_parameters(self, params_batch_shape, input_batch_shape):
k1, k2 = jax.random.split(jax.random.PRNGKey(42), 2)
num_bins = 4
param_dim = 3 * num_bins + 1
params = jax.random.normal(k1, params_batch_shape + (param_dim,))
bijector = rational_quadratic_spline.RationalQuadraticSpline(
params, range_min=0., range_max=1.)
x = jax.random.uniform(k2, input_batch_shape)
y, logdet_fwd = self.variant(bijector.forward_and_log_det)(x)
z, logdet_inv = self.variant(bijector.inverse_and_log_det)(x)
output_batch_shape = jnp.broadcast_arrays(params[..., 0], x)[0].shape
self.assertEqual(y.shape, output_batch_shape)
self.assertEqual(z.shape, output_batch_shape)
self.assertEqual(logdet_fwd.shape, output_batch_shape)
self.assertEqual(logdet_inv.shape, output_batch_shape)
params = jnp.broadcast_to(
params, output_batch_shape + (param_dim,)).reshape((-1, param_dim))
x = jnp.broadcast_to(x, output_batch_shape).flatten()
y = y.flatten()
z = z.flatten()
logdet_fwd = logdet_fwd.flatten()
logdet_inv = logdet_inv.flatten()
for i in range(np.prod(output_batch_shape)):
bijector = rational_quadratic_spline.RationalQuadraticSpline(
params[i], range_min=0., range_max=1.)
this_y, this_logdet_fwd = self.variant(bijector.forward_and_log_det)(x[i])
this_z, this_logdet_inv = self.variant(bijector.inverse_and_log_det)(x[i])
np.testing.assert_allclose(this_y, y[i], atol=1e-7)
np.testing.assert_allclose(this_z, z[i], atol=1e-6)
np.testing.assert_allclose(this_logdet_fwd, logdet_fwd[i], atol=1e-5)
np.testing.assert_allclose(this_logdet_inv, logdet_inv[i], atol=1e-5)
@chex.all_variants
@parameterized.parameters(
(-1., 4., -3., 1.,), # when b >= 0
(1., -4., 3., 3.), # when b < 0
(-1., 2., -1., 1.), # when b**2 - 4*a*c = 0, and b >= 0
(1., -2., 1., 1.), # when b**2 - 4*a*c = 0, and b < 0
)
def test_safe_quadratic_root(self, a, b, c, x):
a = jnp.array(a)
b = jnp.array(b)
c = jnp.array(c)
x = jnp.array(x)
sol_x, grad = self.variant(jax.value_and_grad(
rational_quadratic_spline._safe_quadratic_root))(a, b, c)
np.testing.assert_allclose(sol_x, x, atol=1e-5)
self.assertFalse(np.any(np.isnan(grad)))
def test_jittable(self):
@jax.jit
def f(x, b):
return b.forward(x)
bijector = _make_bijector(params_shape=(4, 5))
x = np.zeros((2, 3, 4, 5))
f(x, bijector)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/bijectors/rational_quadratic_spline_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sigmoid bijector."""
from typing import Tuple
from distrax._src.bijectors import bijector as base
import jax
import jax.numpy as jnp
Array = base.Array
class Sigmoid(base.Bijector):
"""A bijector that computes the logistic sigmoid.
The log-determinant implementation in this bijector is more numerically stable
than relying on the automatic differentiation approach used by Lambda, so this
bijector should be preferred over Lambda(jax.nn.sigmoid) where possible. See
`tfp.bijectors.Sigmoid` for details.
Note that the underlying implementation of `jax.nn.sigmoid` used by the
`forward` function of this bijector does not support inputs of integer type.
To invoke the forward function of this bijector on an argument of integer
type, it should first be cast explicitly to a floating point type.
When the absolute value of the input is large, `Sigmoid` becomes close to a
constant, so that it is not possible to recover the input `x` from the output
`y` within machine precision. In cases where it is needed to compute both the
forward mapping and the backward mapping one after the other to recover the
original input `x`, it is the user's responsibility to simplify the operation
to avoid numerical issues; this is unlike the `tfp.bijectors.Sigmoid`. One
  example of such a case is to use the bijector within a `Transformed`
distribution and to obtain the log-probability of samples obtained from the
distribution's `sample` method. For values of the samples for which it is not
possible to apply the inverse bijector accurately, `log_prob` returns NaN.
This can be avoided by using `sample_and_log_prob` instead of `sample`
followed by `log_prob`.
"""
def __init__(self):
"""Initializes a Sigmoid bijector."""
super().__init__(event_ndims_in=0)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
# pylint:disable=invalid-unary-operand-type
return -_more_stable_softplus(-x) - _more_stable_softplus(x)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return _more_stable_sigmoid(x), self.forward_log_det_jacobian(x)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
x = jnp.log(y) - jnp.log1p(-y)
return x, -self.forward_log_det_jacobian(x)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
return type(other) is Sigmoid # pylint: disable=unidiomatic-typecheck
def _more_stable_sigmoid(x: Array) -> Array:
"""Where extremely negatively saturated, approximate sigmoid with exp(x)."""
return jnp.where(x < -9, jnp.exp(x), jax.nn.sigmoid(x))
def _more_stable_softplus(x: Array) -> Array:
"""Where extremely saturated, approximate softplus with log1p(exp(x))."""
return jnp.where(x < -9, jnp.log1p(jnp.exp(x)), jax.nn.softplus(x))
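if __name__ == '__main__':
  # Illustrative sketch, not part of the original distrax source: the log det
  # is evaluated in closed form as -softplus(-x) - softplus(x), which stays
  # finite even for strongly saturated inputs.
  bij = Sigmoid()
  x = jnp.array([-20., 0., 20.])
  y, log_det = bij.forward_and_log_det(x)  # y in (0, 1), log_det finite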
| distrax-master | distrax/_src/bijectors/sigmoid.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Masked coupling bijector."""
from typing import Any, Callable, Optional, Tuple
from distrax._src.bijectors import bijector as base
from distrax._src.utils import conversion
from distrax._src.utils import math
import jax.numpy as jnp
Array = base.Array
BijectorParams = Any
class MaskedCoupling(base.Bijector):
"""Coupling bijector that uses a mask to specify which inputs are transformed.
This coupling bijector takes in a boolean mask that indicates which inputs are
transformed. Inputs where `mask==True` remain unchanged. Inputs where
`mask==False` are transformed by an inner bijector, conditioned on the masked
inputs.
The number of event dimensions this bijector operates on is referred to as
`event_ndims`, and is equal to both `event_ndims_in` and `event_ndims_out`.
By default, `event_ndims` is equal to `mask.ndim + inner_event_ndims`. The
user can override this by passing an explicit value for `event_ndims`. If
`event_ndims > mask.ndim + inner_event_ndims`, the mask is broadcast to the
  extra dimensions. If `event_ndims < mask.ndim + inner_event_ndims`, the mask
is assumed to be a batch of masks that will broadcast against the input.
Let `f` be a conditional bijector (the inner bijector), `g` be a function (the
conditioner), and `m` be a boolean mask interpreted numerically, such that
True is 1 and False is 0. The masked coupling bijector is defined as follows:
- Forward: `y = (1-m) * f(x; g(m*x)) + m*x`
- Forward Jacobian log determinant:
`log|det J(x)| = sum((1-m) * log|df/dx(x; g(m*x))|)`
- Inverse: `x = (1-m) * f^{-1}(y; g(m*y)) + m*y`
- Inverse Jacobian log determinant:
`log|det J(y)| = sum((1-m) * log|df^{-1}/dy(y; g(m*y))|)`
"""
def __init__(self,
mask: Array,
conditioner: Callable[[Array], BijectorParams],
bijector: Callable[[BijectorParams], base.BijectorLike],
event_ndims: Optional[int] = None,
inner_event_ndims: int = 0):
"""Initializes a MaskedCoupling bijector.
Args:
mask: the mask, or a batch of masks. Its elements must be boolean; a value
of True indicates that the corresponding input remains unchanged, and a
value of False indicates that the corresponding input is transformed.
The mask should have `mask.ndim` equal to the number of batch dimensions
plus `event_ndims - inner_event_ndims`. In particular, an inner event
is either fully masked or fully un-masked: it is not possible to be
partially masked.
conditioner: a function that computes the parameters of the inner bijector
as a function of the masked input. The output of the conditioner will be
passed to `bijector` in order to obtain the inner bijector.
bijector: a callable that returns the inner bijector that will be used to
transform the input. The input to `bijector` is a set of parameters that
can be used to configure the inner bijector. The `event_ndims_in` and
        `event_ndims_out` of this bijector must match the `inner_event_ndims`.
        For example, if `inner_event_ndims` is `0`, then the inner bijector must
be a scalar bijector.
event_ndims: the number of array dimensions the bijector operates on. If
None, it defaults to `mask.ndim + inner_event_ndims`. Both
`event_ndims_in` and `event_ndims_out` are equal to `event_ndims`. Note
that `event_ndims` should be at least as large as `inner_event_ndims`.
inner_event_ndims: the number of array dimensions the inner bijector
operates on. This is `0` by default, meaning the inner bijector acts on
scalars.
"""
if mask.dtype != bool:
raise ValueError(f'`mask` must have values of type `bool`; got values of'
f' type `{mask.dtype}`.')
if event_ndims is not None and event_ndims < inner_event_ndims:
raise ValueError(f'`event_ndims={event_ndims}` should be at least as'
f' large as `inner_event_ndims={inner_event_ndims}`.')
self._mask = mask
self._event_mask = jnp.reshape(mask, mask.shape + (1,) * inner_event_ndims)
self._conditioner = conditioner
self._bijector = bijector
self._inner_event_ndims = inner_event_ndims
if event_ndims is None:
self._event_ndims = mask.ndim + inner_event_ndims
else:
self._event_ndims = event_ndims
super().__init__(event_ndims_in=self._event_ndims)
@property
def bijector(self) -> Callable[[BijectorParams], base.BijectorLike]:
"""The callable that returns the inner bijector of `MaskedCoupling`."""
return self._bijector
@property
def conditioner(self) -> Callable[[Array], BijectorParams]:
"""The conditioner function."""
return self._conditioner
@property
def mask(self) -> Array:
"""The mask characterizing the `MaskedCoupling`, with boolean `dtype`."""
return self._mask
def _inner_bijector(self, params: BijectorParams) -> base.Bijector:
bijector = conversion.as_bijector(self._bijector(params))
if (bijector.event_ndims_in != self._inner_event_ndims
or bijector.event_ndims_out != self._inner_event_ndims):
raise ValueError(
'The inner bijector event ndims in and out must match the'
f' `inner_event_ndims={self._inner_event_ndims}`. Instead, got'
f' `event_ndims_in={bijector.event_ndims_in}` and'
f' `event_ndims_out={bijector.event_ndims_out}`.')
return bijector
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
self._check_forward_input_shape(x)
masked_x = jnp.where(self._event_mask, x, 0.)
params = self._conditioner(masked_x)
y0, log_d = self._inner_bijector(params).forward_and_log_det(x)
y = jnp.where(self._event_mask, x, y0)
logdet = math.sum_last(
jnp.where(self._mask, 0., log_d),
self._event_ndims - self._inner_event_ndims)
return y, logdet
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
self._check_inverse_input_shape(y)
masked_y = jnp.where(self._event_mask, y, 0.)
params = self._conditioner(masked_y)
x0, log_d = self._inner_bijector(params).inverse_and_log_det(y)
x = jnp.where(self._event_mask, y, x0)
logdet = math.sum_last(jnp.where(self._mask, 0., log_d),
self._event_ndims - self._inner_event_ndims)
return x, logdet
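if __name__ == '__main__':
  # Illustrative sketch, not part of the original distrax source: a masked
  # coupling layer over 1-D events of size 4 with an alternating mask. The
  # `toy_conditioner` is a hypothetical stand-in for a neural network.
  from distrax._src.bijectors import scalar_affine
  mask = jnp.array([True, False, True, False])
  def toy_conditioner(masked_x):
    # Every position's shift depends only on the masked (visible) inputs.
    return {'shift': jnp.sum(masked_x) * jnp.ones_like(masked_x),
            'log_scale': jnp.zeros_like(masked_x)}
  coupling = MaskedCoupling(
      mask=mask, conditioner=toy_conditioner,
      bijector=lambda p: scalar_affine.ScalarAffine(
          shift=p['shift'], log_scale=p['log_scale']))
  y, log_det = coupling.forward_and_log_det(jnp.ones(4))
  x, _ = coupling.inverse_and_log_det(y)  # recovers the original input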
| distrax-master | distrax/_src/bijectors/masked_coupling.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for inverting a Distrax Bijector."""
from typing import Tuple
from distrax._src.bijectors import bijector as base
from distrax._src.utils import conversion
Array = base.Array
BijectorLike = base.BijectorLike
BijectorT = base.BijectorT
class Inverse(base.Bijector):
"""A bijector that inverts a given bijector.
That is, if `bijector` implements the transformation `f`, `Inverse(bijector)`
implements the inverse transformation `f^{-1}`.
The inversion is performed by swapping the forward with the corresponding
inverse methods of the given bijector.
"""
def __init__(self, bijector: BijectorLike):
"""Initializes an Inverse bijector.
Args:
bijector: the bijector to be inverted. It can be a distrax bijector, a TFP
bijector, or a callable to be wrapped by `Lambda`.
"""
self._bijector = conversion.as_bijector(bijector)
super().__init__(
event_ndims_in=self._bijector.event_ndims_out,
event_ndims_out=self._bijector.event_ndims_in,
is_constant_jacobian=self._bijector.is_constant_jacobian,
is_constant_log_det=self._bijector.is_constant_log_det)
@property
def bijector(self) -> BijectorT:
"""The base bijector that was the input to `Inverse`."""
return self._bijector
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
return self._bijector.inverse(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
return self._bijector.forward(y)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
return self._bijector.inverse_log_det_jacobian(x)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
return self._bijector.forward_log_det_jacobian(y)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self._bijector.inverse_and_log_det(x)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self._bijector.forward_and_log_det(y)
@property
def name(self) -> str:
"""Name of the bijector."""
return self.__class__.__name__ + self._bijector.name
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is Inverse: # pylint: disable=unidiomatic-typecheck
return self.bijector.same_as(other.bijector)
return False
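if __name__ == '__main__':
  # Illustrative sketch, not part of the original distrax source: inverting
  # Tanh yields a bijector whose forward map is arctanh.
  import jax.numpy as jnp
  from distrax._src.bijectors import tanh
  arctanh = Inverse(tanh.Tanh())
  y = arctanh.forward(jnp.array(0.5))  # equals jnp.arctanh(0.5)
  x = arctanh.inverse(y)               # recovers 0.5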
| distrax-master | distrax/_src/bijectors/inverse.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diagonal-plus-low-rank linear bijector."""
from typing import Tuple
from distrax._src.bijectors import bijector as base
from distrax._src.bijectors import chain
from distrax._src.bijectors import diag_linear
from distrax._src.bijectors import linear
import jax
import jax.numpy as jnp
Array = base.Array
def _get_small_matrix(u_matrix: Array, v_matrix: Array) -> Array:
rank = u_matrix.shape[-1]
return jnp.eye(rank) + v_matrix.T @ u_matrix
def _get_logdet(matrix: Array) -> Array:
"""Computes the log absolute determinant of `matrix`."""
return jnp.linalg.slogdet(matrix)[1]
def _forward_unbatched(x: Array, u_matrix: Array, v_matrix: Array) -> Array:
return x + u_matrix @ (v_matrix.T @ x)
def _inverse_unbatched(
y: Array, u_matrix: Array, v_matrix: Array, small_matrix: Array) -> Array:
return y - u_matrix @ jax.scipy.linalg.solve(small_matrix, v_matrix.T @ y)
class _IdentityPlusLowRankLinear(base.Bijector):
"""Linear bijector whose weights are a low-rank perturbation of the identity.
The bijector is defined as `f(x) = Ax` where `A = I + UV^T` and `U`, `V` are
DxK matrices. When K < D, this bijector is computationally more efficient than
an equivalent `UnconstrainedAffine` bijector.
The Jacobian determinant is computed using the matrix determinant lemma:
det J(x) = det A = det(I + V^T U)
The matrix `I + V^T U` is KxK instead of DxD, so for K < D computing its
determinant is faster than computing the determinant of `A`.
The inverse is computed using the Woodbury matrix identity:
A^{-1} = I - U (I + V^T U)^{-1} V^T
As above, inverting the KxK matrix `I + V^T U` is faster than inverting `A`
when K < D.
The bijector is invertible if and only if `I + V^T U` is invertible. It is the
responsibility of the user to make sure that this is the case; the class will
make no attempt to verify that the bijector is invertible.
"""
def __init__(self, u_matrix: Array, v_matrix: Array):
"""Initializes the bijector.
Args:
u_matrix: a DxK matrix, the `U` matrix in `A = I + UV^T`. Can also be a
batch of DxK matrices.
v_matrix: a DxK matrix, the `V` matrix in `A = I + UV^T`. Can also be a
batch of DxK matrices.
"""
super().__init__(event_ndims_in=1, is_constant_jacobian=True)
self._batch_shape = jax.lax.broadcast_shapes(
u_matrix.shape[:-2], v_matrix.shape[:-2])
self._u_matrix = u_matrix
self._v_matrix = v_matrix
self._small_matrix = jnp.vectorize(
_get_small_matrix, signature="(d,k),(d,k)->(k,k)")(u_matrix, v_matrix)
self._logdet = _get_logdet(self._small_matrix)
def forward(self, x: Array) -> Array:
"""Computes y = f(x)."""
self._check_forward_input_shape(x)
batched = jnp.vectorize(
_forward_unbatched, signature="(d),(d,k),(d,k)->(d)")
return batched(x, self._u_matrix, self._v_matrix)
def forward_log_det_jacobian(self, x: Array) -> Array:
"""Computes log|det J(f)(x)|."""
self._check_forward_input_shape(x)
batch_shape = jax.lax.broadcast_shapes(self._batch_shape, x.shape[:-1])
return jnp.broadcast_to(self._logdet, batch_shape)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self.forward(x), self.forward_log_det_jacobian(x)
def inverse(self, y: Array) -> Array:
"""Computes x = f^{-1}(y)."""
self._check_inverse_input_shape(y)
batched = jnp.vectorize(
_inverse_unbatched, signature="(d),(d,k),(d,k),(k,k)->(d)")
return batched(y, self._u_matrix, self._v_matrix, self._small_matrix)
def inverse_log_det_jacobian(self, y: Array) -> Array:
"""Computes log|det J(f^{-1})(y)|."""
return -self.forward_log_det_jacobian(y)
def inverse_and_log_det(self, y: Array) -> Tuple[Array, Array]:
"""Computes x = f^{-1}(y) and log|det J(f^{-1})(y)|."""
return self.inverse(y), self.inverse_log_det_jacobian(y)
def _check_shapes_are_valid(diag: Array,
u_matrix: Array,
v_matrix: Array) -> None:
"""Checks array shapes are valid, raises `ValueError` if not."""
for x, name, n in [(diag, "diag", 1),
(u_matrix, "u_matrix", 2),
(v_matrix, "v_matrix", 2)]:
if x.ndim < n:
raise ValueError(
f"`{name}` must have at least {n} dimensions, got {x.ndim}.")
dim = diag.shape[-1]
u_shape = u_matrix.shape[-2:]
v_shape = v_matrix.shape[-2:]
if u_shape[0] != dim:
raise ValueError(
f"The length of `diag` must equal the first dimension of `u_matrix`. "
f"Got `diag.length = {dim}` and `u_matrix.shape = {u_shape}`.")
if u_shape != v_shape:
raise ValueError(
f"`u_matrix` and `v_matrix` must have the same shape; got "
f"`u_matrix.shape = {u_shape}` and `v_matrix.shape = {v_shape}`.")
class DiagPlusLowRankLinear(linear.Linear):
"""Linear bijector whose weights are a low-rank perturbation of a diagonal.
The bijector is defined as `f(x) = Ax` where `A = S + UV^T` and:
- `S` is a DxD diagonal matrix,
- `U`, `V` are DxK matrices.
When K < D, this bijector is computationally more efficient than an equivalent
`UnconstrainedAffine` bijector.
The Jacobian determinant is computed using the matrix determinant lemma:
det J(x) = det A = det(S) det(I + V^T S^{-1} U)
The matrix `I + V^T S^{-1} U` is KxK instead of DxD, so for K < D computing
its determinant is faster than computing the determinant of `A`.
The inverse is computed using the Woodbury matrix identity:
A^{-1} = (I - S^{-1} U (I + V^T S^{-1} U)^{-1} V^T) S^{-1}
As above, inverting the KxK matrix `I + V^T S^{-1} U` is faster than inverting
`A` when K < D.
The bijector is invertible if and only if both `S` and `I + V^T S^{-1} U` are
invertible matrices. It is the responsibility of the user to make sure that
this is the case; the class will make no attempt to verify that the bijector
is invertible.
"""
def __init__(self, diag: Array, u_matrix: Array, v_matrix: Array):
"""Initializes the bijector.
Args:
diag: a vector of length D, the diagonal of matrix `S`. Can also be a
batch of such vectors.
u_matrix: a DxK matrix, the `U` matrix in `A = S + UV^T`. Can also be a
batch of DxK matrices.
v_matrix: a DxK matrix, the `V` matrix in `A = S + UV^T`. Can also be a
batch of DxK matrices.
"""
_check_shapes_are_valid(diag, u_matrix, v_matrix)
# Since `S + UV^T = S (I + WV^T)` where `W = S^{-1}U`, we can implement this
# bijector by composing `_IdentityPlusLowRankLinear` with `DiagLinear`.
id_plus_low_rank_linear = _IdentityPlusLowRankLinear(
u_matrix=u_matrix / diag[..., None],
v_matrix=v_matrix)
self._bijector = chain.Chain(
[diag_linear.DiagLinear(diag), id_plus_low_rank_linear])
batch_shape = jnp.broadcast_shapes(
diag.shape[:-1], u_matrix.shape[:-2], v_matrix.shape[:-2])
dtype = jnp.result_type(diag, u_matrix, v_matrix)
super().__init__(
event_dims=diag.shape[-1], batch_shape=batch_shape, dtype=dtype)
self._diag = diag
self._u_matrix = u_matrix
self._v_matrix = v_matrix
self.forward = self._bijector.forward
self.forward_log_det_jacobian = self._bijector.forward_log_det_jacobian
self.inverse = self._bijector.inverse
self.inverse_log_det_jacobian = self._bijector.inverse_log_det_jacobian
self.inverse_and_log_det = self._bijector.inverse_and_log_det
@property
def diag(self) -> Array:
"""Vector of length D, the diagonal of matrix `S`."""
return self._diag
@property
def u_matrix(self) -> Array:
"""The `U` matrix in `A = S + UV^T`."""
return self._u_matrix
@property
def v_matrix(self) -> Array:
"""The `V` matrix in `A = S + UV^T`."""
return self._v_matrix
@property
def matrix(self) -> Array:
"""The matrix `A = S + UV^T` of the transformation."""
batched = jnp.vectorize(
lambda s, u, v: jnp.diag(s) + u @ v.T,
signature="(d),(d,k),(d,k)->(d,d)")
return batched(self._diag, self._u_matrix, self._v_matrix)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self._bijector.forward_and_log_det(x)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is DiagPlusLowRankLinear: # pylint: disable=unidiomatic-typecheck
return all((
self.diag is other.diag,
self.u_matrix is other.u_matrix,
self.v_matrix is other.v_matrix,
))
return False
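if __name__ == '__main__':
  # Illustrative sketch, not part of the original distrax source: for D=3, K=1
  # the bijector agrees with a dense matrix-vector product, and its log det
  # matches the one computed from the full matrix `A = S + UV^T`.
  key_s, key_u, key_v, key_x = jax.random.split(jax.random.PRNGKey(0), 4)
  diag = jax.random.uniform(key_s, (3,)) + 1.
  u_matrix = 0.1 * jax.random.normal(key_u, (3, 1))
  v_matrix = 0.1 * jax.random.normal(key_v, (3, 1))
  bij = DiagPlusLowRankLinear(diag, u_matrix, v_matrix)
  x = jax.random.normal(key_x, (3,))
  y, log_det = bij.forward_and_log_det(x)
  assert jnp.allclose(y, bij.matrix @ x, atol=1e-5)
  assert jnp.allclose(log_det, jnp.linalg.slogdet(bij.matrix)[1], atol=1e-5)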
| distrax-master | distrax/_src/bijectors/diag_plus_low_rank_linear.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diagonal linear bijector."""
from typing import Tuple
from distrax._src.bijectors import bijector as base
from distrax._src.bijectors import block
from distrax._src.bijectors import linear
from distrax._src.bijectors import scalar_affine
import jax.numpy as jnp
Array = base.Array
class DiagLinear(linear.Linear):
"""Linear bijector with a diagonal weight matrix.
The bijector is defined as `f(x) = Ax` where `A` is a `DxD` diagonal matrix.
Additional dimensions, if any, index batches.
The Jacobian determinant is trivially computed by taking the product of the
diagonal entries in `A`. The inverse transformation `x = f^{-1}(y)` is
computed element-wise.
The bijector is invertible if and only if the diagonal entries of `A` are all
non-zero. It is the responsibility of the user to make sure that this is the
case; the class will make no attempt to verify that the bijector is
invertible.
"""
def __init__(self, diag: Array):
"""Initializes the bijector.
Args:
diag: a vector of length D, the diagonal of matrix `A`. Can also be a
batch of such vectors.
"""
if diag.ndim < 1:
raise ValueError("`diag` must have at least one dimension.")
self._bijector = block.Block(
scalar_affine.ScalarAffine(shift=0., scale=diag), ndims=1)
super().__init__(
event_dims=diag.shape[-1],
batch_shape=diag.shape[:-1],
dtype=diag.dtype)
self._diag = diag
self.forward = self._bijector.forward
self.forward_log_det_jacobian = self._bijector.forward_log_det_jacobian
self.inverse = self._bijector.inverse
self.inverse_log_det_jacobian = self._bijector.inverse_log_det_jacobian
self.inverse_and_log_det = self._bijector.inverse_and_log_det
@property
def diag(self) -> Array:
"""Vector of length D, the diagonal of matrix `A`."""
return self._diag
@property
def matrix(self) -> Array:
"""The full matrix `A`."""
return jnp.vectorize(jnp.diag, signature="(k)->(k,k)")(self.diag)
def forward_and_log_det(self, x: Array) -> Tuple[Array, Array]:
"""Computes y = f(x) and log|det J(f)(x)|."""
return self._bijector.forward_and_log_det(x)
def same_as(self, other: base.Bijector) -> bool:
"""Returns True if this bijector is guaranteed to be the same as `other`."""
if type(other) is DiagLinear: # pylint: disable=unidiomatic-typecheck
return self.diag is other.diag
return False
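# Illustrative usage sketch added for exposition; not part of the library.
def _example_diag_linear():
  """Shows that the log-determinant is the sum of the log-absolute diagonal."""
  diag = jnp.array([0.5, 2., -3.])
  bijector = DiagLinear(diag)
  x = jnp.ones((3,))
  y, log_det = bijector.forward_and_log_det(x)
  # y == diag * x and log_det == log(0.5) + log(2.) + log(3.)
  return y, log_det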
| distrax-master | distrax/_src/bijectors/diag_linear.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `transformations.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.utils import transformations
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
RTOL = 1e-2
class TransformationsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.seed = jax.random.PRNGKey(1234)
@chex.all_variants
@parameterized.named_parameters(
('tanh', jnp.tanh, jnp.arctanh, 0.5),
('arctanh', jnp.arctanh, jnp.tanh, 0.5),
('sinh', jnp.sinh, jnp.arcsinh, 0.5),
('arcsinh', jnp.arcsinh, jnp.sinh, 0.5),
('cosh', jnp.cosh, jnp.arccosh, 0.5),
('arccosh', jnp.arccosh, jnp.cosh, 2.0),
('exp', jnp.exp, jnp.log, 0.5),
('log', jnp.log, jnp.exp, 0.5),
('pow', lambda x: jnp.power(x, 3.5), lambda y: jnp.power(y, 1/3.5), 0.5),
('add', lambda x: x + 3, lambda y: y - 3, 0.5),
('sqrt', jnp.sqrt, jnp.square, 0.5),
('square', jnp.square, jnp.sqrt, 0.5),
('reciprocal', jnp.reciprocal, jnp.reciprocal, 0.5),
('negate', lambda x: -x, lambda y: -y, 0.5),
('log1p', jnp.log1p, jnp.expm1, 0.5),
('expm1', jnp.expm1, jnp.log1p, 0.5),
('erf', jax.lax.erf, jax.lax.erf_inv, 0.5),
('erf_inv', jax.lax.erf_inv, jax.lax.erf, 0.5),
('2_mul_x', lambda x: 2 * x, lambda y: y * 0.5, 0.5),
('x_mul_2', lambda x: x * 2, lambda y: y * 0.5, 0.5),
('2_div_x', lambda x: 2 / x, lambda y: 2 / y, 0.5),
('x_div_2', lambda x: x / 2, lambda y: y / 0.5, 0.5),
('x_sub_3', lambda x: x - 3, lambda y: y + 3, 0.5),
('3_sub_x', lambda x: 3 - x, lambda y: 3 - y, 0.5),
('x**3.5', lambda x: x**3.5, lambda y: y**(1/3.5), 0.5),
('x**(1/3.5)', lambda x: x**(1/3.5), lambda y: y**3.5, 0.5),
)
def test_inversion(self, forward, inverse, x):
x = jnp.array([x], dtype=jnp.float32)
y = forward(x)
inverse_ = self.variant(transformations.inv(forward))
x_ = inverse_(y)
np.testing.assert_allclose(x_, x, rtol=RTOL)
np.testing.assert_allclose(x_, inverse(y), rtol=RTOL)
@chex.all_variants
@parameterized.named_parameters(
('tanh', jnp.tanh, jnp.arctanh, 0.5),
('arctanh', jnp.arctanh, jnp.tanh, 0.5),
('sinh', jnp.sinh, jnp.arcsinh, 0.5),
('arcsinh', jnp.arcsinh, jnp.sinh, 0.5),
('cosh', jnp.cosh, jnp.arccosh, 0.5),
('arccosh', jnp.arccosh, jnp.cosh, 2.0),
('exp', jnp.exp, jnp.log, 0.5),
('log', jnp.log, jnp.exp, 0.5),
('pow', lambda x: jnp.power(x, 3.5), lambda y: jnp.power(y, 1/3.5), 0.5),
('add', lambda x: x + 3, lambda y: y - 3, 0.5),
('sqrt', jnp.sqrt, jnp.square, 0.5),
('square', jnp.square, jnp.sqrt, 0.5),
('reciprocal', jnp.reciprocal, jnp.reciprocal, 0.5),
('negate', lambda x: -x, lambda y: -y, 0.5),
('log1p', jnp.log1p, jnp.expm1, 0.5),
('expm1', jnp.expm1, jnp.log1p, 0.5),
('erf', jax.lax.erf, jax.lax.erf_inv, 0.5),
('erf_inv', jax.lax.erf_inv, jax.lax.erf, 0.5),
('2_mul_x', lambda x: 2 * x, lambda y: y * 0.5, 0.5),
('x_mul_2', lambda x: x * 2, lambda y: y * 0.5, 0.5),
('2_div_x', lambda x: 2 / x, lambda y: 2 / y, 0.5),
('x_div_2', lambda x: x / 2, lambda y: y / 0.5, 0.5),
('x_sub_3', lambda x: x - 3, lambda y: y + 3, 0.5),
('3_sub_x', lambda x: 3 - x, lambda y: 3 - y, 0.5),
('x**3.5', lambda x: x**3.5, lambda y: y**(1/3.5), 0.5),
('x**(1/3.5)', lambda x: x**(1/3.5), lambda y: y**3.5, 0.5),
)
def test_inverting_jitted_function(self, forward, inverse, x):
x = jnp.array([x], dtype=jnp.float32)
y = forward(x)
jitted_forward = jax.jit(forward)
inverse_ = self.variant(transformations.inv(jitted_forward))
x_ = inverse_(y)
np.testing.assert_allclose(x_, x, rtol=RTOL)
np.testing.assert_allclose(x_, inverse(y), rtol=RTOL)
@chex.all_variants
@parameterized.named_parameters(
('identity, 0d', lambda x: x, tfb.Identity, 0.5),
('identity, 1d', lambda x: x, tfb.Identity, [0.9]),
('identity, 2d', lambda x: x, tfb.Identity, [0.25, 0.75]),
('identity, 2x2d', lambda x: x, tfb.Identity, [[0.25, 0.75],
[0.1, 0.9]]),
('scale, 0d', lambda x: 3.0 * x, lambda: tfb.Scale(3.0), 0.5),
('scale, 1d', lambda x: 3.0 * x, lambda: tfb.Scale(3.0), [0.9]),
('scale, 2d', lambda x: 3.0 * x, lambda: tfb.Scale(3.0), [0.25, 0.75]),
('scale, 2x2d', lambda x: 3.0 * x, lambda: tfb.Scale(3.0), [[0.25, 0.75],
[0.1, 0.9]]),
('tanh, 0d', jnp.tanh, tfb.Tanh, 0.5),
('tanh, 1d', jnp.tanh, tfb.Tanh, [0.9]),
('tanh, 2d', jnp.tanh, tfb.Tanh, [0.25, 0.75]),
('tanh, 2x2d', jnp.tanh, tfb.Tanh, [[0.25, 0.75],
[0.1, 0.9]]),
('softplus, 0d', jax.nn.softplus, tfb.Softplus, 0.5),
('softplus, 1d', jax.nn.softplus, tfb.Softplus, [0.9]),
('softplus, 2d', jax.nn.softplus, tfb.Softplus, [0.25, 0.75]),
('softplus, 2x2d', jax.nn.softplus, tfb.Softplus, [[0.25, 0.75],
[0.1, 0.9]]),
('sigmoid, 0d', jax.nn.sigmoid, tfb.Sigmoid, 0.5),
('sigmoid, 1d', jax.nn.sigmoid, tfb.Sigmoid, [0.9]),
('sigmoid, 2d', jax.nn.sigmoid, tfb.Sigmoid, [0.25, 0.75]),
('sigmoid, 2x2d', jax.nn.sigmoid, tfb.Sigmoid, [[0.25, 0.75],
[0.1, 0.9]]),
)
def test_log_det_scalar(self, forward, tfb_bijector, x):
x = np.array(x, dtype=np.float32)
log_det_fn = self.variant(transformations.log_det_scalar(forward))
actual = log_det_fn(x)
expected = tfb_bijector().forward_log_det_jacobian(x, event_ndims=0)
np.testing.assert_allclose(actual, expected, rtol=RTOL)
@parameterized.named_parameters(
('tanh', jnp.tanh, False),
('sigmoid', jax.nn.sigmoid, False),
('identity', lambda x: x, True),
('square', lambda x: x**2, False),
('softplus', jax.nn.softplus, False),
('exp', jnp.exp, False),
('log', jnp.log, False, 1.0),
('shift', lambda x: x + 3.0, True),
('scale', lambda x: 2.0 * x, True),
('shift and scale', lambda x: 2.0 * x + 3.0, True),
)
def test_is_constant_jacobian(self, fn, is_constant, x=0.0):
is_constant_ = transformations.is_constant_jacobian(fn, x)
np.testing.assert_array_equal(is_constant, is_constant_)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/utils/transformations_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `importance_sampling.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import categorical
from distrax._src.utils import importance_sampling
import jax
import jax.numpy as jnp
import numpy as np
class ImportanceSamplingTest(parameterized.TestCase):
@chex.all_variants(with_pmap=False)
def test_importance_sampling_ratios_on_policy(self):
key = jax.random.PRNGKey(42)
probs = jnp.array([0.4, 0.2, 0.1, 0.3])
dist = categorical.Categorical(probs=probs)
event = dist.sample(seed=key, sample_shape=())
ratios_fn = self.variant(
importance_sampling.importance_sampling_ratios)
rhos = ratios_fn(target_dist=dist, sampling_dist=dist, event=event)
expected_rhos = jnp.ones_like(rhos)
np.testing.assert_array_almost_equal(rhos, expected_rhos)
@chex.all_variants(with_pmap=False)
def test_importance_sampling_ratios_off_policy(self):
"""Tests for a full batch."""
pi_logits = np.array([[0.2, 0.8], [0.6, 0.4]], dtype=np.float32)
pi = categorical.Categorical(logits=pi_logits)
mu_logits = np.array([[0.8, 0.2], [0.6, 0.4]], dtype=np.float32)
mu = categorical.Categorical(logits=mu_logits)
events = np.array([1, 0], dtype=np.int32)
ratios_fn = self.variant(
importance_sampling.importance_sampling_ratios)
rhos = ratios_fn(pi, mu, events)
expected_rhos = np.array(
[pi.probs[0][1] / mu.probs[0][1], pi.probs[1][0] / mu.probs[1][0]],
dtype=np.float32)
np.testing.assert_allclose(expected_rhos, rhos, atol=1e-4)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/utils/importance_sampling_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hmm.py."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import mvn_diag
from distrax._src.distributions import normal
from distrax._src.utils import hmm
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
def _make_models(init_logits, trans_logits, obs_dist_name, obs_params, length):
"""Build distrax HMM and equivalent TFP HMM."""
obs_dist = {
"categorical": categorical.Categorical,
"normal": normal.Normal,
"mvn_diag": mvn_diag.MultivariateNormalDiag,
}[obs_dist_name](*obs_params)
dx_model = hmm.HMM(
init_dist=categorical.Categorical(init_logits),
trans_dist=categorical.Categorical(trans_logits),
obs_dist=obs_dist,
)
tfp_obs_dist = {
"categorical": tfd.Categorical,
"normal": tfd.Normal,
"mvn_diag": tfd.MultivariateNormalDiag,
}[obs_dist_name](*obs_params)
tfp_model = tfd.HiddenMarkovModel(
initial_distribution=tfd.Categorical(init_logits),
transition_distribution=tfd.Categorical(trans_logits),
observation_distribution=tfp_obs_dist,
num_steps=length,
)
return dx_model, tfp_model
class Function:
"""Overrides lambda __repr__ to "fn" to stabilize test naming across cores."""
def __init__(self, fn):
self._fn = fn
def __call__(self, *args, **kwargs):
return self._fn(*args, **kwargs)
def __repr__(self):
return "fn"
def _test_cases(test_fn):
return parameterized.product(
length=(1, 17),
num_states=(2, 23),
obs_dist_name_and_params_fn=(
("categorical", Function(lambda n: ( # pylint: disable=g-long-lambda
jax.random.normal(jax.random.PRNGKey(0), (n, 7)),))),
("normal", Function(lambda n: ( # pylint: disable=g-long-lambda
jax.random.normal(jax.random.PRNGKey(0), (n,)),
jax.random.normal(jax.random.PRNGKey(1), (n,))**2))),
("mvn_diag", Function(lambda n: ( # pylint: disable=g-long-lambda
jax.random.normal(jax.random.PRNGKey(0), (n, 7)),
jax.random.normal(jax.random.PRNGKey(1), (n, 7))**2))),
),
)(test_fn)
class HMMTest(parameterized.TestCase):
@chex.all_variants(without_device=False)
@_test_cases
def test_sample(self, length, num_states, obs_dist_name_and_params_fn):
name, params_fn = obs_dist_name_and_params_fn
logits = jax.random.normal(jax.random.PRNGKey(0), (num_states,))
matrix = jax.random.normal(jax.random.PRNGKey(1), (num_states, num_states))
model, tfp_model = _make_models(
init_logits=logits, trans_logits=matrix, obs_dist_name=name,
obs_params=params_fn(num_states), length=length)
states, obs = self.variant(functools.partial(model.sample, seq_len=length))(
seed=jax.random.PRNGKey(0))
tfp_obs = tfp_model.sample(seed=jax.random.PRNGKey(0))
with self.subTest("states"):
chex.assert_type(states, jnp.int32)
chex.assert_shape(states, (length,))
with self.subTest("observations"):
chex.assert_type(obs, model.obs_dist.dtype)
chex.assert_shape(obs, (length, *model.obs_dist.event_shape))
with self.subTest("matches TFP"):
chex.assert_equal_shape([obs, tfp_obs])
@chex.all_variants(without_device=False)
@_test_cases
def test_forward_backward(
self, length, num_states, obs_dist_name_and_params_fn):
name, params_fn = obs_dist_name_and_params_fn
logits = jax.random.normal(jax.random.PRNGKey(0), (num_states,))
matrix = jax.random.normal(jax.random.PRNGKey(1), (num_states, num_states))
model, tfp_model = _make_models(
init_logits=logits, trans_logits=matrix, obs_dist_name=name,
obs_params=params_fn(num_states), length=length)
_, observations = model.sample(seed=jax.random.PRNGKey(42), seq_len=length)
alphas, betas, marginals, log_prob = self.variant(model.forward_backward)(
observations)
tfp_marginal_logits = tfp_model.posterior_marginals(observations).logits
tfp_marginals = jax.nn.softmax(tfp_marginal_logits)
with self.subTest("alphas"):
chex.assert_type(alphas, jnp.float32)
chex.assert_shape(alphas, (length, num_states))
with self.subTest("betas"):
chex.assert_type(betas, jnp.float32)
chex.assert_shape(betas, (length, num_states))
with self.subTest("marginals"):
chex.assert_type(marginals, jnp.float32)
chex.assert_shape(marginals, (length, num_states))
with self.subTest("log_prob"):
chex.assert_type(log_prob, jnp.float32)
chex.assert_shape(log_prob, ())
with self.subTest("matches TFP"):
np.testing.assert_array_almost_equal(marginals, tfp_marginals, decimal=4)
@chex.all_variants(without_device=False)
@_test_cases
def test_viterbi(self, length, num_states, obs_dist_name_and_params_fn):
name, params_fn = obs_dist_name_and_params_fn
logits = jax.random.normal(jax.random.PRNGKey(0), (num_states,))
matrix = jax.random.normal(jax.random.PRNGKey(1), (num_states, num_states))
model, tfp_model = _make_models(
init_logits=logits, trans_logits=matrix, obs_dist_name=name,
obs_params=params_fn(num_states), length=length)
_, observations = model.sample(seed=jax.random.PRNGKey(42), seq_len=length)
most_likely_states = self.variant(model.viterbi)(observations)
tfp_mode = tfp_model.posterior_mode(observations)
with self.subTest("shape"):
chex.assert_shape(most_likely_states, (length,))
with self.subTest("matches TFP"):
np.testing.assert_array_equal(most_likely_states, tfp_mode)
@chex.all_variants(without_device=False)
def test_viterbi_matches_specific_example(self):
loc = jnp.array([0.0, 1.0, 2.0, 3.0])
scale = jnp.array(0.25)
initial = jnp.array([0.25, 0.25, 0.25, 0.25])
trans = jnp.array([[0.9, 0.1, 0.0, 0.0],
[0.1, 0.8, 0.1, 0.0],
[0.0, 0.1, 0.8, 0.1],
[0.0, 0.0, 0.1, 0.9]])
observations = jnp.array([0.1, 0.2, 0.3, 0.4, 0.5, 3.0, 2.9, 2.8, 2.7, 2.6])
model = hmm.HMM(
init_dist=categorical.Categorical(probs=initial),
trans_dist=categorical.Categorical(probs=trans),
obs_dist=normal.Normal(loc, scale))
inferred_states = self.variant(model.viterbi)(observations)
expected_states = [0, 0, 0, 0, 1, 2, 3, 3, 3, 3]
np.testing.assert_array_equal(inferred_states, expected_states)
if __name__ == "__main__":
absltest.main()
| distrax-master | distrax/_src/utils/hmm_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for performing Bijector-related JAX transformations.
A typical bijector will implement the following functionality:
- `forward`: the transformation of samples from an underlying distribution.
- `inverse`: the inverse function of `forward`.
- `[forward|inverse]_log_det_jacobian`: computes the log of the absolute value
of the determinant of the Jacobian of the bijector's forward or inverse
functions at a particular point.
- `is_constant_jacobian`: a boolean indicating whether the Jacobian of the
forward and inverse functions is constant with respect to the function's
input.
This module provides the following utilities for deriving these functions
automatically from JAX code implementing either the `forward` or `inverse`
functions:
- `inv`: inverts an input function `f` as long as it is composed of invertible
primitives. This is achieved by tracing `f` to its underlying JAXPR
representation and interpreting the code in reverse, substituting
each invertible primitive by its inverse. Once transformed by `jit`, the
code produced by the inversion interpreter is unrolled and JAX sees only the
resulting inverse operations.
The module provides the `register_inverse` function to specify custom
inversion rules for primitives not yet supported here.
See official JAX documentation for more information on tracing,
writing custom interpreters, and a simple example of function inversion.
- `log_det_scalar`: computes the log-determinant of the Jacobian of a scalar
function, using JAX autograd machinery.
- `is_constant_jacobian`: attempts to determine whether the Jacobian of `f` is
constant with respect to its input by tracing `f` to its underlying JAXPR
representation and checking whether any transformations of the input
appear in the output. This is an experimental feature.
"""
import functools
from absl import logging
import jax
import jax.numpy as jnp
_inverse_registry = {
# unary ops
jax.lax.tanh_p: jax.lax.atanh_p,
jax.lax.atanh_p: jax.lax.tanh_p,
jax.lax.sinh_p: jax.lax.asinh_p,
jax.lax.asinh_p: jax.lax.sinh_p,
jax.lax.cosh_p: jax.lax.acosh_p,
jax.lax.acosh_p: jax.lax.cosh_p,
jax.lax.exp_p: jax.lax.log_p,
jax.lax.log_p: jax.lax.exp_p,
jax.lax.sqrt_p: lambda x: jax.lax.pow_p.bind(x, 2.0),
jax.lax.rsqrt_p: lambda x: 1.0 / jax.lax.pow_p.bind(x, 2.0),
jax.lax.neg_p: jax.lax.neg_p,
jax.lax.log1p_p: jax.lax.expm1_p,
jax.lax.expm1_p: jax.lax.log1p_p,
jax.lax.erf_p: jax.lax.erf_inv_p,
jax.lax.erf_inv_p: jax.lax.erf_p,
jax.lax.conj_p: jax.lax.conj_p,
# binary ops; tuple values represent the variable-left/variable-right side
# case for non-commutatively invertible ops like div
jax.lax.mul_p: (jax.lax.div_p.bind, lambda x, y: jax.lax.div_p.bind(y, x)),
jax.lax.div_p: (jax.lax.mul_p.bind, jax.lax.div_p.bind),
jax.lax.add_p: (jax.lax.sub_p.bind, lambda x, y: jax.lax.sub_p.bind(y, x)),
jax.lax.sub_p: (jax.lax.add_p.bind, jax.lax.sub_p.bind),
jax.lax.pow_p: lambda x, y: jax.lax.pow_p.bind(x, 1.0/y),
jax.lax.integer_pow_p: lambda x, y: jax.lax.pow_p.bind(x, 1.0/y)
}
_potentially_unstable_primitives = {
jax.lax.tanh_p: "distrax.Tanh or distrax.Inverse(distrax.Tanh)",
jax.lax.atanh_p: "distrax.Tanh or distrax.Inverse(distrax.Tanh)",
}
def register_inverse(primitive, inverse_left, inverse_right=None):
"""Register a function that implements the inverse of a JAX primitive.
Args:
primitive: JAX primitive, often named `*_p` and located in `jax.lax.lax.py`.
inverse_left: a function implementing the inverse if the primitive is
a unary operator or if `inv(f(x,y)) == inv(f(y,x))`, else a function
implementing the inverse of a binary operator when the variable in
question comes before the operator, e.g. `x div_p 2`.
inverse_right: a function implementing the inverse of a binary
operator when the variable in question comes after the operator,
e.g. `2 div_p x`.
"""
if inverse_right is None:
_inverse_registry[primitive] = inverse_left
else:
_inverse_registry[primitive] = (inverse_left, inverse_right)
def inv(fun):
"""Returns the inverse of `fun` such that (inv(fun) o fun)(x) = x."""
jaxpr_fn = _invertible_jaxpr_and_constants(fun)
@functools.wraps(fun) # pylint: disable=no-value-for-parameter
def wrapped(*args, **kwargs):
jaxpr, consts = jaxpr_fn(*args, **kwargs)
out = _interpret_inverse(jaxpr, consts, *args)
return out[0]
return wrapped
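# Illustrative usage sketch added for exposition; not part of the library.
# It demonstrates `inv` on a small composition of invertible primitives.
def _example_inv():
  forward = lambda x: jnp.exp(x) + 3.
  inverse = inv(forward)  # behaves like lambda y: jnp.log(y - 3.)
  x = jnp.array([0.5])
  return inverse(forward(x))  # approximately recovers x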
def is_constant_jacobian(fn, x=0.0):
"""Experimental. Attempts to determine whether `fn` has a constant Jacobian.
This function attempts to determine whether the Jacobian of `fn` is constant
w.r.t. its input. We compute the Jacobian of `fn` at `x` and inspect the
jaxpr to determine whether any function of the input appears at the output.
Args:
fn: a JAX-traceable differentiable function taking scalar input.
x: the location at which to check whether the Jacobian is constant.
Returns:
Boolean value indicating whether the Jacobian is constant at `x`.
"""
jac_fn = jax.jacfwd(fn)
jac_jaxpr = jax.make_jaxpr(jac_fn)(jnp.array(x)).jaxpr
dependent_vars = _dependent_variables(jac_jaxpr)
jac_is_constant = not any(isinstance(v, jax.core.Var) and v in dependent_vars
for v in jac_jaxpr.outvars)
return jac_is_constant
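# Illustrative usage sketch added for exposition; not part of the library.
# Affine maps have a constant Jacobian, nonlinear maps such as tanh do not
# (see also `transformations_test.py`).
def _example_is_constant_jacobian():
  return (is_constant_jacobian(lambda x: 2. * x + 3.),  # True
          is_constant_jacobian(jnp.tanh))  # False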
def log_det_scalar(fn):
"""Uses JAX autograd to derive the log-det-jacobian of a scalar function."""
_check_numerical_stability(fn)
jac_fn = jax.vmap(jax.jacfwd(fn))
def log_det_fn(x):
x = jnp.asarray(x)
jac_scalar = jac_fn(x.reshape(-1))
log_det_ = jnp.log(jnp.absolute(jac_scalar))
return log_det_.reshape(x.shape)
return log_det_fn
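# Illustrative usage sketch added for exposition; not part of the library.
# For a scalar bijection f, `log_det_scalar(f)(x)` equals log|f'(x)|; for
# tanh, f'(x) = 1 - tanh(x)**2.
def _example_log_det_scalar():
  x = jnp.array([0.5, 1.])
  actual = log_det_scalar(jnp.tanh)(x)
  expected = jnp.log(jnp.abs(1. - jnp.tanh(x)**2))
  return actual, expected  # equal up to numerical precision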
def _check_numerical_stability(fn):
"""Logs a warning if numerically unstable operations are requested."""
jaxpr = jax.make_jaxpr(fn)(0.0).jaxpr
for eqn in jaxpr.eqns:
if eqn.primitive in _potentially_unstable_primitives:
      logging.warning(
          "[Distrax]: the '%s' primitive can exhibit unstable "
          "numerical behavior under certain circumstances. Consider "
          "using the %s bijector instead if possible.", eqn.primitive,
          _potentially_unstable_primitives[eqn.primitive])
def _dependent_variables(jaxpr, dependent=None):
"""Returns the set of variables in the jaxpr that depend on the input vars."""
if dependent is None:
dependent = set(jaxpr.invars)
for eqn in jaxpr.eqns:
# If primitive is an xla_call, get subexpressions and evaluate recursively
call_jaxpr, _ = _extract_call_jaxpr(eqn.primitive, eqn.params)
if call_jaxpr:
to_name = dict(zip(eqn.invars, call_jaxpr.invars))
arg_dependence = set(to_name[v] for v in eqn.invars if v in dependent)
subjaxpr_dependent = _dependent_variables(call_jaxpr, arg_dependence)
from_name = dict(zip(call_jaxpr.outvars, eqn.outvars))
dependent.update(from_name[v] for v in call_jaxpr.outvars
if v in subjaxpr_dependent)
else:
for v in eqn.invars:
if isinstance(v, jax.core.Var) and v in dependent:
dependent.update(eqn.outvars)
return dependent
def _invertible_jaxpr_and_constants(fun):
"""Returns a transformation from function invocation to invertible jaxpr."""
jaxpr_maker = jax.make_jaxpr(fun)
@functools.wraps(fun) # pylint: disable=no-value-for-parameter
def jaxpr_const_maker(*args, **kwargs):
typed_jaxpr = jaxpr_maker(*args, **kwargs)
return typed_jaxpr.jaxpr, typed_jaxpr.literals
return jaxpr_const_maker
def _identify_variable_in_eqn(eqn):
"""Identify whether primitive is a unop or binop and which side var is on."""
if len(eqn.invars) == 1: # unary operation
var_idx = 0
elif len(eqn.invars) == 2: # binary operation
if tuple(map(type, eqn.invars)) == (jax.core.Var, jax.core.Literal):
var_idx = 0
elif tuple(map(type, eqn.invars)) == (jax.core.Literal, jax.core.Var):
var_idx = 1
elif tuple(map(type, eqn.invars)) == (jax.core.Var, jax.core.Var):
raise NotImplementedError(
"Expressions with multiple occurrences of the input variable are "
"not supported. Please rearrange such that the variable appears only "
"once in the expression if possible. If not possible, consider "
"providing both `forward` and `inverse` to Lambda explicitly.")
elif tuple(map(type, eqn.invars)) == (jax.core.Literal, jax.core.Literal):
raise ValueError("Expression appears to contain no variables and "
"therefore cannot be inverted.")
else:
raise NotImplementedError("Unsupported binary op combination: "
+ str(tuple(map(type, eqn.invars))))
else:
raise NotImplementedError(f"Op {eqn.primitive} with cardinality >= 3 not "
"supported.")
return var_idx
def _interpret_inverse(jaxpr, consts, *args):
"""Interprets and executes the inverse of `jaxpr`."""
env = {}
def read(var):
return var.val if isinstance(var, jax.core.Literal) else env[var]
def write(var, val):
env[var] = val
jax.api_util.safe_map(write, jaxpr.outvars, args)
jax.api_util.safe_map(write, jaxpr.constvars, consts)
for eqn in reversed(jaxpr.eqns):
params = eqn.params.copy()
# identify the cardinality of the op and the index of the variable in eqn
var_idx = _identify_variable_in_eqn(eqn)
# if primitive is an xla_call, get subexpressions and evaluate recursively
call_jaxpr, params = _extract_call_jaxpr(eqn.primitive, params)
if call_jaxpr:
subfuns = [jax.linear_util.wrap_init(
functools.partial(_interpret_inverse, call_jaxpr, ()))]
prim_inv = eqn.primitive
elif eqn.primitive is jax.experimental.pjit.pjit_p:
pjit_jaxpr = params.pop("jaxpr")
partial_inverse = functools.partial(_interpret_inverse, pjit_jaxpr.jaxpr,
pjit_jaxpr.consts)
inverse_jaxpr = jax.make_jaxpr(partial_inverse)(*args)
params["jaxpr"] = inverse_jaxpr
prim_inv = eqn.primitive
subfuns = []
else: # otherwise, get its inverse if it exists
if eqn.primitive not in _inverse_registry:
raise NotImplementedError(
f"Primitive '{eqn.primitive}' does not have a registered inverse.")
# use the correct inverse formulation depending on whether the variable is
# on the left or right side of the expression
prim_inv = _inverse_registry[eqn.primitive]
if isinstance(prim_inv, tuple):
prim_inv = prim_inv[var_idx]
subfuns = []
# get the values of any variables in the eqn
invals = jax.api_util.safe_map(read, eqn.outvars)
# place the args and variables in the right order
if var_idx == 0:
prim_args = subfuns + invals + [v.val for v in eqn.invars[1:]]
else:
prim_args = subfuns + [v.val for v in eqn.invars[:1]] + invals
# if the inverse is a primitive, bind it, otherwise call it directly
if hasattr(prim_inv, "bind"):
outvals = prim_inv.bind(*prim_args, **params)
else:
outvals = prim_inv(*prim_args, **params)
# if the primitive returns multiple results, write them all to env
if (hasattr(prim_inv, "multiple_results") and prim_inv.multiple_results):
jax.api_util.safe_map(write, eqn.invars, outvals)
else:
write(eqn.invars[var_idx], outvals)
if any(v not in env for v in jaxpr.invars):
raise ValueError("Expression appears to contain no variables and therefore "
"cannot be inverted.")
return jax.api_util.safe_map(read, jaxpr.invars)
def _extract_call_jaxpr(primitive, params):
if not (primitive.call_primitive or primitive.map_primitive):
return None, params
else:
params = dict(params)
return params.pop("call_jaxpr"), params
| distrax-master | distrax/_src/utils/transformations.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `monte_carlo.py`."""
from absl.testing import absltest
import chex
from distrax._src.distributions import mvn_diag
from distrax._src.distributions import normal
from distrax._src.utils import monte_carlo
import haiku as hk
import jax
import numpy as np
from tensorflow_probability.substrates import jax as tfp
class McTest(absltest.TestCase):
def test_estimate_kl_with_dice(self):
batch_size = 5
num_actions = 11
num_samples = 1024
rng_seq = hk.PRNGSequence(0)
distribution_a = tfp.distributions.Categorical(
logits=jax.random.normal(next(rng_seq), [batch_size, num_actions]))
distribution_b = tfp.distributions.Categorical(
logits=jax.random.normal(next(rng_seq), [batch_size, num_actions]))
kl_estim_exact = monte_carlo.estimate_kl_best_effort(
distribution_a, distribution_b, next(rng_seq), num_samples=num_samples)
kl_estim_mc = monte_carlo.mc_estimate_kl(
distribution_a, distribution_b, next(rng_seq), num_samples=num_samples)
kl = distribution_a.kl_divergence(distribution_b)
np.testing.assert_allclose(kl, kl_estim_exact, rtol=1e-5)
np.testing.assert_allclose(kl, kl_estim_mc, rtol=2e-1)
def test_estimate_continuous_kl_with_dice(self):
_check_kl_estimator(monte_carlo.mc_estimate_kl, tfp.distributions.Normal)
_check_kl_estimator(monte_carlo.mc_estimate_kl, normal.Normal)
def test_estimate_continuous_kl_with_reparameterized(self):
_check_kl_estimator(monte_carlo.mc_estimate_kl_with_reparameterized,
tfp.distributions.Normal)
_check_kl_estimator(monte_carlo.mc_estimate_kl_with_reparameterized,
normal.Normal)
def test_estimate_mode(self):
with self.subTest('ScalarEventShape'):
distribution = normal.Normal(
loc=np.zeros((4, 5, 100)),
scale=np.ones((4, 5, 100)))
# pytype: disable=wrong-arg-types
mode_estimate = monte_carlo.mc_estimate_mode(
distribution, rng_key=42, num_samples=100)
# pytype: enable=wrong-arg-types
mean_mode_estimate = np.abs(np.mean(mode_estimate))
self.assertLess(mean_mode_estimate, 1e-3)
with self.subTest('NonScalarEventShape'):
distribution = mvn_diag.MultivariateNormalDiag(
loc=np.zeros((4, 5, 100)),
scale_diag=np.ones((4, 5, 100)))
# pytype: disable=wrong-arg-types
mv_mode_estimate = monte_carlo.mc_estimate_mode(
distribution, rng_key=42, num_samples=100)
# pytype: enable=wrong-arg-types
mean_mv_mode_estimate = np.abs(np.mean(mv_mode_estimate))
self.assertLess(mean_mv_mode_estimate, 1e-1)
# The mean of the mode-estimate of the Normal should be a lot closer
    # to 0 than that of the MultivariateNormal: the MultivariateNormal
    # effectively gets 100x fewer samples per dimension, and most of the mass
    # of a high-dimensional Gaussian is NOT at 0!
self.assertLess(10 * mean_mode_estimate, mean_mv_mode_estimate)
def _check_kl_estimator(estimator_fn, distribution_fn, num_samples=10000,
rtol=1e-1, atol=1e-3, grad_rtol=2e-1, grad_atol=1e-1):
"""Compares the estimator_fn output and gradient to exact KL."""
rng_key = jax.random.PRNGKey(0)
def expected_kl(params):
distribution_a = distribution_fn(**params[0])
distribution_b = distribution_fn(**params[1])
return distribution_a.kl_divergence(distribution_b)
def estimate_kl(params):
distribution_a = distribution_fn(**params[0])
distribution_b = distribution_fn(**params[1])
return estimator_fn(distribution_a, distribution_b, rng_key=rng_key,
num_samples=num_samples)
params = (
dict(loc=0.0, scale=1.0),
dict(loc=0.1, scale=1.0),
)
expected_value, expected_grad = jax.value_and_grad(expected_kl)(params)
value, grad = jax.value_and_grad(estimate_kl)(params)
np.testing.assert_allclose(expected_value, value, rtol=rtol, atol=atol)
chex.assert_trees_all_close(
expected_grad, grad, rtol=grad_rtol, atol=grad_atol
)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/utils/monte_carlo_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `jittable.py`."""
from absl.testing import absltest
from absl.testing import parameterized
from distrax._src.utils import jittable
import jax
import jax.numpy as jnp
import numpy as np
class DummyJittable(jittable.Jittable):
def __init__(self, params):
self.name = 'dummy' # Non-JAX property, cannot be traced.
self.data = {'params': params} # Tree property, must be traced recursively.
class JittableTest(parameterized.TestCase):
def test_jittable(self):
@jax.jit
def get_params(obj):
return obj.data['params']
obj = DummyJittable(jnp.ones((5,)))
np.testing.assert_array_equal(get_params(obj), obj.data['params'])
def test_vmappable(self):
def do_sum(obj):
return obj.data['params'].sum()
obj = DummyJittable(jnp.array([[1, 2, 3], [4, 5, 6]]))
with self.subTest('no vmap'):
np.testing.assert_array_equal(do_sum(obj), obj.data['params'].sum())
with self.subTest('in_axes=0'):
np.testing.assert_array_equal(
jax.vmap(do_sum, in_axes=0)(obj), obj.data['params'].sum(axis=1))
with self.subTest('in_axes=1'):
np.testing.assert_array_equal(
jax.vmap(do_sum, in_axes=1)(obj), obj.data['params'].sum(axis=0))
def test_traceable(self):
@jax.jit
def inner_fn(obj):
obj.data['params'] *= 3 # Modification after passing to jitted fn.
return obj.data['params'].sum()
def loss_fn(params):
obj = DummyJittable(params)
obj.data['params'] *= 2 # Modification before passing to jitted fn.
return inner_fn(obj)
with self.subTest('numpy'):
params = np.ones((5,))
# Both modifications will be traced if data tree is correctly traversed.
grad_expected = params * 2 * 3
grad = jax.grad(loss_fn)(params)
np.testing.assert_array_equal(grad, grad_expected)
with self.subTest('jax.numpy'):
params = jnp.ones((5,))
# Both modifications will be traced if data tree is correctly traversed.
grad_expected = params * 2 * 3
grad = jax.grad(loss_fn)(params)
np.testing.assert_array_equal(grad, grad_expected)
def test_different_jittables_to_compiled_function(self):
@jax.jit
def add_one_to_params(obj):
obj.data['params'] = obj.data['params'] + 1
return obj
with self.subTest('numpy'):
add_one_to_params(DummyJittable(np.zeros((5,))))
add_one_to_params(DummyJittable(np.ones((5,))))
with self.subTest('jax.numpy'):
add_one_to_params(DummyJittable(jnp.zeros((5,))))
add_one_to_params(DummyJittable(jnp.ones((5,))))
def test_modifying_object_data_does_not_leak_tracers(self):
@jax.jit
def add_one_to_params(obj):
obj.data['params'] = obj.data['params'] + 1
return obj
dummy = DummyJittable(jnp.ones((5,)))
dummy_out = add_one_to_params(dummy)
dummy_out.data['params'] -= 1
def test_metadata_modification_statements_are_removed_by_compilation(self):
@jax.jit
def add_char_to_name(obj):
obj.name += '_x'
return obj
dummy = DummyJittable(jnp.ones((5,)))
dummy_out = add_char_to_name(dummy)
dummy_out = add_char_to_name(dummy) # `name` change has been compiled out.
dummy_out.name += 'y'
self.assertEqual(dummy_out.name, 'dummy_xy')
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/utils/jittable_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for conversion between different types."""
from typing import Optional, Union
import chex
from distrax._src.bijectors import bijector
from distrax._src.bijectors import bijector_from_tfp
from distrax._src.bijectors import lambda_bijector
from distrax._src.bijectors import sigmoid
from distrax._src.bijectors import tanh
from distrax._src.bijectors import tfp_compatible_bijector
from distrax._src.distributions import distribution
from distrax._src.distributions import distribution_from_tfp
from distrax._src.distributions import tfp_compatible_distribution
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
BijectorLike = bijector.BijectorLike
DistributionLike = distribution.DistributionLike
def to_tfp(obj: Union[bijector.Bijector, tfb.Bijector,
distribution.Distribution, tfd.Distribution],
name: Optional[str] = None):
"""Converts a distribution or bijector to a TFP-compatible equivalent object.
The returned object is not necessarily of type `tfb.Bijector` or
`tfd.Distribution`; rather, it is a Distrax object that implements TFP
functionality so that it can be used in TFP.
If the input is already of TFP type, it is returned unchanged.
Args:
obj: The distribution or bijector to be converted to TFP.
name: The name of the resulting object.
Returns:
A TFP-compatible equivalent distribution or bijector.
"""
if isinstance(obj, (tfb.Bijector, tfd.Distribution)):
return obj
elif isinstance(obj, bijector.Bijector):
return tfp_compatible_bijector.tfp_compatible_bijector(obj, name)
elif isinstance(obj, distribution.Distribution):
return tfp_compatible_distribution.tfp_compatible_distribution(obj, name)
else:
raise TypeError(
f"`to_tfp` can only convert objects of type: `distrax.Bijector`,"
f" `tfb.Bijector`, `distrax.Distribution`, `tfd.Distribution`. Got type"
f" `{type(obj)}`.")
def as_bijector(obj: BijectorLike) -> bijector.BijectorT:
"""Converts a bijector-like object to a Distrax bijector.
Bijector-like objects are: Distrax bijectors, TFP bijectors, and callables.
Distrax bijectors are returned unchanged. TFP bijectors are converted to a
Distrax equivalent. Callables are wrapped by `distrax.Lambda`, with a few
exceptions where an explicit implementation already exists and is returned.
Args:
obj: The bijector-like object to be converted.
Returns:
A Distrax bijector.
"""
if isinstance(obj, bijector.Bijector):
return obj
elif isinstance(obj, tfb.Bijector):
return bijector_from_tfp.BijectorFromTFP(obj)
elif obj is jax.nn.sigmoid:
return sigmoid.Sigmoid()
elif obj is jnp.tanh:
return tanh.Tanh()
elif callable(obj):
return lambda_bijector.Lambda(obj)
else:
raise TypeError(
f"A bijector-like object can be a `distrax.Bijector`, a `tfb.Bijector`,"
f" or a callable. Got type `{type(obj)}`.")
def as_distribution(obj: DistributionLike) -> distribution.DistributionT:
"""Converts a distribution-like object to a Distrax distribution.
Distribution-like objects are: Distrax distributions and TFP distributions.
Distrax distributions are returned unchanged. TFP distributions are converted
to a Distrax equivalent.
Args:
obj: A distribution-like object to be converted.
Returns:
A Distrax distribution.
"""
if isinstance(obj, distribution.Distribution):
return obj
elif isinstance(obj, tfd.Distribution):
return distribution_from_tfp.distribution_from_tfp(obj)
else:
raise TypeError(
f"A distribution-like object can be a `distrax.Distribution` or a"
f" `tfd.Distribution`. Got type `{type(obj)}`.")
def as_float_array(x: Numeric) -> Array:
"""Converts input to an array with floating-point dtype.
If the input is already an array with floating-point dtype, it is returned
unchanged.
Args:
x: input to convert.
Returns:
An array with floating-point dtype.
"""
if not isinstance(x, (jax.Array, np.ndarray)):
x = jnp.asarray(x)
if jnp.issubdtype(x.dtype, jnp.floating):
return x
elif jnp.issubdtype(x.dtype, jnp.integer):
return x.astype(jnp.float_)
else:
raise ValueError(
f"Expected either floating or integer dtype, got {x.dtype}.")
| distrax-master | distrax/_src/utils/conversion.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| distrax-master | distrax/_src/utils/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monte-Carlo estimation of the KL divergence."""
from typing import Optional
import chex
from distrax._src.distributions.distribution import DistributionLike
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
PRNGKey = chex.PRNGKey
def estimate_kl_best_effort(
distribution_a: DistributionLike,
distribution_b: DistributionLike,
rng_key: PRNGKey,
num_samples: int,
proposal_distribution: Optional[DistributionLike] = None):
"""Estimates KL(distribution_a, distribution_b) exactly or with DiCE.
If the kl_divergence(distribution_a, distribution_b) is not supported,
the DiCE estimator is used instead.
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
rng_key: The PRNGKey random key.
num_samples: The number of samples, if using the DiCE estimator.
proposal_distribution: A proposal distribution for the samples, if using
the DiCE estimator. If None, use `distribution_a` as proposal.
Returns:
The estimated KL divergence.
"""
distribution_a = conversion.as_distribution(distribution_a)
distribution_b = conversion.as_distribution(distribution_b)
# If possible, compute the exact KL.
try:
return tfd.kl_divergence(distribution_a, distribution_b)
except NotImplementedError:
pass
return mc_estimate_kl(distribution_a, distribution_b, rng_key,
num_samples=num_samples,
proposal_distribution=proposal_distribution)
def mc_estimate_kl(
distribution_a: DistributionLike,
distribution_b: DistributionLike,
rng_key: PRNGKey,
num_samples: int,
proposal_distribution: Optional[DistributionLike] = None):
"""Estimates KL(distribution_a, distribution_b) with the DiCE estimator.
  To get correct gradients with respect to `distribution_a`, we use the DiCE
  estimator, i.e., we stop the gradient with respect to the samples and with
  respect to the denominator in the importance weights. We then do not need
  reparameterized distributions.
Args:
distribution_a: The first distribution.
distribution_b: The second distribution.
rng_key: The PRNGKey random key.
num_samples: The number of samples, if using the DiCE estimator.
proposal_distribution: A proposal distribution for the samples, if using the
DiCE estimator. If None, use `distribution_a` as proposal.
Returns:
The estimated KL divergence.
"""
if proposal_distribution is None:
proposal_distribution = distribution_a
proposal_distribution = conversion.as_distribution(proposal_distribution)
distribution_a = conversion.as_distribution(distribution_a)
distribution_b = conversion.as_distribution(distribution_b)
samples, logp_proposal = proposal_distribution.sample_and_log_prob(
seed=rng_key, sample_shape=[num_samples])
samples = jax.lax.stop_gradient(samples)
logp_proposal = jax.lax.stop_gradient(logp_proposal)
logp_a = distribution_a.log_prob(samples)
logp_b = distribution_b.log_prob(samples)
importance_weight = jnp.exp(logp_a - logp_proposal)
log_ratio = logp_b - logp_a
kl_estimator = -importance_weight * log_ratio
return jnp.mean(kl_estimator, axis=0)
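# Illustrative usage sketch added for exposition; not part of the library.
# The DiCE estimate approaches the analytic KL between two Normals as
# `num_samples` grows.
def _example_mc_estimate_kl():
  from distrax._src.distributions import normal  # example-only import
  dist_a = normal.Normal(loc=0., scale=1.)
  dist_b = normal.Normal(loc=0.1, scale=1.)
  approx_kl = mc_estimate_kl(
      dist_a, dist_b, jax.random.PRNGKey(0), num_samples=10000)
  exact_kl = dist_a.kl_divergence(dist_b)
  return approx_kl, exact_kl  # approximately equal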
def mc_estimate_kl_with_reparameterized(
distribution_a: DistributionLike,
distribution_b: DistributionLike,
rng_key: PRNGKey,
num_samples: int):
"""Estimates KL(distribution_a, distribution_b)."""
if isinstance(distribution_a, tfd.Distribution):
if distribution_a.reparameterization_type != tfd.FULLY_REPARAMETERIZED:
raise ValueError(
f'Distribution `{distribution_a.name}` cannot be reparameterized.')
distribution_a = conversion.as_distribution(distribution_a)
distribution_b = conversion.as_distribution(distribution_b)
samples, logp_a = distribution_a.sample_and_log_prob(
seed=rng_key, sample_shape=[num_samples])
logp_b = distribution_b.log_prob(samples)
log_ratio = logp_b - logp_a
kl_estimator = -log_ratio
return jnp.mean(kl_estimator, axis=0)
def mc_estimate_mode(
distribution: DistributionLike,
rng_key: PRNGKey,
num_samples: int):
"""Returns a Monte Carlo estimate of the mode of a distribution."""
distribution = conversion.as_distribution(distribution)
# Obtain samples from the distribution and their log probability.
samples, log_probs = distribution.sample_and_log_prob(
seed=rng_key, sample_shape=[num_samples])
# Do argmax over the sample_shape.
index = jnp.expand_dims(jnp.argmax(log_probs, axis=0), axis=0)
# Broadcast index to include event_shape of the sample.
index = index.reshape(index.shape + (1,) * (samples.ndim - index.ndim))
mode = jnp.squeeze(jnp.take_along_axis(samples, index, axis=0), axis=0)
return mode
| distrax-master | distrax/_src/utils/monte_carlo.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `conversion.py`."""
import sys
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from distrax._src.bijectors.bijector import Bijector
from distrax._src.bijectors.rational_quadratic_spline import RationalQuadraticSpline
from distrax._src.bijectors.tanh import Tanh
from distrax._src.distributions.categorical import Categorical
from distrax._src.distributions.distribution import Distribution
from distrax._src.distributions.normal import Normal
from distrax._src.distributions.transformed import Transformed
from distrax._src.utils import conversion
import jax
from jax.config import config as jax_config
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
FLAGS = flags.FLAGS
flags.DEFINE_bool('test_jax_enable_x64', False,
'Whether to enable double precision for tests.')
def setUpModule():
if not FLAGS.is_parsed():
FLAGS(sys.argv, known_only=True)
if FLAGS['test_jax_enable_x64'].value:
jax_config.update('jax_enable_x64', True)
class AsBijectorTest(parameterized.TestCase):
def test_num_bins_attr_of_rational_quadratic_spline(self):
num_bins = 4
bijector = RationalQuadraticSpline(
jnp.zeros((3 * num_bins + 1,)),
range_min=0.,
range_max=1.)
wrapped_bijector = conversion.as_bijector(bijector)
assert isinstance(wrapped_bijector, RationalQuadraticSpline)
self.assertIs(wrapped_bijector, bijector)
# Access the `num_bins` attribute of a wrapped RationalQuadraticSpline.
np.testing.assert_equal(wrapped_bijector.num_bins, num_bins)
def test_on_tfp_bijector(self):
inputs = jnp.array([0., 1.])
bijector = tfb.Exp()
wrapped_bijector = conversion.as_bijector(bijector)
assert isinstance(wrapped_bijector, Bijector)
np.testing.assert_array_almost_equal(
wrapped_bijector.forward(inputs),
bijector.forward(inputs))
class AsDistributionTest(parameterized.TestCase):
def test_loc_attr_of_normal(self):
dist = Normal(loc=0., scale=1.)
wrapped_dist = conversion.as_distribution(dist)
assert isinstance(wrapped_dist, Normal)
self.assertIs(wrapped_dist, dist)
# Access the `loc` attribute of a wrapped Normal.
np.testing.assert_almost_equal(wrapped_dist.loc, 0.)
def test_num_categories_attr_of_categorical(self):
dist = Categorical(logits=jnp.array([0., 0., 0.]))
wrapped_dist = conversion.as_distribution(dist)
assert isinstance(wrapped_dist, Categorical)
self.assertIs(wrapped_dist, dist)
# Access the `num_categories` attribute of a wrapped Categorical.
np.testing.assert_equal(wrapped_dist.num_categories, 3)
def test_attrs_of_transformed_distribution(self):
dist = Transformed(Normal(loc=0., scale=1.), bijector=lambda x: x)
wrapped_dist = conversion.as_distribution(dist)
assert isinstance(wrapped_dist, Transformed)
self.assertIs(wrapped_dist, dist)
# Access the `distribution` attribute of a wrapped Transformed.
assert isinstance(wrapped_dist.distribution, Normal)
# Access the `loc` attribute of a transformed Normal within a wrapped
# Transformed.
np.testing.assert_almost_equal(wrapped_dist.distribution.loc, 0.)
def test_on_tfp_distribution(self):
dist = tfd.Normal(loc=0., scale=1.)
wrapped_dist = conversion.as_distribution(dist)
assert isinstance(wrapped_dist, tfd.Normal)
assert isinstance(wrapped_dist, Distribution)
# Access the `loc` attribute of a wrapped Normal.
np.testing.assert_almost_equal(wrapped_dist.loc, 0.)
class ToTfpTest(parameterized.TestCase):
def test_on_distrax_distribution(self):
dist = Normal(loc=0., scale=1.)
wrapped_dist = conversion.to_tfp(dist)
assert isinstance(wrapped_dist, Normal)
# Access the `loc` attribute of a wrapped Normal.
np.testing.assert_almost_equal(wrapped_dist.loc, 0.)
def test_on_distrax_bijector(self):
bij = Tanh()
wrapped_bij = conversion.to_tfp(bij)
assert isinstance(wrapped_bij, Tanh)
# Call the `forward` attribute of a wrapped Tanh.
np.testing.assert_equal(
wrapped_bij.forward(np.zeros(())), bij.forward(np.zeros(())))
def test_on_tfp_distribution(self):
dist = tfd.Normal(0., 1.)
wrapped_dist = conversion.to_tfp(dist)
self.assertIs(wrapped_dist, dist)
def test_on_tfp_bijector(self):
bij = tfb.Exp()
wrapped_bij = conversion.to_tfp(bij)
self.assertIs(wrapped_bij, bij)
class AsFloatArrayTest(parameterized.TestCase):
@parameterized.parameters(0, 0.1)
def test_on_valid_scalar(self, x):
y = conversion.as_float_array(x)
self.assertIsInstance(y, jnp.ndarray)
self.assertEqual(
y.dtype, jnp.float64 if jax.config.x64_enabled else jnp.float32)
@parameterized.parameters(True, 1j)
def test_on_invalid_scalar(self, x):
with self.assertRaises(ValueError):
conversion.as_float_array(x)
@parameterized.parameters(
float, jnp.float_, jnp.float16, jnp.float32, jnp.float64, jnp.bfloat16)
def test_on_float_array(self, dtype):
x = jnp.zeros([], dtype)
y = conversion.as_float_array(x)
self.assertIs(y, x)
@parameterized.parameters(
int, jnp.int_, jnp.int8, jnp.int16, jnp.int32, jnp.int64,
jnp.uint8, jnp.uint16, jnp.uint32, jnp.uint64)
def test_on_int_array(self, dtype):
x = jnp.zeros([], dtype)
y = conversion.as_float_array(x)
self.assertIsInstance(y, jnp.ndarray)
self.assertEqual(
y.dtype, jnp.float64 if jax.config.x64_enabled else jnp.float32)
@parameterized.parameters(
bool, jnp.bool_, complex, jnp.complex_, jnp.complex64, jnp.complex128)
def test_on_invalid_array(self, dtype):
x = jnp.zeros([], dtype)
with self.assertRaises(ValueError):
conversion.as_float_array(x)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/utils/conversion_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility math functions."""
from typing import Optional, Tuple
import chex
import jax
import jax.numpy as jnp
Array = chex.Array
@jax.custom_jvp
def multiply_no_nan(x: Array, y: Array) -> Array:
"""Equivalent of TF `multiply_no_nan`.
  Computes the element-wise product of `x` and `y` and returns 0 if `y` is
  zero, even if `x` is NaN or infinite.
Args:
x: First input.
y: Second input.
Returns:
The product of `x` and `y`.
Raises:
ValueError if the shapes of `x` and `y` do not match.
"""
dtype = jnp.result_type(x, y)
return jnp.where(y == 0, jnp.zeros((), dtype=dtype), x * y)
@multiply_no_nan.defjvp
def multiply_no_nan_jvp(
primals: Tuple[Array, Array],
tangents: Tuple[Array, Array]) -> Tuple[Array, Array]:
"""Custom gradient computation for `multiply_no_nan`."""
x, y = primals
x_dot, y_dot = tangents
primal_out = multiply_no_nan(x, y)
tangent_out = y * x_dot + x * y_dot
return primal_out, tangent_out
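# Illustrative usage sketch added for exposition; not part of the library.
def _example_multiply_no_nan():
  x = jnp.array([jnp.inf, jnp.nan, 2.])
  y = jnp.array([0., 0., 3.])
  # Plain `x * y` would give [nan, nan, 6.]; `multiply_no_nan` returns zero
  # wherever `y == 0` instead.
  return multiply_no_nan(x, y)  # [0., 0., 6.]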
@jax.custom_jvp
def power_no_nan(x: Array, y: Array) -> Array:
"""Computes `x ** y` and ensure that the result is 1.0 when `y` is zero.
Compute the element-wise power `x ** y` and return 1.0 when `y` is zero,
regardless of the value of `x`, even if it is NaN or infinite. This method
uses the convention `0 ** 0 = 1`.
Args:
x: First input.
y: Second input.
Returns:
The power `x ** y`.
"""
dtype = jnp.result_type(x, y)
return jnp.where(y == 0, jnp.ones((), dtype=dtype), jnp.power(x, y))
@power_no_nan.defjvp
def power_no_nan_jvp(
primals: Tuple[Array, Array],
tangents: Tuple[Array, Array]) -> Tuple[Array, Array]:
"""Custom gradient computation for `power_no_nan`."""
x, y = primals
x_dot, y_dot = tangents
primal_out = power_no_nan(x, y)
tangent_out = (y * power_no_nan(x, y - 1) * x_dot
+ primal_out * jnp.log(x) * y_dot)
return primal_out, tangent_out
def mul_exp(x: Array, logp: Array) -> Array:
"""Returns `x * exp(logp)` with zero output if `exp(logp)==0`.
Args:
x: An array.
logp: An array.
Returns:
`x * exp(logp)` with zero output and zero gradient if `exp(logp)==0`,
even if `x` is NaN or infinite.
"""
p = jnp.exp(logp)
# If p==0, the gradient with respect to logp is zero,
# so we can replace the possibly non-finite `x` with zero.
x = jnp.where(p == 0, 0.0, x)
return x * p
def normalize(
*, probs: Optional[Array] = None, logits: Optional[Array] = None) -> Array:
"""Normalize logits (via log_softmax) or probs (ensuring they sum to one)."""
if logits is None:
probs = jnp.asarray(probs)
return probs / probs.sum(axis=-1, keepdims=True)
else:
logits = jnp.asarray(logits)
return jax.nn.log_softmax(logits, axis=-1)
def sum_last(x: Array, ndims: int) -> Array:
"""Sums the last `ndims` axes of array `x`."""
axes_to_sum = tuple(range(-ndims, 0))
return jnp.sum(x, axis=axes_to_sum)
def log_expbig_minus_expsmall(big: Array, small: Array) -> Array:
"""Stable implementation of `log(exp(big) - exp(small))`.
Args:
big: First input.
small: Second input. It must be `small <= big`.
Returns:
The resulting `log(exp(big) - exp(small))`.
"""
return big + jnp.log1p(-jnp.exp(small - big))
def log_beta(a: Array, b: Array) -> Array:
"""Obtains the log of the beta function `log B(a, b)`.
Args:
a: First input. It must be positive.
b: Second input. It must be positive.
Returns:
The value `log B(a, b) = log Gamma(a) + log Gamma(b) - log Gamma(a + b)`,
where `Gamma` is the Gamma function, obtained through stable computation of
`log Gamma`.
"""
return jax.lax.lgamma(a) + jax.lax.lgamma(b) - jax.lax.lgamma(a + b)
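# Illustrative usage sketch added for exposition; not part of the library.
# For positive integers, B(a, b) = (a-1)!(b-1)!/(a+b-1)!, so B(2, 3) = 1/12.
def _example_log_beta():
  return log_beta(jnp.array(2.), jnp.array(3.)), jnp.log(1. / 12.)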
def log_beta_multivariate(a: Array) -> Array:
"""Obtains the log of the multivariate beta function `log B(a)`.
Args:
a: An array of length `K` containing positive values.
Returns:
The value
`log B(a) = sum_{k=1}^{K} log Gamma(a_k) - log Gamma(sum_{k=1}^{K} a_k)`,
where `Gamma` is the Gamma function, obtained through stable computation of
`log Gamma`.
"""
return (
jnp.sum(jax.lax.lgamma(a), axis=-1) - jax.lax.lgamma(jnp.sum(a, axis=-1)))
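# Illustrative usage sketch (an editorial addition, not part of the original
# module). It shows why the `_no_nan` variants and their custom JVPs are
# useful: values and gradients stay finite at the otherwise problematic
# points `0 * inf` and `0 ** 0`.
def _example_no_nan_usage():
  """Minimal sketch demonstrating `multiply_no_nan` and `power_no_nan`."""
  # Plain `inf * 0.` would be NaN; `multiply_no_nan` returns 0 since y == 0.
  assert multiply_no_nan(jnp.inf, 0.) == 0.
  # `power_no_nan` uses the convention `0 ** 0 = 1`, even for NaN/inf bases.
  assert power_no_nan(jnp.nan, 0.) == 1.
  # The custom JVP gives d/dx multiply_no_nan(x, y) = y, so the gradient at
  # x = inf, y = 0 is 0 rather than NaN.
  grad_x = jax.grad(lambda x: multiply_no_nan(x, 0.))(jnp.inf)
  assert grad_x == 0.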
| distrax-master | distrax/_src/utils/math.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Importance sampling."""
import chex
from distrax._src.distributions import distribution
import jax.numpy as jnp
Array = chex.Array
DistributionLike = distribution.DistributionLike
def importance_sampling_ratios(
target_dist: DistributionLike,
sampling_dist: DistributionLike,
event: Array
) -> Array:
"""Compute importance sampling ratios given target and sampling distributions.
Args:
target_dist: Target probability distribution.
sampling_dist: Sampling probability distribution.
event: Samples.
Returns:
Importance sampling ratios.
"""
log_pi_a = target_dist.log_prob(event)
log_mu_a = sampling_dist.log_prob(event)
rho = jnp.exp(log_pi_a - log_mu_a)
return rho
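# Illustrative usage sketch (an editorial addition, not part of the original
# module). The `Normal` import and parameter values are assumptions for the
# example only; any Distrax or TFP distribution with `log_prob` would work.
def _example_importance_sampling_ratios():
  """Minimal sketch of `importance_sampling_ratios` with Normals."""
  import jax  # Example-only imports, kept local to the sketch.
  from distrax._src.distributions import normal
  target = normal.Normal(loc=0., scale=1.)      # Target distribution pi.
  sampling = normal.Normal(loc=0.5, scale=1.5)  # Behaviour distribution mu.
  events = sampling.sample(seed=jax.random.PRNGKey(0), sample_shape=(5,))
  rho = importance_sampling_ratios(target, sampling, events)
  # Each ratio equals pi(event) / mu(event), computed in log space above.
  assert rho.shape == (5,)
  return rho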
| distrax-master | distrax/_src/utils/importance_sampling.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract class for Jittable objects."""
import abc
import jax
class Jittable(metaclass=abc.ABCMeta):
"""ABC that can be passed as an arg to a jitted fn, with readable state."""
def __new__(cls, *args, **kwargs):
# Discard the parameters to this function because the constructor is not
# called during serialization: its `__dict__` gets repopulated directly.
del args, kwargs
try:
registered_cls = jax.tree_util.register_pytree_node_class(cls)
except ValueError:
registered_cls = cls # Already registered.
return object.__new__(registered_cls)
def tree_flatten(self):
leaves, treedef = jax.tree_util.tree_flatten(self.__dict__)
switch = list(map(_is_jax_data, leaves))
children = [leaf if s else None for leaf, s in zip(leaves, switch)]
metadata = [None if s else leaf for leaf, s in zip(leaves, switch)]
return children, (metadata, switch, treedef)
@classmethod
def tree_unflatten(cls, aux_data, children):
metadata, switch, treedef = aux_data
leaves = [j if s else p for j, p, s in zip(children, metadata, switch)]
obj = object.__new__(cls)
obj.__dict__ = jax.tree_util.tree_unflatten(treedef, leaves)
return obj
def _is_jax_data(x):
"""Check whether `x` is an instance of a JAX-compatible type."""
# If it's a tracer, then it's already been converted by JAX.
if isinstance(x, jax.core.Tracer):
return True
# `jax.vmap` replaces vmappable leaves with `object()` during serialization.
if type(x) is object: # pylint: disable=unidiomatic-typecheck
return True
# Primitive types (e.g. shape tuples) are treated as metadata for Distrax.
if isinstance(x, (bool, int, float)) or x is None:
return False
# Otherwise, try to make it into a tracer. If it succeeds, then it's JAX data.
try:
jax.interpreters.xla.abstractify(x)
return True
except TypeError:
return False
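# Illustrative usage sketch (an editorial addition, not part of the original
# module): a toy Jittable subclass whose JAX-array state is flattened into
# pytree leaves, so instances can be passed straight into jitted functions,
# while plain-Python attributes are carried as static metadata.
class _ExampleScaler(Jittable):
  """Toy object holding a JAX array; usable as an argument to `jax.jit`."""
  def __init__(self, scale):
    import jax.numpy as jnp  # Example-only import, kept local to the sketch.
    self.scale = jnp.asarray(scale)  # JAX data -> becomes a pytree leaf.
    self.name = 'scaler'             # Python data -> treated as metadata.
  def __call__(self, x):
    return self.scale * x
def _example_jittable_usage():
  """Passes an `_ExampleScaler` instance through a jitted function."""
  import jax.numpy as jnp  # Example-only import, kept local to the sketch.
  apply_fn = jax.jit(lambda module, x: module(x))
  return apply_fn(_ExampleScaler(2.), jnp.arange(3.))  # == [0., 2., 4.]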
| distrax-master | distrax/_src/utils/jittable.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for testing equivalence between TFP and Distrax."""
import functools
from typing import Any, Callable, Dict, Optional, Tuple, Union
from absl.testing import parameterized
import chex
from distrax._src.distributions import distribution
import jax
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
def get_tfp_equiv(distrax_cls):
"""Returns the TFP equivalent of a Distrax class.
Args:
distrax_cls: The Distrax class or the name of the class as a string.
Returns:
The equivalent TFP class if found, else `None`.
Raises:
ValueError:
If `distrax_cls` is neither a Distrax class nor a string.
#### Examples
from distrax import normal
from distrax import tfp_utils
# equivalent to tfd.normal.Normal(0, 1)
tfp_utils.get_tfp_equiv(normal.Normal)(0, 1)
# equivalent to tfd.normal.Normal(0, 1)
tfp_utils.get_tfp_equiv("Normal")(0, 1)
"""
if isinstance(distrax_cls, str):
tfp_class_str = distrax_cls
elif issubclass(distrax_cls, distribution.Distribution):
tfp_class_str = distrax_cls.__name__
else:
raise ValueError(
'distrax_cls must be the class object or the name of the class object'
' as a string'
)
if hasattr(tfd, tfp_class_str):
return getattr(tfd, tfp_class_str)
else:
return None
class EquivalenceTest(parameterized.TestCase):
"""Provides comparison assertions for TFP and Distrax distributions."""
def setUp(self):
super().setUp()
self.tfp_cls = None
def _init_distr_cls(self, distrax_cls: type(distribution.Distribution)):
self.key = jax.random.PRNGKey(1234)
self.distrax_cls = distrax_cls
if hasattr(distrax_cls, 'equiv_tfp_cls'):
self.tfp_cls = distrax_cls.equiv_tfp_cls
else:
self.tfp_cls = get_tfp_equiv(distrax_cls)
def assertion_fn(self, **kwargs) -> Callable[[Any, Any], None]:
def f(x, y):
np.testing.assert_allclose(x, y, **kwargs)
return f
def _get_tfp_cls(self) -> type(tfd.Distribution):
if self.tfp_cls is None:
raise ValueError('TFP class undefined. Run _init_distr_cls() first.')
return self.tfp_cls
def _test_attribute(
self,
attribute_string: str,
dist_args: Tuple[Any, ...] = (),
dist_kwargs: Optional[Dict[str, Any]] = None,
tfp_dist_args: Optional[Tuple[Any, ...]] = None,
tfp_dist_kwargs: Optional[Dict[str, Any]] = None,
call_args: Tuple[Any, ...] = (),
call_kwargs: Optional[Dict[str, Any]] = None,
assertion_fn: Callable[[Any, Any], None] = np.testing.assert_allclose):
"""Asserts equivalence of TFP and Distrax attributes.
Given a potentially callable attribute as a string, compares the attribute
values among Distrax and TFP implementations.
Args:
attribute_string: An attribute or a method of a Distrax/TFP
distribution, provided as a string.
dist_args: Arguments to be passed to Distrax constructor as *dist_args.
dist_kwargs: Keyword arguments to be passed to Distrax constructor as
**dist_kwargs.
tfp_dist_args: Arguments to be passed to TFP constructor as
*tfp_dist_args. If None, defaults to `dist_args`.
tfp_dist_kwargs: Keyword arguments to be passed to TFP constructor as
**tfp_dist_kwargs. If None, defaults to `dist_kwargs`.
call_args: Arguments to be passed to Distrax and TFP methods as
*call_args.
call_kwargs: Keyword arguments to be passed to Distrax and TFP
methods as **call_kwargs.
assertion_fn: Assertion function to be called to compare
Distrax and TFP methods/attributes.
"""
if dist_kwargs is None:
dist_kwargs = {}
if call_kwargs is None:
call_kwargs = {}
if tfp_dist_args is None:
tfp_dist_args = dist_args
if tfp_dist_kwargs is None:
tfp_dist_kwargs = dist_kwargs
dist = self.distrax_cls(*dist_args, **dist_kwargs)
tfp_dist = self._get_tfp_cls()(*tfp_dist_args, **tfp_dist_kwargs)
if callable(getattr(dist, attribute_string)):
distrax_fn = getattr(dist, attribute_string)
tfp_fn = getattr(tfp_dist, attribute_string)
if hasattr(self, 'variant'):
distrax_fn = self.variant(distrax_fn)
assertion_fn(distrax_fn(*call_args, **call_kwargs),
tfp_fn(*call_args, **call_kwargs))
else:
assertion_fn(getattr(dist, attribute_string),
getattr(tfp_dist, attribute_string))
def _test_event_shape(self, dist_args, dist_kwargs,
tfp_dist_args=None, tfp_dist_kwargs=None):
"""Tests event shape."""
self._test_attribute('event_shape', dist_args, dist_kwargs,
tfp_dist_args, tfp_dist_kwargs)
def _test_batch_shape(self, dist_args, dist_kwargs,
tfp_dist_args=None, tfp_dist_kwargs=None):
"""Tests batch shape."""
self._test_attribute('batch_shape', dist_args, dist_kwargs,
tfp_dist_args, tfp_dist_kwargs)
def _test_prob(self, dist_args, dist_kwargs, value,
tfp_dist_args=None, tfp_dist_kwargs=None):
"""Tests prob."""
self._test_attribute('prob', dist_args, dist_kwargs, tfp_dist_args,
tfp_dist_kwargs, (value,))
def _test_log_prob(self, dist_args, dist_kwargs, value,
tfp_dist_args=None, tfp_dist_kwargs=None):
"""Tests log prob."""
assertion_fn = functools.partial(np.testing.assert_allclose, rtol=1e-2)
self._test_attribute(
'log_prob', dist_args, dist_kwargs, tfp_dist_args, tfp_dist_kwargs,
(value,), assertion_fn=assertion_fn)
def _test_cdf(self, dist_args, dist_kwargs, value,
tfp_dist_args=None, tfp_dist_kwargs=None):
"""Tests CDF."""
self._test_attribute('cdf', dist_args, dist_kwargs,
tfp_dist_args, tfp_dist_kwargs, (value,))
def _test_log_cdf(self, dist_args, dist_kwargs, value,
tfp_dist_args=None, tfp_dist_kwargs=None):
"""Tests log CDF."""
self._test_attribute('log_cdf', dist_args, dist_kwargs,
tfp_dist_args, tfp_dist_kwargs, (value,))
def _test_sample_shape(self, dist_args, dist_kwargs, sample_shape,
tfp_dist_args=None, tfp_dist_kwargs=None):
"""Tests sample shape."""
if tfp_dist_args is None:
tfp_dist_args = dist_args
if tfp_dist_kwargs is None:
tfp_dist_kwargs = dist_kwargs
dist = self.distrax_cls(*dist_args, **dist_kwargs)
def sample_fn(key, sample_shape=sample_shape):
return dist.sample(seed=key, sample_shape=sample_shape)
if hasattr(self, 'variant'):
sample_fn = self.variant(sample_fn)
samples = sample_fn(self.key)
tfp_dist = self._get_tfp_cls()(*tfp_dist_args, **tfp_dist_kwargs)
tfp_samples = tfp_dist.sample(sample_shape=sample_shape,
seed=self.key)
chex.assert_equal_shape([samples, tfp_samples])
def _test_sample_and_log_prob(
self,
dist_args: Tuple[Any, ...] = (),
dist_kwargs: Optional[Dict[str, Any]] = None,
tfp_dist_args: Optional[Tuple[Any, ...]] = None,
tfp_dist_kwargs: Optional[Dict[str, Any]] = None,
sample_shape: Union[int, Tuple[int, ...]] = (),
assertion_fn: Callable[[Any, Any], None] = np.testing.assert_allclose):
"""Tests sample and log prob."""
    dist_kwargs = dist_kwargs or {}
    if tfp_dist_args is None:
      tfp_dist_args = dist_args
    if tfp_dist_kwargs is None:
      tfp_dist_kwargs = dist_kwargs
    dist = self.distrax_cls(*dist_args, **dist_kwargs)
log_prob_fn = dist.log_prob
def sample_and_log_prob_fn(key):
return dist.sample_and_log_prob(seed=key, sample_shape=sample_shape)
if hasattr(self, 'variant'):
sample_and_log_prob_fn = self.variant(sample_and_log_prob_fn)
log_prob_fn = self.variant(dist.log_prob)
samples, log_prob = sample_and_log_prob_fn(self.key)
tfp_dist = self._get_tfp_cls()(*tfp_dist_args, **tfp_dist_kwargs)
tfp_samples = tfp_dist.sample(sample_shape=sample_shape,
seed=self.key)
tfp_log_prob = tfp_dist.log_prob(samples)
chex.assert_equal_shape([samples, tfp_samples])
assertion_fn(log_prob, tfp_log_prob)
assertion_fn(log_prob, log_prob_fn(samples))
def _test_with_two_distributions(
self,
attribute_string: str,
mode_string: str = 'distrax_to_distrax',
dist1_args: Tuple[Any, ...] = (),
dist1_kwargs: Optional[Dict[str, Any]] = None,
dist2_args: Tuple[Any, ...] = (),
dist2_kwargs: Optional[Dict[str, Any]] = None,
      tfp_dist1_args: Optional[Tuple[Any, ...]] = None,
      tfp_dist1_kwargs: Optional[Dict[str, Any]] = None,
      tfp_dist2_args: Optional[Tuple[Any, ...]] = None,
      tfp_dist2_kwargs: Optional[Dict[str, Any]] = None,
assertion_fn: Callable[[Any, Any], None] = np.testing.assert_allclose):
"""Asserts equivalence of TFP and Distrax methods that compare two distribs.
This checks that the methods `D(dist1 || dist2)` and `D(dist2 || dist1)`
give the same results as their TFP counterparts, where `D` is typically the
KL divergence or the cross-entropy.
Args:
attribute_string: The method attribute, provided as a string.
mode_string: string, must be one of the following:
- If "distrax_to_distrax", this method verifies the values of
`D(dist1 || dist2)` and `D(dist2 || dist1)`, where both `dist1` and
`dist2` are Distrax distributions.
- If "distrax_to_tfp", this method verifies the values of
`D(dist1 || tfp_dist2)` and `D(dist2 || tfp_dist1)`.
- If "tfp_to_distrax", this method verifies the values of
`D(tfp_dist1 || dist2)` and `D(tfp_dist2 || dist1)`.
dist1_args: Arguments to be passed to Distrax constructor as *dist_args
for the first distribution
dist1_kwargs: Keyword arguments to be passed to Distrax constructor as
**dist_kwargs for the first distribution.
dist2_args: Arguments to be passed to Distrax constructor as *dist_args
for the second distribution.
dist2_kwargs: Keyword arguments to be passed to Distrax constructor as
**dist_kwargs for the second distribution.
tfp_dist1_args: Arguments to be passed to TFP constructor as
*tfp_dist_args for the first distribution. If None, defaults to
`dist1_args`.
tfp_dist1_kwargs: Keyword arguments to be passed to TFP constructor as
**tfp_dist_kwargs for the first distribution. If None, defaults to
`dist1_kwargs`.
tfp_dist2_args: Arguments to be passed to TFP constructor as
*tfp_dist_args for the second distribution. If None, defaults to
`dist2_args`.
tfp_dist2_kwargs: Keyword arguments to be passed to TFP constructor as
**tfp_dist_kwargs for the second distribution. If None, defaults to
`dist2_kwargs`.
assertion_fn: Assertion function to be called to compare Distrax and TFP
function values.
"""
dist1_kwargs = {} if dist1_kwargs is None else dist1_kwargs
dist2_kwargs = {} if dist2_kwargs is None else dist2_kwargs
if tfp_dist1_args is None:
tfp_dist1_args = dist1_args
if tfp_dist1_kwargs is None:
tfp_dist1_kwargs = dist1_kwargs
if tfp_dist2_args is None:
tfp_dist2_args = dist2_args
if tfp_dist2_kwargs is None:
tfp_dist2_kwargs = dist2_kwargs
dist1 = self.distrax_cls(*dist1_args, **dist1_kwargs)
tfp_dist1 = self._get_tfp_cls()(*tfp_dist1_args, **tfp_dist1_kwargs)
dist2 = self.distrax_cls(*dist2_args, **dist2_kwargs)
tfp_dist2 = self._get_tfp_cls()(*tfp_dist2_args, **tfp_dist2_kwargs)
tfp_comp_dist1_dist2 = getattr(tfp_dist1, attribute_string)(tfp_dist2)
tfp_comp_dist2_dist1 = getattr(tfp_dist2, attribute_string)(tfp_dist1)
distrax_fn_1 = getattr(dist1, attribute_string)
distrax_fn_2 = getattr(dist2, attribute_string)
if hasattr(self, 'variant'):
distrax_fn_1 = self.variant(distrax_fn_1)
distrax_fn_2 = self.variant(distrax_fn_2)
if mode_string == 'distrax_to_distrax':
comp_dist1_dist2 = distrax_fn_1(dist2)
comp_dist2_dist1 = distrax_fn_2(dist1)
elif mode_string == 'distrax_to_tfp':
comp_dist1_dist2 = distrax_fn_1(tfp_dist2)
comp_dist2_dist1 = distrax_fn_2(tfp_dist1)
elif mode_string == 'tfp_to_distrax':
comp_dist1_dist2 = getattr(tfp_dist1, attribute_string)(dist2)
comp_dist2_dist1 = getattr(tfp_dist2, attribute_string)(dist1)
else:
raise ValueError(
f'`mode_string` should be one of the following: '
f'"distrax_to_distrax", "distrax_to_tfp", or "tfp_to_distrax", '
f'but it is "{mode_string}".')
assertion_fn(comp_dist1_dist2, tfp_comp_dist1_dist2)
assertion_fn(comp_dist2_dist1, tfp_comp_dist2_dist1)
def _test_jittable(
self,
dist_args: Tuple[Any, ...] = (),
dist_kwargs: Optional[Dict[str, Any]] = None,
assertion_fn: Callable[[Any, Any], None] = np.testing.assert_allclose):
"""Tests that the distribution can be passed to a jitted function."""
dist_kwargs = dist_kwargs or {}
@jax.jit
def jitted_function(event, dist):
return dist.log_prob(event)
dist = self.distrax_cls(*dist_args, **dist_kwargs)
event = dist.sample(seed=self.key)
log_prob = dist.log_prob(event)
jitted_log_prob = jitted_function(event, dist)
assertion_fn(jitted_log_prob, log_prob)
def _test_raises_error(
self,
dist_args: Tuple[Any, ...] = (),
dist_kwargs: Optional[Dict[str, Any]] = None,
error_type=AssertionError):
"""Tests that the instantiation of the distribution raises an error."""
dist_kwargs = dist_kwargs or {}
try:
with self.assertRaises(error_type):
self.distrax_cls(*dist_args, **dist_kwargs)
except ValueError:
      # For forward compatibility with Chex (it will raise AssertionError
      # instead of ValueError in newer versions).
# TODO(iukemaev): remove after the new version of Chex is released.
pass
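# Illustrative usage sketch (an editorial addition, not part of the original
# module) showing how a concrete test case typically uses `EquivalenceTest`:
# subclass it, register the Distrax class under test in `setUp`, then call
# the `_test_*` helpers. The `Normal` import and test values below are
# assumptions made for the example only.
#
#   from distrax._src.distributions.normal import Normal
#
#   class NormalEquivalenceTest(EquivalenceTest):
#
#     def setUp(self):
#       super().setUp()
#       self._init_distr_cls(Normal)  # Also resolves the equivalent TFP class.
#
#     def test_log_prob(self):
#       self._test_attribute(
#           'log_prob',
#           dist_args=(0., 1.),                    # loc, scale
#           call_args=(np.array([-1., 0., 1.]),),  # values to evaluate at
#           assertion_fn=self.assertion_fn(rtol=1e-3))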
| distrax-master | distrax/_src/utils/equivalence.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `math.py`."""
from absl.testing import absltest
from distrax._src.utils import math
import jax
import jax.numpy as jnp
import numpy as np
import scipy.special
class MathTest(absltest.TestCase):
def test_multiply_no_nan(self):
zero = jnp.zeros(())
nan = zero / zero
self.assertTrue(jnp.isnan(math.multiply_no_nan(zero, nan)))
self.assertFalse(jnp.isnan(math.multiply_no_nan(nan, zero)))
def test_multiply_no_nan_grads(self):
x = -jnp.inf
y = 0.
self.assertEqual(math.multiply_no_nan(x, y), 0.)
grad_fn = jax.grad(
lambda inputs: math.multiply_no_nan(inputs[0], inputs[1]))
np.testing.assert_allclose(grad_fn((x, y)), (y, x), rtol=1e-3)
def test_power_no_nan(self):
zero = jnp.zeros(())
nan = zero / zero
self.assertTrue(jnp.isnan(math.power_no_nan(zero, nan)))
self.assertFalse(jnp.isnan(math.power_no_nan(nan, zero)))
def test_power_no_nan_grads(self):
x = np.exp(1.)
y = 0.
self.assertEqual(math.power_no_nan(x, y), 1.)
grad_fn = jax.grad(
lambda inputs: math.power_no_nan(inputs[0], inputs[1]))
np.testing.assert_allclose(grad_fn((x, y)), (0., 1.), rtol=1e-3)
def test_normalize_probs(self):
pre_normalised_probs = jnp.array([0.4, 0.4, 0., 0.2])
unnormalised_probs = jnp.array([4., 4., 0., 2.])
expected_probs = jnp.array([0.4, 0.4, 0., 0.2])
np.testing.assert_array_almost_equal(
math.normalize(probs=pre_normalised_probs), expected_probs)
np.testing.assert_array_almost_equal(
math.normalize(probs=unnormalised_probs), expected_probs)
def test_normalize_logits(self):
unnormalised_logits = jnp.array([1., -1., 3.])
expected_logits = jax.nn.log_softmax(unnormalised_logits, axis=-1)
np.testing.assert_array_almost_equal(
math.normalize(logits=unnormalised_logits), expected_logits)
np.testing.assert_array_almost_equal(
math.normalize(logits=expected_logits), expected_logits)
def test_sum_last(self):
x = jax.random.normal(jax.random.PRNGKey(42), (2, 3, 4))
np.testing.assert_array_equal(math.sum_last(x, 0), x)
np.testing.assert_array_equal(math.sum_last(x, 1), x.sum(-1))
np.testing.assert_array_equal(math.sum_last(x, 2), x.sum((-2, -1)))
np.testing.assert_array_equal(math.sum_last(x, 3), x.sum())
def test_log_expbig_minus_expsmall(self):
small = jax.random.normal(jax.random.PRNGKey(42), (2, 3, 4))
big = small + jax.random.uniform(jax.random.PRNGKey(43), (2, 3, 4))
expected_result = np.log(np.exp(big) - np.exp(small))
np.testing.assert_allclose(
math.log_expbig_minus_expsmall(big, small), expected_result, atol=1e-4)
def test_log_beta(self):
a = jnp.abs(jax.random.normal(jax.random.PRNGKey(42), (2, 3, 4)))
b = jnp.abs(jax.random.normal(jax.random.PRNGKey(43), (3, 4)))
expected_result = scipy.special.betaln(a, b)
np.testing.assert_allclose(math.log_beta(a, b), expected_result, atol=2e-4)
def test_log_beta_bivariate(self):
a = jnp.abs(jax.random.normal(jax.random.PRNGKey(42), (4, 3, 2)))
expected_result = scipy.special.betaln(a[..., 0], a[..., 1])
np.testing.assert_allclose(
math.log_beta_multivariate(a), expected_result, atol=2e-4)
def test_log_beta_multivariate(self):
a = jnp.abs(jax.random.normal(jax.random.PRNGKey(42), (2, 3, 4)))
expected_result = (jnp.sum(scipy.special.gammaln(a), axis=-1)
- scipy.special.gammaln(jnp.sum(a, axis=-1)))
np.testing.assert_allclose(
math.log_beta_multivariate(a), expected_result, atol=1e-3)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/utils/math_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hidden Markov Model implementation."""
from typing import Optional, Tuple
import chex
from distrax._src.distributions import categorical
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
from distrax._src.utils import jittable
import jax
import jax.numpy as jnp
def _normalize(u: chex.Array,
axis: int = 0,
eps: float = 1e-15) -> Tuple[chex.Array, chex.Array]:
"""Normalizes the values within the axis in a way that they sum up to 1.
Args:
u: Input array to normalize.
axis: Axis over which to normalize.
eps: Minimum value threshold for numerical stability.
Returns:
Tuple of the normalized values, and the normalizing denominator.
"""
u = jnp.where(u == 0, 0, jnp.where(u < eps, eps, u))
c = u.sum(axis=axis)
c = jnp.where(c == 0, 1, c)
return u / c, c
class HMM(jittable.Jittable):
"""Hidden Markov Model class."""
def __init__(self,
init_dist: categorical.CategoricalLike,
trans_dist: categorical.CategoricalLike,
obs_dist: distribution.DistributionLike):
"""Constructs an N-state Hidden Markov Model from component distributions.
Args:
init_dist: Integer-valued categorical distribution with parameters of
shape (N,), representing the distribution over initial latent states.
trans_dist: Integer-valued categorical distribution with parameters of
shape (N, N), representing the transition probability matrix between
latent states.
obs_dist: Any observation distribution with batch shape (N,), representing
`p(observation|latent state)`.
"""
self._init_dist = conversion.as_distribution(init_dist)
self._trans_dist = conversion.as_distribution(trans_dist)
self._obs_dist = conversion.as_distribution(obs_dist)
self._n_states = self._init_dist.num_categories
if not jnp.issubdtype(self._init_dist.dtype, jnp.integer):
raise TypeError(
f'init_dist must be categorical-like with integer dtype, but its '
f'dtype is {self._init_dist.dtype}.')
if not jnp.issubdtype(self._trans_dist.dtype, jnp.integer):
raise TypeError(
f'trans_dist must be categorical-like with integer dtype, but its '
f'dtype is {self._trans_dist.dtype}.')
if self._init_dist.batch_shape:
raise ValueError(
f'init_dist must be unbatched, but it has a batch shape of '
f'{self._init_dist.batch_shape}.')
if self._obs_dist.batch_shape != (self._n_states,):
raise ValueError(
f'obs_dist should have batch shape of ({self._n_states},) equal to '
f'the number of latent states in the model, but its batch shape is '
f'{self._obs_dist.batch_shape}.')
if self._trans_dist.batch_shape != (self._n_states,):
raise ValueError(
f'trans_dist should have batch shape of ({self._n_states},) equal to '
f'the number of latent states in the model, but its batch shape is '
f'{self._trans_dist.batch_shape}.')
if self._trans_dist.num_categories != self._n_states:
raise ValueError(
f'trans_dist should have `num_categories` of {self._n_states} equal '
f'to the number of latent states in the model, but it has '
f'`num_categories` of {self._trans_dist.num_categories}.')
@property
def init_dist(self) -> categorical.CategoricalLike:
return self._init_dist
@property
def trans_dist(self) -> categorical.CategoricalLike:
return self._trans_dist
@property
def obs_dist(self) -> distribution.DistributionLike:
return self._obs_dist
def sample(self,
*,
seed: chex.PRNGKey,
seq_len: int) -> Tuple[chex.Array, chex.Array]:
"""Sample from this HMM.
    Samples an observation sequence of the given length from this
    Hidden Markov Model and returns both the sequence of hidden states
    and the sequence of observations.
Args:
seed: Random key of shape (2,) and dtype uint32.
seq_len: The length of the observation sequence.
Returns:
Tuple of hidden state sequence, and observation sequence.
"""
rng_key, rng_init = jax.random.split(seed)
initial_state = self._init_dist.sample(seed=rng_init)
def draw_state(prev_state, key):
state = self._trans_dist.sample(seed=key)[prev_state]
return state, state
rng_state, rng_obs = jax.random.split(rng_key)
keys = jax.random.split(rng_state, seq_len - 1)
_, states = jax.lax.scan(draw_state, initial_state, keys)
states = jnp.append(initial_state, states)
def draw_obs(state, key):
return self._obs_dist.sample(seed=key)[state]
keys = jax.random.split(rng_obs, seq_len)
obs_seq = jax.vmap(draw_obs, in_axes=(0, 0))(states, keys)
return states, obs_seq
def forward(self,
obs_seq: chex.Array,
length: Optional[chex.Array] = None) -> Tuple[float, chex.Array]:
"""Calculates a belief state.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Tuple of `log(p(x_{1:T}|model))` and the array of forward joint
probabilities `p(z_t,x_{1:t})` for each sample `x_t`.
"""
seq_len = len(obs_seq)
if length is None:
length = seq_len
def scan_fn(carry, t):
(alpha_prev, log_ll_prev) = carry
alpha_n = jnp.where(
t < length,
(self._obs_dist.prob(obs_seq[t])
* (alpha_prev[:, None] * self._trans_dist.probs).sum(axis=0)),
jnp.zeros_like(alpha_prev))
alpha_n, cn = _normalize(alpha_n)
carry = (alpha_n, jnp.log(cn) + log_ll_prev)
return carry, alpha_n
# initial belief state
alpha_0, c0 = _normalize(
self._init_dist.probs * self._obs_dist.prob(obs_seq[0]))
# setup scan loop
init_state = (alpha_0, jnp.log(c0))
ts = jnp.arange(1, seq_len)
carry, alpha_hist = jax.lax.scan(scan_fn, init_state, ts)
# post-process
alpha_hist = jnp.vstack([alpha_0.reshape(1, self._n_states), alpha_hist])
(_, log_ll) = carry
return log_ll, alpha_hist
def backward(self,
obs_seq: chex.Array,
length: Optional[chex.Array] = None) -> chex.Array:
"""Computes the backward probabilities.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Array of backward joint probabilities `p(x_{t+1:T}|z_t)`.
"""
seq_len = len(obs_seq)
if length is None:
length = seq_len
beta_t = jnp.ones((self._n_states,))
def scan_fn(beta_prev, t):
beta_t = jnp.where(
t > length,
jnp.zeros_like(beta_prev),
_normalize((beta_prev * self._obs_dist.prob(obs_seq[t-1])
* self._trans_dist.probs).sum(axis=1))[0])
return beta_t, beta_t
ts = jnp.arange(seq_len, 1, -1)
_, beta_hist = jax.lax.scan(scan_fn, beta_t, ts)
beta_hist = jnp.flip(
jnp.vstack([beta_t.reshape(1, self._n_states), beta_hist]), axis=0)
return beta_hist
def forward_backward(
self,
obs_seq: chex.Array,
length: Optional[chex.Array] = None,
) -> Tuple[chex.Array, chex.Array, chex.Array, float]:
"""HMM forward-backward algorithm.
Computes, for each time step, the marginal conditional probability that the
Hidden Markov Model was in each possible state given the observations that
were made at each time step, i.e. P(z[i] | x[0], ..., x[num_steps - 1])
for all i from 0 to num_steps - 1.
Args:
obs_seq: Observation sequence.
length: The valid length of the observation sequence, used to truncate the
computation for batches of varying length. If set to None, the entire
sequence is used.
Returns:
Tuple of:
* Forward joint probabilities `p(z_t,x_{1:t})`.
* Backward joint probabilities `p(x_{t+1:T}|z_t)`.
* Marginal conditional probability of the observations.
* The log-likelihood log(p(x_{1:T}|model)).
"""
seq_len = len(obs_seq)
if length is None:
length = seq_len
def gamma_t(t):
return alpha[t] * beta[t]
ll, alpha = self.forward(obs_seq, length)
beta = self.backward(obs_seq, length)
ts = jnp.arange(seq_len)
gamma = jax.vmap(gamma_t)(ts)
gamma = jax.vmap(lambda x: _normalize(x)[0])(gamma)
return alpha, beta, gamma, ll
def viterbi(self, obs_seq: chex.Array) -> chex.Array:
"""Viterbi algorithm.
Computes the most probable sequence of hidden states given the observations.
Args:
obs_seq: Observation sequence.
Returns:
The most probable sequence of hidden states.
"""
trans_log_probs = jax.nn.log_softmax(self._trans_dist.logits)
init_log_probs = jax.nn.log_softmax(self._init_dist.logits)
first_log_prob = init_log_probs + self._obs_dist.log_prob(obs_seq[0])
if len(obs_seq) == 1:
return jnp.expand_dims(jnp.argmax(first_log_prob), axis=0)
def viterbi_forward(prev_logp, obs):
obs_logp = self._obs_dist.log_prob(obs)
logp = prev_logp[..., None] + trans_log_probs + obs_logp[..., None, :]
max_logp_given_successor = jnp.max(logp, axis=-2)
most_likely_given_successor = jnp.argmax(logp, axis=-2)
return max_logp_given_successor, most_likely_given_successor
final_log_prob, most_likely_sources = jax.lax.scan(
viterbi_forward, first_log_prob, obs_seq[1:])
most_likely_initial_given_successor = jnp.argmax(
trans_log_probs + first_log_prob, axis=-2)
most_likely_sources = jnp.concatenate([
jnp.expand_dims(most_likely_initial_given_successor, axis=0),
most_likely_sources], axis=0)
def viterbi_backward(state, most_likely_sources):
state = jax.nn.one_hot(state, self._n_states)
most_likely = jnp.sum(most_likely_sources * state).astype(jnp.int64)
return most_likely, most_likely
final_state = jnp.argmax(final_log_prob)
_, most_likely_path = jax.lax.scan(
viterbi_backward, final_state, most_likely_sources[1:], reverse=True)
return jnp.append(most_likely_path, final_state)
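# Illustrative usage sketch (an editorial addition, not part of the original
# module): builds a two-state HMM with Gaussian emissions, samples a
# sequence, and runs the forward pass and Viterbi decoding. The `Normal`
# import and all parameter values are assumptions made for the example only.
def _example_hmm_usage(seq_len: int = 20):
  """Minimal sketch of constructing and querying an `HMM`."""
  from distrax._src.distributions import normal  # Example-only import.
  model = HMM(
      init_dist=categorical.Categorical(probs=jnp.array([0.5, 0.5])),
      trans_dist=categorical.Categorical(
          probs=jnp.array([[0.9, 0.1], [0.2, 0.8]])),
      obs_dist=normal.Normal(loc=jnp.array([-1., 1.]),
                             scale=jnp.array([0.5, 0.5])))
  states, obs = model.sample(seed=jax.random.PRNGKey(0), seq_len=seq_len)
  # Log-likelihood log p(obs | model); the second output holds the forward
  # messages and is discarded here.
  log_lik, _ = model.forward(obs)
  decoded = model.viterbi(obs)  # Most probable hidden-state sequence.
  return states, obs, log_lik, decoded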
| distrax-master | distrax/_src/utils/hmm.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `distribution_from_tfp.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions.categorical import Categorical
from distrax._src.distributions.distribution import Distribution
from distrax._src.distributions.distribution_from_tfp import distribution_from_tfp
from distrax._src.distributions.mvn_diag import MultivariateNormalDiag
from distrax._src.distributions.normal import Normal
from distrax._src.distributions.transformed import Transformed
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
class DistributionFromTfpNormal(parameterized.TestCase):
"""Tests for normal distribution."""
def setUp(self):
super().setUp()
self._sample_shape = (np.int32(10),)
self._seed = 42
self._key = jax.random.PRNGKey(self._seed)
self.base_dist = tfd.Normal(loc=0., scale=1.)
self.values = jnp.array([1., -1.])
self.distrax_second_dist = Normal(loc=-1., scale=0.8)
self.tfp_second_dist = tfd.Normal(loc=-1., scale=0.8)
def assertion_fn(self, rtol):
return lambda x, y: np.testing.assert_allclose(x, y, rtol=rtol)
@property
def wrapped_dist(self):
return distribution_from_tfp(self.base_dist)
def test_event_shape(self):
chex.assert_equal(self.wrapped_dist.event_shape, self.base_dist.event_shape)
def test_batch_shape(self):
chex.assert_equal(self.wrapped_dist.batch_shape, self.base_dist.batch_shape)
@chex.all_variants
def test_sample_dtype(self):
samples = self.variant(self.wrapped_dist.sample)(seed=self._key)
self.assertEqual(self.wrapped_dist.dtype, samples.dtype)
self.assertEqual(self.wrapped_dist.dtype, self.base_dist.dtype)
@chex.all_variants
def test_sample(self):
def sample_fn(key):
return self.wrapped_dist.sample(sample_shape=self._sample_shape, seed=key)
self.assertion_fn(rtol=2e-4)(
self.variant(sample_fn)(self._key),
self.base_dist.sample(sample_shape=self._sample_shape, seed=self._key))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('mean', 'mean'),
('mode', 'mode'),
('median', 'median'),
('stddev', 'stddev'),
('variance', 'variance'),
('entropy', 'entropy'),
)
def test_method(self, method):
self.variant(lambda: None) # To avoid variants usage error.
try:
expected_result = getattr(self.base_dist, method)()
except NotImplementedError:
return
except AttributeError:
return
result = getattr(self.wrapped_dist, method)()
self.assertion_fn(rtol=2e-4)(result, expected_result)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('log_prob', 'log_prob'),
('prob', 'prob'),
('log_cdf', 'log_cdf'),
('cdf', 'cdf'),
)
def test_method_with_value(self, method):
self.variant(lambda: None) # To avoid variants usage error.
if (isinstance(self.base_dist, tfd.Categorical) and
method in ('cdf', 'log_cdf')):
# TODO(budden): make .cdf() and .log_cdf() from tfp.Categorical jittable.
return
try:
expected_result = getattr(self.base_dist, method)(self.values)
except NotImplementedError:
return
except AttributeError:
return
result = self.variant(getattr(self.wrapped_dist, method))(self.values)
self.assertion_fn(rtol=2e-4)(result, expected_result)
@chex.all_variants
def test_sample_and_log_prob(self):
base_samples = self.base_dist.sample(
sample_shape=self._sample_shape, seed=self._key)
base_logprob = self.base_dist.log_prob(base_samples)
def sample_fn(key):
return self.wrapped_dist.sample_and_log_prob(
sample_shape=self._sample_shape, seed=key)
samples, log_prob = self.variant(sample_fn)(self._key)
self.assertion_fn(rtol=2e-4)(samples, base_samples)
self.assertion_fn(rtol=2e-4)(log_prob, base_logprob)
@chex.all_variants
@parameterized.named_parameters(
('kl_divergence', 'kl_divergence'),
('cross_entropy', 'cross_entropy'),
)
def test_with_two_distributions(self, method):
"""Test methods of the forms listed below.
D(distrax_distrib || wrapped_distrib),
D(wrapped_distrib || distrax_distrib),
D(tfp_distrib || wrapped_distrib),
D(wrapped_distrib || tfp_distrib).
Args:
      method: The name of the method to be tested.
"""
try:
      expected_result1 = self.variant(
          getattr(self.tfp_second_dist, method))(self.base_dist)
      expected_result2 = self.variant(
          getattr(self.base_dist, method))(self.tfp_second_dist)
except NotImplementedError:
return
except AttributeError:
return
distrax_result1 = self.variant(getattr(self.distrax_second_dist, method))(
self.wrapped_dist)
distrax_result2 = self.variant(getattr(self.wrapped_dist, method))(
self.distrax_second_dist)
tfp_result1 = self.variant(getattr(self.tfp_second_dist, method))(
self.wrapped_dist)
tfp_result2 = self.variant(getattr(self.wrapped_dist, method))(
self.tfp_second_dist)
self.assertion_fn(rtol=2e-4)(distrax_result1, expected_result1)
self.assertion_fn(rtol=2e-4)(distrax_result2, expected_result2)
self.assertion_fn(rtol=2e-4)(tfp_result1, expected_result1)
self.assertion_fn(rtol=2e-4)(tfp_result2, expected_result2)
class DistributionFromTfpMvnNormal(DistributionFromTfpNormal):
"""Tests for multivariate normal distribution."""
def setUp(self):
super().setUp()
self.base_dist = tfd.MultivariateNormalDiag(loc=[0., 1.])
self.values = jnp.array([1., -1.])
self.distrax_second_dist = MultivariateNormalDiag(
loc=jnp.array([-1., 0.]), scale_diag=jnp.array([0.8, 1.2]))
self.tfp_second_dist = tfd.MultivariateNormalDiag(
loc=[-1., 0.], scale_diag=[0.8, 1.2])
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))))
def test_slice(self, slice_):
loc = np.random.randn(3, 4, 5)
scale_diag = np.random.randn(3, 4, 5)
dist = distribution_from_tfp(
tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag))
sliced_dist = dist[slice_]
self.assertIsInstance(sliced_dist, Distribution)
self.assertion_fn(rtol=2e-4)(sliced_dist.mean(), loc[slice_])
class DistributionFromTfpCategorical(DistributionFromTfpNormal):
"""Tests for categorical distribution."""
def setUp(self):
super().setUp()
self.base_dist = tfd.Categorical(logits=[0., -1., 1.])
self.values = jnp.array([0, 1, 2])
self.distrax_second_dist = Categorical(probs=jnp.array([0.2, 0.2, 0.6]))
self.tfp_second_dist = tfd.Categorical(probs=[0.2, 0.2, 0.6])
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
logits = np.random.randn(3, 4, 5)
probs = jax.nn.softmax(np.random.randn(3, 4, 5), axis=-1)
dist1 = distribution_from_tfp(tfd.Categorical(logits=logits))
dist2 = distribution_from_tfp(tfd.Categorical(probs=probs))
sliced_dist1 = dist1[slice_]
sliced_dist2 = dist2[slice_]
self.assertIsInstance(sliced_dist1, Distribution)
self.assertIsInstance(sliced_dist2, Distribution)
self.assertIsInstance(sliced_dist1, tfd.Categorical)
self.assertIsInstance(sliced_dist2, tfd.Categorical)
self.assertion_fn(rtol=2e-4)(
jax.nn.softmax(sliced_dist1.logits, axis=-1),
jax.nn.softmax(logits[slice_], axis=-1))
self.assertion_fn(rtol=2e-4)(sliced_dist2.probs, probs[slice_])
def test_slice_ellipsis(self):
logits = np.random.randn(3, 4, 5)
probs = jax.nn.softmax(np.random.randn(3, 4, 5), axis=-1)
dist1 = distribution_from_tfp(tfd.Categorical(logits=logits))
dist2 = distribution_from_tfp(tfd.Categorical(probs=probs))
sliced_dist1 = dist1[..., -1]
sliced_dist2 = dist2[..., -1]
self.assertIsInstance(sliced_dist1, Distribution)
self.assertIsInstance(sliced_dist2, Distribution)
self.assertIsInstance(sliced_dist1, tfd.Categorical)
self.assertIsInstance(sliced_dist2, tfd.Categorical)
self.assertion_fn(rtol=2e-4)(
jax.nn.softmax(sliced_dist1.logits, axis=-1),
jax.nn.softmax(logits[:, -1], axis=-1))
self.assertion_fn(rtol=2e-4)(sliced_dist2.probs, probs[:, -1])
class DistributionFromTfpTransformed(DistributionFromTfpNormal):
"""Tests for transformed distributions."""
def setUp(self):
super().setUp()
self.base_dist = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0., scale=1.),
bijector=tfb.Exp())
self.values = jnp.array([0., 1., 2.])
self.distrax_second_dist = Transformed(
distribution=Normal(loc=0.5, scale=0.8),
bijector=tfb.Exp())
self.tfp_second_dist = tfd.TransformedDistribution(
distribution=tfd.Normal(loc=0.5, scale=0.8),
bijector=tfb.Exp())
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/distribution_from_tfp_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `beta.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import beta as base_beta
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
class BetaTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(base_beta.Beta)
@parameterized.named_parameters(
('0d params', (), (), ()),
('1d params', (2,), (2,), (2,)),
('2d params, no broadcast', (3, 2), (3, 2), (3, 2)),
('2d params, broadcasted alpha', (2,), (3, 2), (3, 2)),
('2d params, broadcasted beta', (3, 2), (2,), (3, 2)),
)
def test_properties(self, shape_of_alpha, shape_of_beta, batch_shape):
rng = np.random.default_rng(42)
alpha = 0.1 + rng.uniform(size=shape_of_alpha)
beta = 0.1 + rng.uniform(size=shape_of_beta)
dist = base_beta.Beta(alpha, beta)
self.assertEqual(dist.event_shape, ())
self.assertEqual(dist.batch_shape, batch_shape)
self.assertion_fn(rtol=1e-3)(
dist.alpha, np.broadcast_to(alpha, batch_shape))
self.assertion_fn(rtol=1e-3)(dist.beta, np.broadcast_to(beta, batch_shape))
@chex.all_variants
@parameterized.named_parameters(
('1d std beta, no shape', (1, 1), ()),
('1d std beta, int shape', (1, 1), 1),
('1d std beta, 1-tuple shape', (1, 1), (1,)),
('1d std beta, 2-tuple shape', (1, 1), (2, 2)),
('2d std beta, no shape', (np.ones(2), np.ones(2)), ()),
('2d std beta, int shape', ([1, 1], [1, 1]), 1),
('2d std beta, 1-tuple shape', (np.ones(2), np.ones(2)), (1,)),
('2d std beta, 2-tuple shape', ([1, 1], [1, 1]), (2, 2)),
('rank 2 std beta, 2-tuple shape', (np.ones((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted alpha', (1, np.ones(3)), (2, 2)),
('broadcasted beta', (np.ones(3), 1), ()),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_shape(distr_params, dict(), sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('sample, float16', 'sample', jnp.float16),
('sample, float32', 'sample', jnp.float32),
('sample_and_log_prob, float16', 'sample_and_log_prob', jnp.float16),
('sample_and_log_prob, float32', 'sample_and_log_prob', jnp.float32),
)
def test_sample_dtype(self, method, dtype):
dist = self.distrax_cls(alpha=jnp.ones((), dtype), beta=jnp.ones((), dtype))
samples = self.variant(getattr(dist, method))(seed=self.key)
samples = samples[0] if method == 'sample_and_log_prob' else samples
self.assertEqual(samples.dtype, dist.dtype)
self.assertEqual(samples.dtype, dtype)
@chex.all_variants
@parameterized.named_parameters(
('sample', 'sample'),
('sample_and_log_prob', 'sample_and_log_prob'),
)
def test_sample_values(self, method):
rng = np.random.default_rng(42)
alpha = jnp.array(np.abs(rng.normal(size=(4, 3, 2))))
beta = jnp.array(np.abs(rng.normal(size=(4, 3, 2))))
n_samples = 100000
dist = self.distrax_cls(alpha, beta)
sample_fn = self.variant(
lambda key: getattr(dist, method)(seed=key, sample_shape=n_samples))
samples = sample_fn(self.key)
samples = samples[0] if method == 'sample_and_log_prob' else samples
self.assertEqual(samples.shape, (n_samples,) + (4, 3, 2))
self.assertTrue(np.all(np.logical_and(samples >= 0., samples <= 1.)))
self.assertion_fn(rtol=0.1)(np.mean(samples, axis=0), dist.mean())
self.assertion_fn(rtol=0.1)(np.std(samples, axis=0), dist.stddev())
@chex.all_variants
@parameterized.named_parameters(
('1d std beta, no shape', (11, 3), ()),
('1d std beta, int shape', (11, 3), 1),
('1d std beta, 1-tuple shape', (11, 3), (1,)),
('1d std beta, 2-tuple shape', (1, 1), (2, 2)),
('2d std beta, no shape', (np.ones(2), np.ones(2)), ()),
('2d std beta, int shape', ([1, 1], [1, 1]), 1),
('2d std beta, 1-tuple shape', (np.ones(2), np.ones(2)), (1,)),
('2d std beta, 2-tuple shape', ([1, 1], [1, 1]), (2, 2)),
('rank 2 std beta, 2-tuple shape', (np.ones((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted alpha', (1, np.ones(3)), (2, 2)),
('broadcasted beta', (np.ones(3), 1), ()),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_and_log_prob(
dist_args=distr_params,
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (3.1, 1), 0.6),
('1d dist, 2d value', (0.5, 0.1), np.array([0.3, 0.8])),
('2d dist, 1d value', (0.5 + np.zeros(2), 0.3 * np.ones(2)), 0.7),
('2d broadcasted dist, 1d value', (0.4 + np.zeros(2), 0.8), 0.7),
('2d dist, 2d value',
([0.1, 0.5], 0.9 * np.ones(2)), np.array([0.2, 0.7])),
('edge cases alpha=1', (1., np.array([0.5, 2.])), np.array([0., 1.])),
('edge cases beta=1', (np.array([0.5, 2.]), 1.), np.array([0., 1.])),
)
def test_methods_with_value(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
for method in ['prob', 'log_prob', 'cdf', 'log_cdf', 'survival_function',
'log_survival_function']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_args=distr_params,
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', ([0.1, 1.3, 0.5], [0.5, 1.3, 1.5])),
('broadcasted alpha', (0.5, [0.5, 1.3, 1.5])),
('broadcasted beta', ([0.1, 1.3, 0.5], 0.8)),
)
def test_method(self, distr_params):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
for method in ['entropy', 'mean', 'variance', 'stddev']:
with self.subTest(method=method):
super()._test_attribute(
attribute_string=method,
dist_args=distr_params,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('alpha>1, beta>1, no broadcast', 1.5, 2.5, 0.25),
('alpha>1, beta>1, broadcasted alpha', 1.5, [2.5, 5.5], [0.25, 0.1]),
('alpha>1, beta>1, broadcasted beta', [1.5, 4.5], 2.5, [0.25, 0.7]),
('alpha<1, beta<1', 0.5, 0.1, np.nan),
('alpha=1, beta=1', 1., 1., np.nan),
('alpha=1, beta>1', 1., 1.5, 0.),
('alpha<1, beta>1', 0.5, 1.5, 0.),
('alpha>1, beta=1', 1.5, 1., 1.),
('alpha>1, beta<1', 1.5, 0.5, 1.),
)
def test_mode(self, alpha, beta, expected_result):
dist = self.distrax_cls(alpha, beta)
result = self.variant(dist.mode)()
if np.any(np.isnan(expected_result)):
self.assertTrue(jnp.isnan(result))
else:
self.assertion_fn(rtol=1e-3)(result, expected_result)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('distrax_to_distrax', 'distrax_to_distrax'),
('distrax_to_tfp', 'distrax_to_tfp'),
('tfp_to_distrax', 'tfp_to_distrax'),
)
def test_with_two_distributions(self, mode_string):
rng = np.random.default_rng(42)
alpha1 = np.abs(rng.normal(size=(4, 1, 2))).astype(np.float32)
beta1 = np.abs(rng.normal(size=(4, 3, 2))).astype(np.float32)
alpha2 = np.abs(rng.normal(size=(3, 2))).astype(np.float32)
beta2 = np.abs(rng.normal(size=(3, 2))).astype(np.float32)
for method in ['kl_divergence', 'cross_entropy']:
with self.subTest(method=method):
super()._test_with_two_distributions(
attribute_string=method,
mode_string=mode_string,
dist1_kwargs={'alpha': alpha1, 'beta': beta1},
dist2_kwargs={'alpha': alpha2, 'beta': beta2},
tfp_dist1_kwargs={
'concentration1': alpha1, 'concentration0': beta1},
tfp_dist2_kwargs={
'concentration1': alpha2, 'concentration0': beta2},
assertion_fn=self.assertion_fn(rtol=3e-2))
  def test_jittable(self):
super()._test_jittable(
(0.1, 1.5), assertion_fn=self.assertion_fn(rtol=1e-3))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
rng = np.random.default_rng(42)
alpha = jnp.array(np.abs(rng.normal(size=(4, 3, 2))))
beta = jnp.array(np.abs(rng.normal(size=(4, 3, 2))))
dist = self.distrax_cls(alpha, beta)
self.assertion_fn(rtol=1e-3)(dist[slice_].alpha, alpha[slice_])
self.assertion_fn(rtol=1e-3)(dist[slice_].beta, beta[slice_])
def test_slice_different_parameterization(self):
rng = np.random.default_rng(42)
alpha = np.abs(rng.normal(size=(4, 3, 2)))
beta = np.abs(rng.normal(size=(3, 2)))
dist = self.distrax_cls(alpha, beta)
self.assertion_fn(rtol=1e-3)(dist[0].alpha, alpha[0])
self.assertion_fn(rtol=1e-3)(dist[0].beta, beta) # Not slicing beta.
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/beta_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Laplace distribution."""
import math
from typing import Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
def _log_cdf_laplace(norm_value: EventT) -> Array:
"""Log CDF of a standardized Laplace distribution."""
lower_value = norm_value - math.log(2.0)
exp_neg_norm_value = jnp.exp(-jnp.abs(norm_value))
upper_value = jnp.log1p(-0.5 * exp_neg_norm_value)
return jnp.where(jnp.less_equal(norm_value, 0.), lower_value, upper_value)
class Laplace(distribution.Distribution):
"""Laplace distribution with location `loc` and `scale` parameters."""
equiv_tfp_cls = tfd.Laplace
def __init__(self, loc: Numeric, scale: Numeric):
"""Initializes a Laplace distribution.
Args:
loc: Mean of the distribution.
scale: Spread of the distribution.
"""
super().__init__()
self._loc = conversion.as_float_array(loc)
self._scale = conversion.as_float_array(scale)
self._batch_shape = jax.lax.broadcast_shapes(
self._loc.shape, self._scale.shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._batch_shape
@property
def loc(self) -> Array:
"""Mean of the distribution."""
return jnp.broadcast_to(self._loc, self.batch_shape)
@property
def scale(self) -> Array:
"""Scale of the distribution."""
return jnp.broadcast_to(self._scale, self.batch_shape)
def _sample_from_std_laplace(self, key: PRNGKey, n: int) -> Array:
out_shape = (n,) + self.batch_shape
dtype = jnp.result_type(self._loc, self._scale)
return jax.random.laplace(key, shape=out_shape, dtype=dtype)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
rnd = self._sample_from_std_laplace(key, n)
return self._loc + self._scale * rnd
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
rnd = self._sample_from_std_laplace(key, n)
samples = self._loc + self._scale * rnd
log_prob = -jnp.abs(rnd) - math.log(2.) - jnp.log(self._scale)
return samples, log_prob
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
norm_value = self._standardize(value)
return -jnp.abs(norm_value) - math.log(2.) - jnp.log(self._scale)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return math.log(2.) + 1. + jnp.log(self.scale)
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
norm_value = self._standardize(value)
return 0.5 - 0.5 * jnp.sign(norm_value) * jnp.expm1(-jnp.abs(norm_value))
def _standardize(self, value: Array) -> Array:
return (value - self._loc) / self._scale
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
norm_value = self._standardize(value)
return _log_cdf_laplace(norm_value)
def log_survival_function(self, value: EventT) -> Array:
"""See `Distribution.log_survival_function`."""
norm_value = self._standardize(value)
return _log_cdf_laplace(-norm_value)
def mean(self) -> Array:
"""Calculates the mean."""
return self.loc
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return math.sqrt(2.) * self.scale
def variance(self) -> Array:
"""Calculates the variance."""
return 2. * jnp.square(self.scale)
def mode(self) -> Array:
"""Calculates the mode."""
return self.mean()
def median(self) -> Array:
"""Calculates the median."""
return self.mean()
def __getitem__(self, index) -> 'Laplace':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Laplace(loc=self.loc[index], scale=self.scale[index])
def _kl_divergence_laplace_laplace(
dist1: Union[Laplace, tfd.Laplace],
dist2: Union[Laplace, tfd.Laplace],
*unused_args, **unused_kwargs,
) -> Array:
"""Batched KL divergence KL(dist1 || dist2) between two Laplace distributions.
Args:
dist1: A Laplace distribution.
dist2: A Laplace distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
distance = jnp.abs(dist1.loc - dist2.loc)
diff_log_scale = jnp.log(dist1.scale) - jnp.log(dist2.scale)
return (- diff_log_scale +
distance / dist2.scale - 1. +
jnp.exp(-distance / dist1.scale + diff_log_scale))
# Register the KL functions with TFP.
tfd.RegisterKL(Laplace, Laplace)(_kl_divergence_laplace_laplace)
tfd.RegisterKL(Laplace, Laplace.equiv_tfp_cls)(_kl_divergence_laplace_laplace)
tfd.RegisterKL(Laplace.equiv_tfp_cls, Laplace)(_kl_divergence_laplace_laplace)
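# Illustrative usage sketch (an editorial addition, not part of the original
# module): samples from a Laplace distribution, evaluates log-densities, and
# computes the analytic KL divergence registered above. Parameter values are
# assumptions made for the example only.
def _example_laplace_usage():
  """Minimal sketch of sampling and KL evaluation for `Laplace`."""
  dist1 = Laplace(loc=0., scale=1.)
  dist2 = Laplace(loc=jnp.array([0., 1.]), scale=jnp.array([1., 2.]))
  samples, log_prob = dist1.sample_and_log_prob(
      seed=jax.random.PRNGKey(0), sample_shape=(3,))
  # `kl_divergence` dispatches to `_kl_divergence_laplace_laplace` above and
  # broadcasts against the batch shape of `dist2`, giving a result of
  # shape (2,).
  kl = dist1.kl_divergence(dist2)
  return samples, log_prob, kl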
| distrax-master | distrax/_src/distributions/laplace.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical distribution."""
from typing import Any, Optional, Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Categorical(distribution.Distribution):
"""Categorical distribution."""
equiv_tfp_cls = tfd.Categorical
def __init__(self,
logits: Optional[Array] = None,
probs: Optional[Array] = None,
dtype: Union[jnp.dtype, type[Any]] = int):
"""Initializes a Categorical distribution.
Args:
logits: Logit transform of the probability of each category. Only one
of `logits` or `probs` can be specified.
probs: Probability of each category. Only one of `logits` or `probs` can
be specified.
dtype: The type of event samples.
"""
super().__init__()
if (logits is None) == (probs is None):
raise ValueError(
f'One and exactly one of `logits` and `probs` should be `None`, '
f'but `logits` is {logits} and `probs` is {probs}.')
if not (jnp.issubdtype(dtype, jnp.integer) or
jnp.issubdtype(dtype, jnp.floating)):
raise ValueError(
f'The dtype of `{self.name}` must be integer or floating-point, '
f'instead got `{dtype}`.')
self._probs = None if probs is None else math.normalize(probs=probs)
self._logits = None if logits is None else math.normalize(logits=logits)
self._dtype = dtype
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return ()
@property
def logits(self) -> Array:
"""The logits for each event."""
if self._logits is not None:
return self._logits
return jnp.log(self._probs)
@property
def probs(self) -> Array:
"""The probabilities for each event."""
if self._probs is not None:
return self._probs
return jax.nn.softmax(self._logits, axis=-1)
@property
def num_categories(self) -> int:
"""Number of categories."""
if self._probs is not None:
return self._probs.shape[-1]
return self._logits.shape[-1]
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
new_shape = (n,) + self.logits.shape[:-1]
is_valid = jnp.logical_and(jnp.all(jnp.isfinite(self.probs), axis=-1),
jnp.all(self.probs >= 0, axis=-1))
draws = jax.random.categorical(key=key, logits=self.logits, axis=-1,
shape=new_shape).astype(self._dtype)
return jnp.where(is_valid, draws, jnp.ones_like(draws) * -1)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
value_one_hot = jax.nn.one_hot(
value, self.num_categories, dtype=self.logits.dtype)
mask_outside_domain = jnp.logical_or(
value < 0, value > self.num_categories - 1)
return jnp.where(
mask_outside_domain, -jnp.inf,
jnp.sum(math.multiply_no_nan(self.logits, value_one_hot), axis=-1))
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
value_one_hot = jax.nn.one_hot(
value, self.num_categories, dtype=self.probs.dtype)
return jnp.sum(math.multiply_no_nan(self.probs, value_one_hot), axis=-1)
def entropy(self) -> Array:
"""See `Distribution.entropy`."""
if self._logits is None:
log_probs = jnp.log(self._probs)
else:
log_probs = jax.nn.log_softmax(self._logits)
return -jnp.sum(math.mul_exp(log_probs, log_probs), axis=-1)
def mode(self) -> Array:
"""See `Distribution.mode`."""
parameter = self._probs if self._logits is None else self._logits
return jnp.argmax(parameter, axis=-1).astype(self._dtype)
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
# For value < 0 the output should be zero because support = {0, ..., K-1}.
should_be_zero = value < 0
# For value >= K-1 the output should be one. Explicitly accounting for this
# case addresses potential numerical issues that may arise when evaluating
# derived methods (mainly, `log_survival_function`) for `value >= K-1`.
should_be_one = value >= self.num_categories - 1
# Will use value as an index below, so clip it to {0, ..., K-1}.
value = jnp.clip(value, 0, self.num_categories - 1)
value_one_hot = jax.nn.one_hot(
value, self.num_categories, dtype=self.probs.dtype)
cdf = jnp.sum(math.multiply_no_nan(
jnp.cumsum(self.probs, axis=-1), value_one_hot), axis=-1)
return jnp.where(should_be_zero, 0., jnp.where(should_be_one, 1., cdf))
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def logits_parameter(self) -> Array:
"""Wrapper for `logits` property, for TFP API compatibility."""
return self.logits
def __getitem__(self, index) -> 'Categorical':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
if self._logits is not None:
return Categorical(logits=self.logits[index], dtype=self._dtype)
return Categorical(probs=self.probs[index], dtype=self._dtype)
CategoricalLike = Union[Categorical, tfd.Categorical]
def _kl_divergence_categorical_categorical(
dist1: CategoricalLike,
dist2: CategoricalLike,
*unused_args, **unused_kwargs,
) -> Array:
"""Obtains the KL divergence `KL(dist1 || dist2)` between two Categoricals.
The KL computation takes into account that `0 * log(0) = 0`; therefore,
`dist1` may have zeros in its probability vector.
Args:
dist1: A Categorical distribution.
dist2: A Categorical distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
Raises:
ValueError if the two distributions have different number of categories.
"""
logits1 = dist1.logits_parameter()
logits2 = dist2.logits_parameter()
num_categories1 = logits1.shape[-1]
num_categories2 = logits2.shape[-1]
if num_categories1 != num_categories2:
raise ValueError(
f'Cannot obtain the KL between two Categorical distributions '
f'with different number of categories: the first distribution has '
f'{num_categories1} categories, while the second distribution has '
f'{num_categories2} categories.')
log_probs1 = jax.nn.log_softmax(logits1, axis=-1)
log_probs2 = jax.nn.log_softmax(logits2, axis=-1)
return jnp.sum(
math.mul_exp(log_probs1 - log_probs2, log_probs1), axis=-1)
# Register the KL functions with TFP.
tfd.RegisterKL(Categorical, Categorical)(
_kl_divergence_categorical_categorical)
tfd.RegisterKL(Categorical, Categorical.equiv_tfp_cls)(
_kl_divergence_categorical_categorical)
tfd.RegisterKL(Categorical.equiv_tfp_cls, Categorical)(
_kl_divergence_categorical_categorical)
# Also register the KL with the TFP OneHotCategorical.
tfd.RegisterKL(Categorical, tfd.OneHotCategorical)(
_kl_divergence_categorical_categorical)
tfd.RegisterKL(tfd.OneHotCategorical, Categorical)(
_kl_divergence_categorical_categorical)
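# A minimal usage sketch for `Categorical`, assuming the standard distrax
# `sample`/`log_prob`/`kl_divergence` API; the logits/probs values below are
# purely illustrative.
def _categorical_usage_sketch():
  """Builds a 3-category Categorical and evaluates a few methods."""
  dist = Categorical(logits=jnp.array([0.0, 0.5, 1.0]))
  draws = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=5)  # Shape (5,).
  log_probs = dist.log_prob(draws)  # Shape (5,).
  # KL against a distribution parameterized by `probs`, using the KL
  # registered above; the result is a scalar since `batch_shape` is ().
  other = Categorical(probs=jnp.array([0.25, 0.25, 0.5]))
  return draws, log_probs, dist.kl_divergence(other)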
| distrax-master | distrax/_src/distributions/categorical.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dirichlet distribution."""
from typing import Tuple, Union
import chex
from distrax._src.distributions import distribution
from distrax._src.distributions.beta import Beta
from distrax._src.utils import conversion
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Dirichlet(distribution.Distribution):
"""Dirichlet distribution with concentration parameter `alpha`.
The PDF of a Dirichlet distributed random variable `X`, where `X` lives in the
simplex `(0, 1)^K` with `sum_{k=1}^{K} X_k = 1`, is given by,
```
p(x; alpha) = ( prod_{k=1}^{K} x_k ** (alpha_k - 1) ) / B(alpha)
```
where `B(alpha)` is the multivariate beta function, and the concentration
parameters `alpha_k > 0`.
Note that the support of the distribution does not include `x_k = 0` nor
`x_k = 1`.
"""
equiv_tfp_cls = tfd.Dirichlet
def __init__(self, concentration: Array):
"""Initializes a Dirichlet distribution.
Args:
concentration: Concentration parameter `alpha` of the distribution. It
must be an array of length `K >= 2` containing positive values
(additional dimensions index batches).
"""
super().__init__()
self._concentration = conversion.as_float_array(concentration)
if self._concentration.ndim < 1:
raise ValueError(
'The concentration parameter must have at least one dimension.')
if self._concentration.shape[-1] < 2:
raise ValueError(
'The last dimension of the concentration parameter must be '
'at least 2.')
self._log_normalization_constant = math.log_beta_multivariate(
self._concentration)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
return self._concentration.shape[-1:]
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._concentration.shape[:-1]
@property
def concentration(self) -> Array:
"""Concentration parameter `alpha` of the distribution."""
return self._concentration
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
out_shape = (n,) + self.batch_shape
dtype = self._concentration.dtype
rnd = jax.random.dirichlet(
key, alpha=self._concentration, shape=out_shape, dtype=dtype)
return rnd
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return (jnp.sum((self._concentration - 1.) * jnp.log(value), axis=-1)
- self._log_normalization_constant)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
sum_concentration = jnp.sum(self._concentration, axis=-1)
return (
self._log_normalization_constant
+ ((sum_concentration - self._concentration.shape[-1])
* jax.lax.digamma(sum_concentration))
- jnp.sum((self._concentration - 1.) *
jax.lax.digamma(self._concentration), axis=-1)
)
def mean(self) -> Array:
"""Calculates the mean."""
return self._concentration / jnp.sum(
self._concentration, axis=-1, keepdims=True)
def mode(self) -> Array:
"""Calculates the mode.
Returns:
The mode, an array of shape `batch_shape + event_shape`. If any
`alpha_k <= 1`, the returned value is `jnp.nan`.
"""
result_if_valid = (self._concentration - 1.) / jnp.sum(
self._concentration - 1., axis=-1, keepdims=True)
return jnp.where(
jnp.all(self._concentration > 1., axis=-1, keepdims=True),
result_if_valid,
jnp.nan)
def variance(self) -> Array:
"""Calculates the variance."""
sum_concentration = jnp.sum(self._concentration, axis=-1, keepdims=True)
norm_concentration = self._concentration / sum_concentration
return norm_concentration * (1. - norm_concentration) / (
sum_concentration + 1.)
def covariance(self) -> Array:
"""Calculates the covariance.
Returns:
An array of shape `batch_shape + event_shape + event_shape` with the
covariance of the distribution.
"""
sum_concentration = jnp.sum(self._concentration, axis=-1, keepdims=True)
norm_concentration = self._concentration / sum_concentration
norm_over_sum = norm_concentration / (sum_concentration + 1.)
cov = - jnp.expand_dims(norm_over_sum, axis=-1) * jnp.expand_dims(
norm_concentration, axis=-2)
cov += jnp.vectorize(jnp.diag, signature='(k)->(k,k)')(norm_over_sum)
return cov
def __getitem__(self, index) -> 'Dirichlet':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Dirichlet(concentration=self.concentration[index])
DirichletLike = Union[Beta, tfd.Beta, Dirichlet, tfd.Dirichlet]
def _obtain_concentration(dist: DirichletLike) -> Array:
"""Returns the concentration parameters of the input distribution."""
if isinstance(dist, Dirichlet):
concentration = dist.concentration
elif isinstance(dist, Beta):
concentration = jnp.stack((dist.alpha, dist.beta), axis=-1)
elif isinstance(dist, tfd.Beta):
concentration = jnp.stack(
(dist.concentration1, dist.concentration0), axis=-1)
elif isinstance(dist, tfd.Dirichlet):
concentration = dist.concentration
return concentration
def _kl_divergence_dirichlet_dirichlet(
dist1: DirichletLike,
dist2: DirichletLike,
*unused_args,
**unused_kwargs,
) -> Array:
"""KL divergence KL(dist1 || dist2) between two Dirichlet distributions.
Args:
dist1: A Dirichlet or Beta distribution.
dist2: A Dirichlet or Beta distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
concentration1 = _obtain_concentration(dist1)
concentration2 = _obtain_concentration(dist2)
if concentration1.shape[-1] != concentration2.shape[-1]:
raise ValueError(
f'The two distributions must have the same event dimension, but got '
f'{concentration1.shape[-1]} and {concentration2.shape[-1]} '
f'dimensions.')
sum_concentration1 = jnp.sum(concentration1, axis=-1, keepdims=True)
t1 = (math.log_beta_multivariate(concentration2)
- math.log_beta_multivariate(concentration1))
t2 = jnp.sum((concentration1 - concentration2) * (
jax.lax.digamma(concentration1) - jax.lax.digamma(sum_concentration1)),
axis=-1)
return t1 + t2
# Register the KL functions with TFP.
tfd.RegisterKL(Dirichlet, Dirichlet)(_kl_divergence_dirichlet_dirichlet)
tfd.RegisterKL(Dirichlet, Dirichlet.equiv_tfp_cls)(
_kl_divergence_dirichlet_dirichlet)
tfd.RegisterKL(Dirichlet.equiv_tfp_cls, Dirichlet)(
_kl_divergence_dirichlet_dirichlet)
tfd.RegisterKL(Dirichlet, Beta)(_kl_divergence_dirichlet_dirichlet)
tfd.RegisterKL(Beta, Dirichlet)(_kl_divergence_dirichlet_dirichlet)
tfd.RegisterKL(Dirichlet, tfd.Beta)(_kl_divergence_dirichlet_dirichlet)
tfd.RegisterKL(tfd.Beta, Dirichlet)(_kl_divergence_dirichlet_dirichlet)
tfd.RegisterKL(tfd.Dirichlet, Beta)(_kl_divergence_dirichlet_dirichlet)
tfd.RegisterKL(Beta, tfd.Dirichlet)(_kl_divergence_dirichlet_dirichlet)
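# A minimal usage sketch for `Dirichlet`, assuming the standard distrax
# `sample`/`log_prob`/`kl_divergence` API; the concentration values are purely
# illustrative.
def _dirichlet_usage_sketch():
  """Builds a 3-dimensional Dirichlet and evaluates a few methods."""
  dist = Dirichlet(concentration=jnp.array([1.5, 2.0, 3.0]))
  # Four draws from the open simplex: shape (4, 3), with rows summing to ~1.
  samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=4)
  log_probs = dist.log_prob(samples)  # Shape (4,).
  mean = dist.mean()  # alpha / sum(alpha).
  # KL against another Dirichlet of the same event dimension, using the KL
  # registered above (Beta operands are also supported).
  kl = dist.kl_divergence(Dirichlet(concentration=jnp.ones(3)))
  return samples, log_probs, mean, kl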
| distrax-master | distrax/_src/distributions/dirichlet.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple mixture of two (possibly heterogeneous) distribution."""
from typing import Tuple
import chex
from distrax._src.distributions import distribution as base_distribution
from distrax._src.utils import conversion
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
DistributionLike = base_distribution.DistributionLike
EventT = base_distribution.EventT
class MixtureOfTwo(base_distribution.Distribution):
"""A mixture of two distributions."""
def __init__(
self,
prob_a: Numeric,
component_a: DistributionLike,
component_b: DistributionLike):
"""Creates a mixture of two distributions.
    Unlike `MixtureSameFamily`, the component distributions are allowed to
    belong to different families.
    Args:
      prob_a: the scalar mixture weight of `component_a`; a float or a rank-0
        array.
component_a: the first component distribution.
component_b: the second component distribution.
"""
super().__init__()
# Validate inputs.
chex.assert_rank(prob_a, 0)
if component_a.event_shape != component_b.event_shape:
raise ValueError(
f'The component distributions must have the same event shape, but '
f'{component_a.event_shape} != {component_b.event_shape}.')
if component_a.batch_shape != component_b.batch_shape:
raise ValueError(
f'The component distributions must have the same batch shape, but '
f'{component_a.batch_shape} != {component_b.batch_shape}.')
if component_a.dtype != component_b.dtype:
raise ValueError(
'The component distributions must have the same dtype, but'
f' {component_a.dtype} != {component_b.dtype}.')
# Store args.
self._prob_a = prob_a
self._component_a = conversion.as_distribution(component_a)
self._component_b = conversion.as_distribution(component_b)
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
key, key_a, key_b, mask_key = jax.random.split(key, num=4)
mask_from_a = jax.random.bernoulli(mask_key, p=self._prob_a, shape=[n])
sample_a = self._component_a.sample(seed=key_a, sample_shape=n)
sample_b = self._component_b.sample(seed=key_b, sample_shape=n)
mask_from_a = jnp.expand_dims(mask_from_a, tuple(range(1, sample_a.ndim)))
return jnp.where(mask_from_a, sample_a, sample_b)
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
logp1 = jnp.log(self._prob_a) + self._component_a.log_prob(value)
logp2 = jnp.log(1 - self._prob_a) + self._component_b.log_prob(value)
return jnp.logaddexp(logp1, logp2)
@property
def event_shape(self) -> Tuple[int, ...]:
return self._component_a.event_shape
@property
def batch_shape(self) -> Tuple[int, ...]:
return self._component_a.batch_shape
@property
def prob_a(self) -> Numeric:
return self._prob_a
@property
def prob_b(self) -> Numeric:
return 1. - self._prob_a
def __getitem__(self, index) -> 'MixtureOfTwo':
"""See `Distribution.__getitem__`."""
index = base_distribution.to_batch_shape_index(self.batch_shape, index)
return MixtureOfTwo(
prob_a=self.prob_a,
component_a=self._component_a[index],
component_b=self._component_b[index])
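# A minimal usage sketch for `MixtureOfTwo`, assuming the standard distrax
# `sample`/`log_prob` API; it mixes two TFP components (converted internally
# via `conversion.as_distribution`), with purely illustrative parameters.
def _mixture_of_two_usage_sketch():
  """Mixes a Normal and a Laplace component with matching shapes and dtypes."""
  component_a = tfd.Normal(loc=jnp.zeros(3), scale=jnp.ones(3))
  component_b = tfd.Laplace(loc=jnp.zeros(3), scale=jnp.ones(3))
  dist = MixtureOfTwo(
      prob_a=0.3, component_a=component_a, component_b=component_b)
  samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=5)  # (5, 3).
  log_probs = dist.log_prob(samples)  # (5, 3), log of the two-term mixture.
  return samples, log_probs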
| distrax-master | distrax/_src/distributions/mixture_of_two.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `mixture_same_family.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions.categorical import Categorical
from distrax._src.distributions.mixture_same_family import MixtureSameFamily
from distrax._src.distributions.mvn_diag import MultivariateNormalDiag
from distrax._src.distributions.normal import Normal
from distrax._src.utils import equivalence
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
class TFPLogitsCategoricalTFPMultivariateComponents(equivalence.EquivalenceTest
):
"""Class to test distrax mixture against tfp mixture.
There are 4 methods to create categorical and components distributions (one
each for tfp and distrax). Those are used to instantiate the input
distributions for the mixtures to be tested against each other. By overloading
these methods, different combinations can be tested.
  This class tests the case of using tfp distributions (a categorical built
  from logits) as inputs to both the tfp and the distrax mixture.
"""
def _make_tfp_categorical(self, logits, probs):
"""Categorical distribution for tfp reference mixture."""
del probs
return tfd.Categorical(logits=logits)
def _make_tfp_components(self, key_loc, key_scale):
"""Components distribution for tfp reference mixture."""
components_shape = self.batch_shape + (self.num_components,) + (2,)
return tfd.MultivariateNormalDiag(
loc=jax.random.normal(key=key_loc, shape=components_shape),
scale_diag=jax.random.uniform(key=key_scale, shape=components_shape)+.5)
# Define functions to create input distributions for the Distrax mixture. This
# class tests Distrax mixture using the same TFP input distributions in both
# TFP and Distrax. Subclasses will use different combinations.
_make_categorical = _make_tfp_categorical
_make_components = _make_tfp_components
def setUp(self):
super().setUp()
self._init_distr_cls(MixtureSameFamily)
self.batch_shape = (5, 4)
self.num_components = 3
logits_shape = self.batch_shape + (self.num_components,)
logits = jax.random.normal(key=jax.random.PRNGKey(42),
shape=logits_shape)
probs = jax.nn.softmax(logits, axis=-1)
self.mixture_dist = self._make_categorical(logits, probs)
self.tfp_mixture_dist = self._make_tfp_categorical(logits, probs)
key_loc, key_scale = jax.random.split(jax.random.PRNGKey(42))
self.components_dist = self._make_components(key_loc, key_scale)
self.tfp_components_dist = self._make_tfp_components(key_loc, key_scale)
def test_event_shape(self):
super()._test_event_shape(
(),
dist_kwargs={
'mixture_distribution': self.mixture_dist,
'components_distribution': self.components_dist
},
tfp_dist_kwargs={
'mixture_distribution': self.tfp_mixture_dist,
'components_distribution': self.tfp_components_dist
},
)
def test_batch_shape(self):
super()._test_batch_shape(
(),
dist_kwargs={
'mixture_distribution': self.mixture_dist,
'components_distribution': self.components_dist
},
tfp_dist_kwargs={
'mixture_distribution': self.tfp_mixture_dist,
'components_distribution': self.tfp_components_dist
},
)
def test_invalid_parameters(self):
logits_shape = (1,) + self.batch_shape + (self.num_components,)
logits = jnp.ones(logits_shape, dtype=jnp.float32)
probs = jax.nn.softmax(logits, axis=-1)
key_loc, key_scale = jax.random.split(jax.random.PRNGKey(42))
self._test_raises_error(dist_kwargs={
'mixture_distribution': self._make_categorical(logits, probs),
'components_distribution': self._make_components(key_loc, key_scale),
})
@chex.all_variants
@parameterized.named_parameters(
('empty shape', ()),
('int shape', 10),
('2-tuple shape', (10, 20)),
)
def test_sample_shape(self, sample_shape):
super()._test_sample_shape(
(),
dist_kwargs={
'mixture_distribution': self.mixture_dist,
'components_distribution': self.components_dist
},
tfp_dist_kwargs={
'mixture_distribution': self.tfp_mixture_dist,
'components_distribution': self.tfp_components_dist
},
sample_shape=sample_shape)
@chex.all_variants
def test_sample_dtype(self):
dist = self.distrax_cls(
mixture_distribution=self.mixture_dist,
components_distribution=self.components_dist)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(dist.dtype, samples.dtype)
self.assertEqual(dist.dtype, self.components_dist.dtype)
@chex.all_variants()
@parameterized.named_parameters(
('empty shape', ()),
('int shape', 10),
('2-tuple shape', (10, 20)),
)
def test_sample_and_log_prob(self, sample_shape):
super()._test_sample_and_log_prob(
dist_args=(),
dist_kwargs={
'mixture_distribution': self.mixture_dist,
'components_distribution': self.components_dist
},
tfp_dist_kwargs={
'mixture_distribution': self.tfp_mixture_dist,
'components_distribution': self.tfp_components_dist
},
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=2e-3))
# `pmap` must have at least one non-None value in `in_axes`.
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('mean', 'mean'),
('variance', 'variance'),
('stddev', 'stddev'),
)
def test_method(self, function_string):
super()._test_attribute(
attribute_string=function_string,
dist_kwargs={
'mixture_distribution': self.mixture_dist,
'components_distribution': self.components_dist
},
tfp_dist_kwargs={
'mixture_distribution': self.tfp_mixture_dist,
'components_distribution': self.tfp_components_dist
},
assertion_fn=self.assertion_fn(rtol=2e-3))
def test_jittable(self):
super()._test_jittable(
dist_kwargs={
'mixture_distribution': self.mixture_dist,
'components_distribution': self.components_dist},
assertion_fn=self.assertion_fn(rtol=1e-3))
class TFPLogitsCategoricalTFPUnivariateComponents(
TFPLogitsCategoricalTFPMultivariateComponents):
def _make_tfp_components(self, key_loc, key_scale):
components_shape = self.batch_shape + (self.num_components,)
return tfd.Normal(
loc=jax.random.normal(key=key_loc, shape=components_shape),
scale=jax.random.uniform(key=key_scale, shape=components_shape)+0.5)
_make_components = _make_tfp_components
# It is enough to test a single type of components for a `tfp.Categorical`
# built with `probs`.
class TFPProbsCategoricalTFPMultivariateComponents(
TFPLogitsCategoricalTFPMultivariateComponents):
def _make_categorical(self, logits, probs):
del logits
return tfd.Categorical(probs=probs)
class DistraxLogitsCategoricalTFPMultivariateComponents(
TFPLogitsCategoricalTFPMultivariateComponents):
def _make_categorical(self, logits, probs):
del probs
return Categorical(logits=logits)
class DistraxProbsCategoricalTFPMultivariateComponents(
TFPLogitsCategoricalTFPMultivariateComponents):
def _make_categorical(self, logits, probs):
del logits
return Categorical(probs=probs)
class DistraxLogitsCategoricalTFPUnivariateComponents(
TFPLogitsCategoricalTFPUnivariateComponents):
def _make_categorical(self, logits, probs):
del probs
return Categorical(logits=logits)
class DistraxLogitsCategoricalDistraxMultivariateComponents(
DistraxLogitsCategoricalTFPMultivariateComponents):
def _make_components(self, key_loc, key_scale):
components_shape = self.batch_shape + (self.num_components,) + (2,)
return MultivariateNormalDiag(
loc=jax.random.normal(key=key_loc, shape=components_shape),
scale_diag=jax.random.uniform(key=key_scale, shape=components_shape) +
0.5)
class DistraxLogitsCategoricalDistraxUnivariateComponents(
DistraxLogitsCategoricalTFPUnivariateComponents):
def _make_components(self, key_loc, key_scale):
components_shape = self.batch_shape + (self.num_components,)
return Normal(
loc=jax.random.normal(key=key_loc, shape=components_shape),
scale=jax.random.uniform(key=key_scale, shape=components_shape) + 0.5)
class TFPLogitsCategoricalDistraxMultivariateComponents(
TFPLogitsCategoricalTFPMultivariateComponents):
def _make_components(self, key_loc, key_scale):
components_shape = self.batch_shape + (self.num_components,) + (2,)
return MultivariateNormalDiag(
loc=jax.random.normal(key=key_loc, shape=components_shape),
scale_diag=jax.random.uniform(key=key_scale, shape=components_shape) +
0.5)
class TFPLogitsCategoricalDistraxUnivariateComponents(
TFPLogitsCategoricalTFPUnivariateComponents):
def _make_components(self, key_loc, key_scale):
components_shape = self.batch_shape + (self.num_components,)
return Normal(
loc=jax.random.normal(key=key_loc, shape=components_shape),
scale=jax.random.uniform(key=key_scale, shape=components_shape) + 0.5)
class MixtureSameFamilySlicingTest(parameterized.TestCase):
"""Class to test the `getitem` method."""
def setUp(self):
super().setUp()
self.loc = np.random.randn(2, 3, 4, 5)
self.scale_diag = np.abs(np.random.randn(2, 3, 4, 5))
self.components_dist = MultivariateNormalDiag(
loc=self.loc, scale_diag=self.scale_diag)
self.logits = np.random.randn(2, 3, 4)
self.mixture_dist = Categorical(logits=self.logits)
self.dist = MixtureSameFamily(self.mixture_dist, self.components_dist)
def assertion_fn(self, rtol):
return lambda x, y: np.testing.assert_allclose(x, y, rtol=rtol)
@parameterized.named_parameters(
('single element', 1, (3,)),
('range', slice(-1), (1, 3)),
('range_2', (slice(None), slice(-1)), (2, 2)),
)
def test_slice(self, slice_, expected_batch_shape):
sliced_dist = self.dist[slice_]
self.assertEqual(sliced_dist.batch_shape, expected_batch_shape)
self.assertEqual(sliced_dist.event_shape, self.dist.event_shape)
self.assertEqual(sliced_dist.mixture_distribution.logits.shape[-1],
self.dist.mixture_distribution.logits.shape[-1])
self.assertIsInstance(sliced_dist, MixtureSameFamily)
self.assertIsInstance(
sliced_dist.components_distribution, MultivariateNormalDiag)
self.assertIsInstance(sliced_dist.mixture_distribution, Categorical)
self.assertion_fn(rtol=2e-3)(
sliced_dist.components_distribution.loc, self.loc[slice_])
self.assertion_fn(rtol=2e-3)(
sliced_dist.components_distribution.scale_diag, self.scale_diag[slice_])
def test_slice_ellipsis(self):
sliced_dist = self.dist[..., -1]
expected_batch_shape = (2,)
self.assertEqual(sliced_dist.batch_shape, expected_batch_shape)
self.assertEqual(sliced_dist.event_shape, self.dist.event_shape)
self.assertEqual(sliced_dist.mixture_distribution.logits.shape[-1],
self.dist.mixture_distribution.logits.shape[-1])
self.assertIsInstance(sliced_dist, MixtureSameFamily)
self.assertIsInstance(
sliced_dist.components_distribution, MultivariateNormalDiag)
self.assertIsInstance(sliced_dist.mixture_distribution, Categorical)
self.assertion_fn(rtol=2e-3)(
sliced_dist.components_distribution.loc,
self.loc[:, -1, ...])
self.assertion_fn(rtol=2e-3)(
sliced_dist.components_distribution.scale_diag,
self.scale_diag[:, -1, ...])
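# A minimal construction sketch mirroring `setUp` above, assuming the distrax
# `MixtureSameFamily` constructor keywords exercised in these tests; shapes are
# purely illustrative.
def _mixture_same_family_usage_sketch():
  """Builds a batched mixture of diagonal Gaussians and evaluates it."""
  logits = jnp.zeros((5, 4, 3))  # batch_shape=(5, 4) with 3 components.
  components_shape = (5, 4, 3, 2)  # Event dimension of size 2 per component.
  dist = MixtureSameFamily(
      mixture_distribution=Categorical(logits=logits),
      components_distribution=MultivariateNormalDiag(
          loc=jnp.zeros(components_shape),
          scale_diag=jnp.ones(components_shape)))
  samples = dist.sample(seed=jax.random.PRNGKey(0))  # Shape (5, 4, 2).
  return dist.log_prob(samples)  # Shape (5, 4).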
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/mixture_same_family_test.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `von_mises.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import von_mises
from distrax._src.utils import equivalence
import jax
import jax.numpy as jnp
import numpy as np
from scipy import stats as sp_stats
import scipy.special
from tensorflow_probability.substrates import jax as tfp
class VonMisesTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(von_mises.VonMises)
self.loc = np.reshape(np.linspace(-5., 5., 7), [-1, 1])
self.concentration = np.reshape(np.logspace(-3., 3., 7), [1, -1])
self.rng = np.random.default_rng(317070)
@parameterized.named_parameters(
('1d std vonmises', (0, 1)),
('2d std vonmises', (np.zeros(2), np.ones(2))),
('rank 2 std vonmises', (np.zeros((3, 2)), np.ones((3, 2)))),
('broadcasted loc', (0, np.ones(3))),
('broadcasted concentration', (np.ones(3), 1)),
)
def test_event_shape(self, distr_params):
super()._test_event_shape(distr_params, dict())
@parameterized.named_parameters(
('0d concentration', ()),
('1d concentration', (4,)),
('2d concentration', (3, 4)),
)
def test_loc_shape_properties(self, shape):
loc = self.rng.uniform()
concentration = self.rng.uniform(size=shape)
dist = self.distrax_cls(loc=loc, concentration=concentration)
self.assertion_fn(rtol=1e-3)(dist.loc, loc)
self.assertion_fn(rtol=1e-3)(dist.concentration, concentration)
self.assertEqual(dist.event_shape, ())
self.assertEqual(dist.batch_shape, shape)
@parameterized.named_parameters(
('0d loc', ()),
('1d loc', (4,)),
('2d loc', (3, 4)),
)
def test_concentration_shape_properties(self, shape):
loc = self.rng.uniform(size=shape)
concentration = self.rng.uniform()
dist = self.distrax_cls(loc=loc, concentration=concentration)
self.assertion_fn(rtol=1e-3)(dist.loc, loc)
self.assertion_fn(rtol=1e-3)(dist.concentration, concentration)
self.assertEqual(dist.event_shape, ())
self.assertEqual(dist.batch_shape, shape)
@chex.all_variants
@parameterized.named_parameters(
('1d std vonmises, no shape', (0, 1), ()),
('1d std vonmises, int shape', (0, 1), 1),
('1d std vonmises, 1-tuple shape', (0, 1), (1,)),
('1d std vonmises, 2-tuple shape', (0, 1), (2, 2)),
('2d std vonmises, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std vonmises, int shape', ([0, 0], [1, 1]), 1),
('2d std vonmises, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std vonmises, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
('rank 2 std vonmises, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_shape(distr_params, dict(), sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('1d std vonmises, no shape', (0, 1), ()),
('1d std vonmises, int shape', (0, 1), 1),
('1d std vonmises, 1-tuple shape', (0, 1), (1,)),
('1d std vonmises, 2-tuple shape', (0, 1), (2, 2)),
('2d std vonmises, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std vonmises, int shape', ([0, 0], [1, 1]), 1),
('2d std vonmises, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std vonmises, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
('rank 2 std vonmises, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_and_log_prob(
dist_args=distr_params,
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('sample, float16', 'sample', jnp.float16),
('sample, float32', 'sample', jnp.float32),
('sample_and_log_prob, float16', 'sample_and_log_prob', jnp.float16),
('sample_and_log_prob, float32', 'sample_and_log_prob', jnp.float32),
)
def test_sample_dtype(self, method, dtype):
dist = self.distrax_cls(
loc=self.loc.astype(dtype),
concentration=self.concentration.astype(dtype),
)
samples = self.variant(getattr(dist, method))(seed=self.key)
samples = samples[0] if method == 'sample_and_log_prob' else samples
self.assertEqual(samples.dtype, dist.dtype)
self.assertEqual(samples.dtype, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0., 1.), np.array([1, 2])),
('2d dist, 1d value', (np.zeros(2), np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 1), 1),
('2d dist, 2d value', (np.zeros(2), np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), np.pi),
)
def test_method_with_input(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
for method in [
'log_prob', 'prob', 'cdf', 'log_cdf', 'survival_function',
'log_survival_function'
]:
with self.subTest(method):
super()._test_attribute(
attribute_string=method,
dist_args=distr_params,
dist_kwargs={},
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy', (0., 1.), 'entropy'),
('mean', (0, 1), 'mean'),
('mean from 1d params', ([-1, 1], [1, 2]), 'mean'),
('variance', (0, 1), 'variance'),
('variance from np params', (np.ones(2), np.ones(2)), 'variance'),
('stddev', (0, 1), 'stddev'),
('stddev from rank 2 params', (np.ones((2, 3)), np.ones(
(2, 3))), 'stddev'),
('mode', (0, 1), 'mode'),
)
def test_method(self, distr_params, function_string):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_attribute(
function_string,
distr_params,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'loc': self.rng.standard_normal((4, 1, 2)),
'concentration': np.asarray([[0.8, 0.2], [0.1, 1.2], [1.4, 3.1]]),
},
dist2_kwargs={
'loc': self.rng.standard_normal((3, 2)),
'concentration': 0.1 + self.rng.standard_normal((4, 1, 2)),
},
assertion_fn=self.assertion_fn(rtol=1e-2))
def test_jittable(self):
super()._test_jittable(
(np.zeros((3,)), np.ones((3,))),
assertion_fn=functools.partial(
np.testing.assert_allclose, rtol=1e-04, atol=1e-04
)
)
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
loc = jnp.array(self.rng.standard_normal((3, 4, 5)))
concentration = jnp.array(self.rng.standard_normal((3, 4, 5)))
dist = self.distrax_cls(loc=loc, concentration=concentration)
self.assertion_fn(rtol=1e-2)(dist[slice_].mean(), loc[slice_])
def test_slice_different_parameterization(self):
loc = jnp.array(self.rng.standard_normal((4)))
concentration = jnp.array(self.rng.standard_normal((3, 4)))
dist = self.distrax_cls(loc=loc, concentration=concentration)
self.assertion_fn(rtol=1e-2)(dist[0].mean(), loc) # Not slicing loc.
self.assertion_fn(rtol=1e-2)(dist[0].concentration, concentration[0])
def test_von_mises_log_pdf(self):
locs_v = .1
concentrations_v = .2
x = np.array([2., 3., 4., 5., 6., 7.])
vm = self.distrax_cls(locs_v, concentrations_v)
expected_log_prob = sp_stats.vonmises.logpdf( # pytype: disable=module-attr
x,
concentrations_v,
loc=locs_v
)
log_prob = vm.log_prob(x)
np.testing.assert_allclose(
expected_log_prob, log_prob, rtol=1e-04, atol=1e-04
)
def test_von_mises_log_pdf_uniform(self):
x = np.array([2., 3., 4., 5., 6., 7.])
vm = self.distrax_cls(.1, 0.)
log_prob = vm.log_prob(x)
expected_log_prob = np.array([-np.log(2. * np.pi)] * 6)
np.testing.assert_allclose(
expected_log_prob, log_prob, rtol=1e-04, atol=1e-04
)
def test_von_mises_pdf(self):
locs_v = .1
concentrations_v = .2
x = np.array([2., 3., 4., 5., 6., 7.])
vm = self.distrax_cls(locs_v, concentrations_v)
prob = vm.prob(x)
expected_prob = sp_stats.vonmises.pdf( # pytype: disable=module-attr
x, concentrations_v, loc=locs_v
)
np.testing.assert_allclose(expected_prob, prob, rtol=1e-04, atol=1e-04)
def test_von_mises_pdf_uniform(self):
x = np.array([2., 3., 4., 5., 6., 7.])
vm = self.distrax_cls(1., 0.)
prob = vm.prob(x)
expected_prob = np.array([1. / (2. * np.pi)] * 6)
np.testing.assert_allclose(expected_prob, prob, rtol=1.5e-7, atol=1e-7)
def test_von_mises_cdf(self):
# We follow the scipy definition for cdf when loc=0 and x is in [-pi, pi].
locs_v = np.zeros(shape=(7, 1, 1))
concentrations_v = np.reshape(np.logspace(-3., 3., 7), [1, -1, 1])
x = np.reshape(np.linspace(-np.pi, np.pi, 7), [1, 1, -1])
vm = self.distrax_cls(locs_v, concentrations_v)
cdf = vm.cdf(x)
expected_cdf = sp_stats.vonmises.cdf( # pytype: disable=module-attr
x, concentrations_v, loc=locs_v
)
np.testing.assert_allclose(expected_cdf, cdf, atol=1e-4, rtol=1e-4)
def test_von_mises_survival_function(self):
locs_v = np.reshape(np.linspace(-5, 5, 7), [-1, 1, 1])
concentrations_v = np.reshape(np.logspace(-3., 3., 7), [1, -1, 1])
x = np.reshape(np.linspace(-5, 5, 7), [1, 1, -1])
vm = self.distrax_cls(locs_v, concentrations_v)
cdf = vm.cdf(x)
surv = vm.survival_function(x)
np.testing.assert_allclose(surv, 1 - cdf, atol=1e-4, rtol=1e-4)
def test_von_mises_cdf_out_of_bounds(self):
locs_v = np.reshape(np.linspace(-np.pi, np.pi, 7), [-1, 1, 1])
concentrations_v = np.reshape(np.logspace(-3., 3., 7), [1, -1, 1])
vm = self.distrax_cls(locs_v, concentrations_v)
x = np.linspace(-5 * np.pi, -np.pi, 7)
cdf = vm.cdf(x)
expected_cdf = 0.
np.testing.assert_allclose(expected_cdf, cdf, rtol=1.5e-7, atol=1e-7)
x = np.linspace(np.pi, 5 * np.pi, 7)
cdf = vm.cdf(x)
expected_cdf = 1.
np.testing.assert_allclose(expected_cdf, cdf, rtol=1.5e-7, atol=1e-7)
def test_von_mises_log_cdf(self):
locs_v = np.reshape(np.linspace(-5, 5, 7), [-1, 1, 1])
concentrations_v = np.reshape(np.logspace(-3., 3., 7), [1, -1, 1])
x = np.reshape(np.linspace(-5, 5, 7), [1, 1, -1])
vm = self.distrax_cls(locs_v, concentrations_v)
cdf = vm.cdf(x)
logcdf = vm.log_cdf(x)
np.testing.assert_allclose(np.log(cdf), logcdf, atol=1e-4, rtol=1e-4)
def test_von_mises_log_survival(self):
locs_v = np.reshape(np.linspace(-5, 5, 7), [-1, 1, 1])
concentrations_v = np.reshape(np.logspace(-3., 3., 7), [1, -1, 1])
x = np.reshape(np.linspace(-5, 5, 7), [1, 1, -1])
vm = self.distrax_cls(locs_v, concentrations_v)
surv = vm.survival_function(x)
logsurv = vm.log_survival_function(x)
np.testing.assert_allclose(np.log(surv), logsurv, atol=1e-4, rtol=1e-4)
def test_von_mises_cdf_uniform(self):
x = np.linspace(-np.pi, np.pi, 7)
vm = self.distrax_cls(0., 0.)
cdf = vm.cdf(x)
expected_cdf = (x + np.pi) / (2. * np.pi)
np.testing.assert_allclose(expected_cdf, cdf, rtol=1.5e-7, atol=1e-7)
def test_von_mises_cdf_gradient_simple(self):
n = 10
locs = jnp.array([1.] * n)
concentrations = np.logspace(-3, 3, n)
x = np.linspace(-5, 5, n)
def f(x, l, c):
vm = self.distrax_cls(l, c)
cdf = vm.cdf(x)
return cdf
jax.test_util.check_grads(f, (x, locs, concentrations), order=1)
def test_von_mises_sample_gradient(self):
n = 10
locs = jnp.array([1.] * n)
concentrations = np.logspace(-3, 3, n)
def f(l, c):
vm = self.distrax_cls(l, c)
x = vm.sample(seed=1)
return x
jax.test_util.check_grads(
f,
(locs, concentrations),
order=1,
rtol=0.1
)
def test_von_mises_uniform_sample_gradient(self):
def f(c):
vm = self.distrax_cls(0., c)
x = vm.sample(seed=1)
return x
# The finite difference is not very accurate, but the analytic gradient
# should not be zero.
self.assertNotEqual(jax.grad(f)(0.), 0)
@parameterized.named_parameters(
('small concentration', 1.),
('medium concentration', 10.),
('large concentration', 1000.),
('uniform', 1e-6),
)
def test_von_mises_sample_gradient_comparison(self, concentration):
# Compares the von Mises sampling gradient against the reference
# implementation from tensorflow_probability.
locs = 0.
def f(seed, l, c):
vm = self.distrax_cls(l, c)
x = vm.sample(seed=seed) # pylint: disable=cell-var-from-loop
return x
jax_sample_and_grad = jax.value_and_grad(f, argnums=2)
def samples_grad(s, concentration):
broadcast_concentration = concentration
_, dcdf_dconcentration = tfp.math.value_and_gradient(
lambda conc: tfp.distributions.von_mises.von_mises_cdf(s, conc),
broadcast_concentration)
inv_prob = np.exp(-concentration * (np.cos(s) - 1.)) * (
(2. * np.pi) * scipy.special.i0e(concentration)
)
# Computes the implicit derivative,
# dz = dconc * -(dF(z; conc) / dconc) / p(z; conc)
dsamples = -dcdf_dconcentration * inv_prob
return dsamples
for seed in range(10):
sample, sample_grad = jax_sample_and_grad(
seed, jnp.array(locs), jnp.array(concentration)
)
comparison = samples_grad(sample, concentration)
np.testing.assert_allclose(
comparison, sample_grad, rtol=1e-06, atol=1e-06
)
def test_von_mises_sample_moments(self):
locs_v = np.array([-1., 0.3, 2.3])
concentrations_v = np.array([1., 2., 10.])
vm = self.distrax_cls(locs_v, concentrations_v)
n = 1000
samples = vm.sample(sample_shape=(n,), seed=1)
expected_mean = vm.mean()
actual_mean = jnp.arctan2(
jnp.mean(jnp.sin(samples), axis=0),
jnp.mean(jnp.cos(samples), axis=0),
)
expected_variance = vm.variance()
standardized_samples = samples - vm.mean()
variance_samples = jnp.mean(1. - jnp.cos(standardized_samples), axis=0)
np.testing.assert_allclose(actual_mean, expected_mean, rtol=0.1)
np.testing.assert_allclose(
variance_samples, expected_variance, rtol=0.1
)
def test_von_mises_sample_variance_uniform(self):
vm = self.distrax_cls(1., 0.)
n = 1000
samples = vm.sample(sample_shape=(n,), seed=1)
# For circular uniform distribution, the mean is not well-defined,
# so only checking the variance.
expected_variance = 1.
standardized_samples = samples - vm.mean()
variance_samples = jnp.mean(1. - jnp.cos(standardized_samples), axis=0)
np.testing.assert_allclose(
variance_samples, expected_variance, rtol=0.1
)
def test_von_mises_sample_extreme_concentration(self):
loc = jnp.array([1., np.nan, 1., 1., np.nan])
min_value = np.finfo(np.float32).min
max_value = np.finfo(np.float32).max
concentration = jnp.array([min_value, 1., max_value, np.nan, np.nan])
vm = self.distrax_cls(loc, concentration)
samples = vm.sample(seed=1)
# Check that it does not end up in an infinite loop.
self.assertEqual(samples.shape, (5,))
def test_von_mises_sample_ks_test(self):
concentrations_v = np.logspace(-3, 3, 7)
# We are fixing the location to zero. The reason is that for loc != 0,
# scipy's von Mises distribution CDF becomes shifted, so it's no longer
# in [0, 1], but is in something like [-0.3, 0.7]. This breaks kstest.
vm = self.distrax_cls(0., concentrations_v)
n = 1000
sample_values = vm.sample(sample_shape=(n,), seed=1)
self.assertEqual(sample_values.shape, (n, 7))
fails = 0
trials = 0
for concentrationi, concentration in enumerate(concentrations_v):
s = sample_values[:, concentrationi]
trials += 1
p = sp_stats.kstest(
s,
sp_stats.vonmises(concentration).cdf # pytype: disable=not-callable
)[1]
if p <= 0.05:
fails += 1
self.assertLess(fails, trials * 0.1)
def test_von_mises_sample_uniform_ks_test(self):
locs_v = np.linspace(-10., 10., 7)
vm = self.distrax_cls(locs_v, 0.)
n = 1000
sample_values = vm.sample(sample_shape=(n,), seed=1)
self.assertEqual(sample_values.shape, (n, 7))
fails = 0
trials = 0
for loci, _ in enumerate(locs_v):
s = sample_values[:, loci]
# [-pi, pi] -> [0, 1]
s = (s + np.pi) / (2. * np.pi)
trials += 1
# Compare to the CDF of Uniform(0, 1) random variable.
p = sp_stats.kstest(s, sp_stats.uniform.cdf)[1]
if p <= 0.05:
fails += 1
self.assertLess(fails, trials * 0.1)
def test_von_mises_sample_average_gradient(self):
loc = jnp.array([1.] * 7)
concentration = np.logspace(-3, 3, 7)
grad_ys = np.ones(7, dtype=np.float32)
n = 1000
def loss(loc, concentration):
vm = self.distrax_cls(loc, concentration)
samples = vm.sample(sample_shape=(n,), seed=1)
return jnp.mean(samples, axis=0)
grad_loc, grad_concentration = jnp.vectorize(
jax.grad(loss, argnums=(0, 1)),
signature='(),()->(),()',
)(loc, concentration)
# dsamples / dloc = 1 => dloss / dloc = dloss / dsamples = grad_ys
np.testing.assert_allclose(grad_loc, grad_ys, atol=1e-1, rtol=1e-1)
np.testing.assert_allclose(grad_concentration, [0.]*7, atol=1e-1, rtol=1e-1)
def test_von_mises_sample_circular_variance_gradient(self):
loc = jnp.array([1.] * 7)
concentration = np.logspace(-3, 3, 7)
n = 1000
def loss(loc, concentration):
vm = self.distrax_cls(loc, concentration)
samples = vm.sample(sample_shape=(n,), seed=1)
return jnp.mean(1-jnp.cos(samples-loc), axis=0)
grad_loc, grad_concentration = jnp.vectorize(
jax.grad(loss, argnums=(0, 1)),
signature='(),()->(),()',
)(loc, concentration)
def analytical_loss(concentration):
i1e = jax.scipy.special.i1e(concentration)
i0e = jax.scipy.special.i0e(concentration)
return 1. - i1e / i0e
expected_grad_concentration = jnp.vectorize(
jax.grad(analytical_loss)
)(concentration)
np.testing.assert_allclose(grad_loc, [0.] * 7, atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(
grad_concentration, expected_grad_concentration, atol=1e-1, rtol=1e-1)
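# A minimal usage sketch of the distribution under test, assuming the
# `VonMises(loc, concentration)` constructor and the sampling behaviour
# exercised above; parameter values are purely illustrative.
def _von_mises_usage_sketch():
  """Draws von Mises samples and computes their circular mean."""
  dist = von_mises.VonMises(loc=0., concentration=2.)
  # Samples are angles in [-pi, pi]; sampling is reparameterized, so gradients
  # w.r.t. `loc` and `concentration` flow through `sample` (see tests above).
  samples = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=1000)
  circular_mean = jnp.arctan2(
      jnp.mean(jnp.sin(samples)), jnp.mean(jnp.cos(samples)))
  return circular_mean, dist.mean(), dist.variance()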
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/von_mises_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `independent.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import independent
from distrax._src.distributions import mvn_diag
from distrax._src.distributions import normal
from distrax._src.utils import equivalence
import jax
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
class IndependentTest(parameterized.TestCase):
"""Class to test miscellaneous methods of the `Independent` distribution."""
def setUp(self):
super().setUp()
self.loc = np.random.randn(2, 3, 4)
self.scale = np.abs(np.random.randn(2, 3, 4))
self.base = normal.Normal(loc=self.loc, scale=self.scale)
self.dist = independent.Independent(self.base, reinterpreted_batch_ndims=1)
def assertion_fn(self, rtol):
return lambda x, y: np.testing.assert_allclose(x, y, rtol=rtol)
@parameterized.parameters(None, 0, 1, 2)
def test_constructor_is_jittable_given_ndims(self, ndims):
constructor = lambda d: independent.Independent(d, ndims)
jax.jit(constructor)(self.base)
@parameterized.named_parameters(
('single element', 1, (3,)),
('range', slice(-1), (1, 3)),
('range_2', (slice(None), slice(-1)), (2, 2)),
)
def test_slice(self, slice_, expected_batch_shape):
sliced_dist = self.dist[slice_]
self.assertEqual(sliced_dist.batch_shape, expected_batch_shape)
self.assertEqual(sliced_dist.event_shape, self.dist.event_shape)
self.assertIsInstance(sliced_dist, independent.Independent)
self.assertIsInstance(sliced_dist.distribution, self.base.__class__)
self.assertion_fn(rtol=1e-3)(
sliced_dist.distribution.loc, self.loc[slice_])
self.assertion_fn(rtol=1e-3)(
sliced_dist.distribution.scale, self.scale[slice_])
def test_slice_ellipsis(self):
sliced_dist = self.dist[..., -1]
expected_batch_shape = (2,)
self.assertEqual(sliced_dist.batch_shape, expected_batch_shape)
self.assertEqual(sliced_dist.event_shape, self.dist.event_shape)
self.assertIsInstance(sliced_dist, independent.Independent)
self.assertIsInstance(sliced_dist.distribution, self.base.__class__)
self.assertion_fn(rtol=1e-3)(
sliced_dist.distribution.loc, self.loc[:, -1, :])
self.assertion_fn(rtol=1e-3)(
sliced_dist.distribution.scale, self.scale[:, -1, :])
def test_vmap_inputs(self):
def log_prob_sum(dist, x):
return dist.log_prob(x).sum()
base = normal.Normal(
jnp.arange(3 * 4 * 5).reshape((3, 4, 5)), jnp.ones((3, 4, 5)))
dist = independent.Independent(base, reinterpreted_batch_ndims=1)
x = jnp.zeros((3, 4, 5))
with self.subTest('no vmap'):
actual = log_prob_sum(dist, x)
expected = dist.log_prob(x).sum()
self.assertion_fn(rtol=1e-6)(actual, expected)
with self.subTest('axis=0'):
actual = jax.vmap(log_prob_sum, in_axes=0)(dist, x)
expected = dist.log_prob(x).sum(axis=1)
self.assertion_fn(rtol=1e-6)(actual, expected)
with self.subTest('axis=1'):
actual = jax.vmap(log_prob_sum, in_axes=1)(dist, x)
expected = dist.log_prob(x).sum(axis=0)
self.assertion_fn(rtol=1e-6)(actual, expected)
def test_vmap_outputs(self):
def summed_dist(loc, scale):
return independent.Independent(
normal.Normal(loc.sum(keepdims=True), scale.sum(keepdims=True)),
reinterpreted_batch_ndims=1)
loc = jnp.arange((3 * 4 * 5)).reshape((3, 4, 5))
scale = jnp.ones((3, 4, 5))
actual = jax.vmap(summed_dist, in_axes=0)(loc, scale)
expected = independent.Independent(
normal.Normal(loc.sum(axis=(1, 2), keepdims=True),
scale.sum(axis=(1, 2), keepdims=True)),
reinterpreted_batch_ndims=1)
np.testing.assert_equal(actual.batch_shape, expected.batch_shape)
np.testing.assert_equal(actual.event_shape, expected.event_shape)
x = jnp.array([[[1]], [[2]], [[3]]])
self.assertion_fn(rtol=1e-6)(actual.log_prob(x),
expected.log_prob(x))
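# A minimal usage sketch of `Independent`, assuming the constructor and methods
# exercised in the tests above; shapes are purely illustrative.
def _independent_usage_sketch():
  """Reinterprets the trailing batch dimension of a Normal as its event."""
  base = normal.Normal(loc=jnp.zeros((3, 4)), scale=jnp.ones((3, 4)))
  # batch_shape becomes (3,) and event_shape becomes (4,).
  dist = independent.Independent(base, reinterpreted_batch_ndims=1)
  x = jnp.zeros((3, 4))
  return dist.log_prob(x)  # Shape (3,): log-probs summed over the event dim.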
class TFPMultivariateNormalTest(equivalence.EquivalenceTest):
"""Class to test Distrax Independent distribution against its TFP counterpart.
This class tests the case when using a TFP multivariate Normal distribution
as input for the TFP and Distrax Independent. There are 2 methods to create
the base distributions, `_make_base_distribution` and
`_make_tfp_base_distribution`. By overloading these methods, different
base distributions can be used.
"""
def _make_tfp_base_distribution(self, loc, scale):
return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)
# Define the function to create the base distribution.
_make_base_distribution = _make_tfp_base_distribution
def setUp(self):
super().setUp()
self._init_distr_cls(independent.Independent)
self.normal_loc = jax.random.normal(
key=jax.random.PRNGKey(42), shape=(5, 4, 3, 2))
self.normal_scale = 0.5 + np.abs(
jax.random.normal(key=jax.random.PRNGKey(43), shape=(5, 4, 3, 2)))
self.normal_loc2 = jax.random.normal(
key=jax.random.PRNGKey(43), shape=(5, 4, 3, 2))
self.normal_scale2 = 0.5 + np.abs(
jax.random.normal(key=jax.random.PRNGKey(44), shape=(5, 4, 3, 2)))
# For most tests, we use `base_dist` and `tfp_base_dist` as base
# distributions. The latter is used as input for the TFP Independent, which
# we compare against.
self.base_dist = self._make_base_distribution(self.normal_loc,
self.normal_scale)
self.tfp_base_dist = self._make_tfp_base_distribution(self.normal_loc,
self.normal_scale)
# Some methods (e.g., the KL divergence) require two distributions. We
# define here the base distribution for those methods.
self.base_dist2 = self._make_base_distribution(self.normal_loc2,
self.normal_scale2)
self.tfp_base_dist2 = self._make_tfp_base_distribution(self.normal_loc2,
self.normal_scale2)
def test_invalid_parameters(self):
self._test_raises_error(
dist_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': -1})
self._test_raises_error(
dist_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': 10})
@parameterized.named_parameters(
('batch dims None', None),
('batch dims 0', 0),
('batch dims 1', 1),
('batch dims 2', 2),
('batch dims 3', 3),
)
def test_event_shape(self, batch_ndims):
super()._test_event_shape(
(),
dist_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': batch_ndims},
tfp_dist_kwargs={'distribution': self.tfp_base_dist,
'reinterpreted_batch_ndims': batch_ndims},
)
@parameterized.named_parameters(
('batch dims None', None),
('batch dims 0', 0),
('batch dims 1', 1),
('batch dims 2', 2),
('batch dims 3', 3),
)
def test_batch_shape(self, batch_ndims):
super()._test_batch_shape(
(),
dist_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': batch_ndims},
tfp_dist_kwargs={'distribution': self.tfp_base_dist,
'reinterpreted_batch_ndims': batch_ndims},
)
@chex.all_variants
@parameterized.named_parameters(
('batch dims None, empty shape', None, ()),
('batch dims None, int shape', None, 10),
('batch dims None, 2-tuple shape', None, (10, 20)),
('batch dims 1, empty shape', 1, ()),
('batch dims 1, int shape', 1, 10),
('batch dims 1, 2-tuple shape', 1, (10, 20)),
('batch dims 3, empty shape', 3, ()),
('batch dims 3, int shape', 3, 10),
('batch dims 3, 2-tuple shape', 3, (10, 20)),
)
def test_sample_shape(self, batch_ndims, sample_shape):
super()._test_sample_shape(
(),
dist_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': batch_ndims},
tfp_dist_kwargs={'distribution': self.tfp_base_dist,
'reinterpreted_batch_ndims': batch_ndims},
sample_shape=sample_shape)
@chex.all_variants
def test_sample_dtype(self):
dist = self.distrax_cls(self.base_dist)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(dist.dtype, samples.dtype)
self.assertEqual(dist.dtype, self.base_dist.dtype)
@chex.all_variants
@parameterized.named_parameters(
('batch dims None, empty shape', None, ()),
('batch dims None, int shape', None, 10),
('batch dims None, 2-tuple shape', None, (10, 20)),
('batch dims 1, empty shape', 1, ()),
('batch dims 1, int shape', 1, 10),
('batch dims 1, 2-tuple shape', 1, (10, 20)),
('batch dims 3, empty shape', 3, ()),
('batch dims 3, int shape', 3, 10),
('batch dims 3, 2-tuple shape', 3, (10, 20)),
)
def test_sample_and_log_prob(self, batch_ndims, sample_shape):
super()._test_sample_and_log_prob(
dist_args=(),
dist_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': batch_ndims},
tfp_dist_kwargs={'distribution': self.tfp_base_dist,
'reinterpreted_batch_ndims': batch_ndims},
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants
@parameterized.named_parameters(
('batch dims None', None, np.zeros((5, 4, 3, 2))),
('batch dims 0', 0, np.zeros((5, 4, 3, 2))),
('batch dims 1', 1, np.zeros((5, 4, 3, 2))),
('batch dims 2', 2, np.zeros((5, 4, 3, 2))),
('batch dims 3', 3, np.zeros((5, 4, 3, 2))),
)
def test_log_prob(self, batch_ndims, value):
super()._test_attribute(
attribute_string='log_prob',
dist_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': batch_ndims},
tfp_dist_kwargs={'distribution': self.tfp_base_dist,
'reinterpreted_batch_ndims': batch_ndims},
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy, batch dims None', 'entropy', None),
('entropy, batch dims 1', 'entropy', 1),
('entropy, batch dims 3', 'entropy', 3),
('mean, batch dims None', 'mean', None),
('mean, batch dims 1', 'mean', 1),
('mean, batch dims 3', 'mean', 3),
('variance, batch dims None', 'variance', None),
('variance, batch dims 1', 'variance', 1),
('variance, batch dims 3', 'variance', 3),
('stddev, batch dims None', 'stddev', None),
('stddev, batch dims 1', 'stddev', 1),
('stddev, batch dims 3', 'stddev', 3),
('mode, batch dims None', 'mode', None),
('mode, batch dims 1', 'mode', 1),
('mode, batch dims 3', 'mode', 3),
)
def test_method(self, function_string, batch_ndims):
super()._test_attribute(
attribute_string=function_string,
dist_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': batch_ndims},
tfp_dist_kwargs={'distribution': self.tfp_base_dist,
'reinterpreted_batch_ndims': batch_ndims},
assertion_fn=self.assertion_fn(rtol=1e-3))
@chex.all_variants(with_jit=False, with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax, no batch dims',
'kl_divergence', 'distrax_to_distrax', None),
('kl distrax_to_tfp, no batch dims',
'kl_divergence', 'distrax_to_tfp', None),
('kl tfp_to_distrax, no batch dims',
'kl_divergence', 'tfp_to_distrax', None),
('cross-ent distrax_to_distrax, no batch dims',
'cross_entropy', 'distrax_to_distrax', None),
('cross-ent distrax_to_tfp, no batch dims',
'cross_entropy', 'distrax_to_tfp', None),
('cross-ent tfp_to_distrax, no batch dims',
'cross_entropy', 'tfp_to_distrax', None),
('kl distrax_to_distrax, batch dims 2',
'kl_divergence', 'distrax_to_distrax', 2),
('kl distrax_to_tfp, batch dims 2',
'kl_divergence', 'distrax_to_tfp', 2),
('kl tfp_to_distrax, batch dims 2',
'kl_divergence', 'tfp_to_distrax', 2),
('cross-ent distrax_to_distrax, batch dims 2',
'cross_entropy', 'distrax_to_distrax', 2),
('cross-ent distrax_to_tfp, batch dims 2',
'cross_entropy', 'distrax_to_tfp', 2),
('cross-ent tfp_to_distrax, batch dims 2',
'cross_entropy', 'tfp_to_distrax', 2),
)
def test_with_two_distributions(self, function_string, mode_string,
batch_ndims):
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': batch_ndims},
dist2_kwargs={'distribution': self.base_dist2,
'reinterpreted_batch_ndims': batch_ndims},
tfp_dist1_kwargs={'distribution': self.tfp_base_dist,
'reinterpreted_batch_ndims': batch_ndims},
tfp_dist2_kwargs={'distribution': self.tfp_base_dist2,
'reinterpreted_batch_ndims': batch_ndims},
assertion_fn=self.assertion_fn(rtol=1e-3))
class TFPUnivariateNormalTest(TFPMultivariateNormalTest):
"""Class to test Distrax Independent distribution against its TFP counterpart.
This class tests the case when using a TFP univariate Normal distribution
  as input for both the TFP and the Distrax Independent.
"""
def _make_tfp_base_distribution(self, loc, scale):
return tfd.Normal(loc=loc, scale=scale)
# Define the function to create the base distribution.
_make_base_distribution = _make_tfp_base_distribution
def test_jittable(self):
super()._test_jittable(
dist_kwargs={'distribution': self.base_dist,
'reinterpreted_batch_ndims': 1},
assertion_fn=self.assertion_fn(rtol=1e-4))
class DistraxUnivariateNormalTest(TFPMultivariateNormalTest):
"""Class to test Distrax Independent distribution against its TFP counterpart.
  This class tests the case when using a Distrax univariate Normal distribution
as input for the Distrax Independent.
"""
def _make_distrax_base_distribution(self, loc, scale):
return normal.Normal(loc=loc, scale=scale)
def _make_tfp_base_distribution(self, loc, scale):
return tfd.Normal(loc=loc, scale=scale)
# Define the function to create the base distribution.
_make_base_distribution = _make_distrax_base_distribution
class DistraxMultivariateNormalTest(TFPMultivariateNormalTest):
"""Class to test Distrax Independent distribution against its TFP counterpart.
This class tests the case when using a Distrax multivariate Normal
distribution as input for the Distrax Independent.
"""
def _make_distrax_base_distribution(self, loc, scale):
return mvn_diag.MultivariateNormalDiag(loc=loc, scale_diag=scale)
# Define the function to create the base distribution.
_make_base_distribution = _make_distrax_base_distribution
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/independent_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `uniform.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import uniform
import jax
from jax.config import config as jax_config
import jax.numpy as jnp
def setUpModule():
jax_config.update('jax_enable_x64', True)
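# Note (illustrative, not exercised directly by the tests): enabling
# `jax_enable_x64` module-wide is what lets the float64 cases below keep their
# precision; with the flag off, JAX downcasts float64 arrays to float32.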
class UniformFloat64Test(chex.TestCase):
def _assert_dtypes(self, dist, dtype):
"""Asserts dist methods' outputs' datatypes."""
# Sanity check to make sure float64 is enabled.
x_64 = jnp.zeros([])
self.assertEqual(jnp.float64, x_64.dtype)
key = jax.random.PRNGKey(1729)
z, log_prob = self.variant(
lambda: dist.sample_and_log_prob(seed=key, sample_shape=[3]))()
z2 = self.variant(
lambda: dist.sample(seed=key, sample_shape=[3]))()
self.assertEqual(dtype, z.dtype)
self.assertEqual(dtype, z2.dtype)
self.assertEqual(dtype, log_prob.dtype)
self.assertEqual(dtype, self.variant(dist.log_prob)(z).dtype)
self.assertEqual(dtype, self.variant(dist.prob)(z).dtype)
self.assertEqual(dtype, self.variant(dist.log_cdf)(z).dtype)
self.assertEqual(dtype, self.variant(dist.cdf)(z).dtype)
self.assertEqual(dtype, self.variant(dist.entropy)().dtype)
self.assertEqual(dtype, self.variant(dist.mean)().dtype)
self.assertEqual(dtype, self.variant(dist.median)().dtype)
self.assertEqual(dtype, self.variant(dist.stddev)().dtype)
self.assertEqual(dtype, self.variant(dist.variance)().dtype)
self.assertEqual(dtype, dist.low.dtype)
self.assertEqual(dtype, dist.high.dtype)
self.assertEqual(dtype, dist.range.dtype)
self.assertEqual(dtype, dist.dtype)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_dtype(self, dtype):
dist = uniform.Uniform(low=jnp.zeros([], dtype), high=jnp.ones([], dtype))
self._assert_dtypes(dist, dtype)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/uniform_float64_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `log_stddev_normal.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import log_stddev_normal as lsn
from distrax._src.distributions import normal
import jax
import jax.numpy as jnp
import mock
import numpy as np
from tensorflow_probability.substrates import jax as tfp
kl_module = tfp.distributions.kullback_leibler
class LogStddevNormalTest(parameterized.TestCase):
@parameterized.parameters(
(np.zeros((4,)), np.zeros((4,)), np.zeros((4,))),
(np.zeros(()), np.zeros((4,)), np.zeros((4,))),
(np.zeros((4,)), np.zeros(()), np.zeros((4,))),
)
def test_log_scale_property(self, mean, log_stddev, expected):
dist = lsn.LogStddevNormal(mean, log_stddev)
self.assertEqual(dist.log_scale.shape, expected.shape)
np.testing.assert_allclose(dist.log_scale, expected, atol=1e-4)
@parameterized.parameters(
(0.0, 1.0),
(4.0, 10.0),
)
def test_sampling_scalar(self, mean, stddev):
log_stddev = np.log(stddev)
dist = lsn.LogStddevNormal(mean, log_stddev)
num_samples = 1000000
prng_key = jax.random.PRNGKey(1331)
samples = dist.sample(seed=prng_key, sample_shape=num_samples)
chex.assert_shape(samples, (num_samples,))
np.testing.assert_allclose(jnp.mean(samples), mean, atol=4e-2)
np.testing.assert_allclose(jnp.std(samples), stddev, atol=4e-2)
@parameterized.parameters(
([3, 4], [1.5, 2.5]),
([0, 1, 0, 1, 10], [0.1, 0.5, 1.0, 5.0, 10.0]))
def test_sampling_vector(self, mean, stddev):
mean = np.array(mean)
log_stddev = np.log(stddev)
self.assertEqual(mean.shape, log_stddev.shape)
dist = lsn.LogStddevNormal(mean, log_stddev)
num_samples = 1000000
prng_key = jax.random.PRNGKey(1331)
samples = dist.sample(seed=prng_key, sample_shape=num_samples)
chex.assert_shape(samples, (num_samples,) + mean.shape)
np.testing.assert_allclose(jnp.mean(samples, axis=0), mean, atol=4e-2)
np.testing.assert_allclose(jnp.std(samples, axis=0), stddev, atol=4e-2)
def test_sampling_batched(self):
means = np.array([[3.0, 4.0], [-5, 48.0], [58, 64.0]])
stddevs = np.array([[1, 2], [2, 4], [4, 8]])
log_stddevs = np.log(stddevs)
dist = lsn.LogStddevNormal(means, log_stddevs)
num_samples = 1000000
prng_key = jax.random.PRNGKey(1331)
samples = dist.sample(seed=prng_key, sample_shape=num_samples)
# output shape is [num_samples] + means.shape
chex.assert_shape(samples, (num_samples, 3, 2))
np.testing.assert_allclose(jnp.mean(samples, axis=0), means, atol=4e-2)
np.testing.assert_allclose(jnp.std(samples, axis=0), stddevs, atol=4e-2)
def test_sampling_batched_custom_dim(self):
means = np.array([[3.0, 4.0], [-5, 48.0], [58, 64.0]])
stddevs = np.array([[1, 2], [2, 4], [4, 8]])
log_stddevs = np.log(stddevs)
dist = lsn.LogStddevNormal(means, log_stddevs)
num_samples = 1000000
prng_key = jax.random.PRNGKey(1331)
samples = dist.sample(seed=prng_key, sample_shape=num_samples)
chex.assert_shape(samples, (num_samples, 3, 2))
np.testing.assert_allclose(jnp.mean(samples, axis=0), means, atol=4e-2)
np.testing.assert_allclose(jnp.std(samples, axis=0), stddevs, atol=4e-2)
@chex.all_variants
@parameterized.named_parameters(
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist = lsn.LogStddevNormal(
loc=jnp.zeros((), dtype), log_scale=jnp.zeros((), dtype))
samples = self.variant(dist.sample)(seed=jax.random.PRNGKey(0))
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
def test_kl_versus_normal(self):
loc, scale = jnp.array([2.0]), jnp.array([2.0])
log_scale = jnp.log(scale)
lsn_prior = lsn.LogStddevNormal(jnp.array([0.0]), jnp.array([0.0]))
n_prior = normal.Normal(jnp.array([0.0]), jnp.array([1.0]))
lsn_dist = lsn.LogStddevNormal(loc, log_scale)
n_dist = normal.Normal(loc, scale)
kl1 = tfp.distributions.kl_divergence(lsn_dist, lsn_prior)
kl2 = tfp.distributions.kl_divergence(n_dist, lsn_prior)
kl3 = tfp.distributions.kl_divergence(n_dist, n_prior)
np.testing.assert_allclose(kl2, kl1)
np.testing.assert_allclose(kl3, kl2)
np.testing.assert_allclose(kl1, kl3)
# pylint:disable=protected-access
def test_custom_kl_registered(self):
# Check that our custom KL is registered inside the TFP dispatch table.
dist_pair = (lsn.LogStddevNormal, lsn.LogStddevNormal)
self.assertEqual(kl_module._DIVERGENCES[dist_pair],
lsn._kl_logstddevnormal_logstddevnormal)
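  # Context (assumption about how `log_stddev_normal.py` registers the KL):
  # TFP dispatches `kl_divergence(a, b)` through a table keyed by
  # `(type(a), type(b))`; entries are typically added with a decorator such as
  # `@tfp.distributions.RegisterKL(lsn.LogStddevNormal, lsn.LogStddevNormal)`.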
@mock.patch.dict(
kl_module._DIVERGENCES,
{(lsn.LogStddevNormal,
lsn.LogStddevNormal): lambda *args, **kwargs: 42})
def test_calling_custom_kl(self):
# Check that the dispatch of tfp.kl_divergence actually goes to the
# table we checked for above.
dist_a = lsn.LogStddevNormal(jnp.array([0.0]), jnp.array([0.0]))
dist_b = lsn.LogStddevNormal(jnp.array([0.0]), jnp.array([0.0]))
self.assertEqual(tfp.distributions.kl_divergence(dist_a, dist_b), 42)
# pylint:enable=protected-access
  def test_jittable(self):
@jax.jit
def jitted_function(event, dist):
return dist.log_prob(event)
dist = lsn.LogStddevNormal(np.array([0.0]), np.array([0.0]))
event = dist.sample(seed=jax.random.PRNGKey(0))
jitted_function(event, dist)
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
loc = jnp.array(np.random.randn(3, 4, 5))
log_scale = jnp.array(np.random.randn(3, 4, 5))
dist = lsn.LogStddevNormal(loc=loc, log_scale=log_scale)
sliced_dist = dist[slice_]
np.testing.assert_allclose(sliced_dist.mean(), loc[slice_], rtol=1e-3)
np.testing.assert_allclose(
sliced_dist.log_scale, log_scale[slice_], rtol=1e-3)
self.assertIsInstance(sliced_dist, lsn.LogStddevNormal)
def test_slice_different_parameterization(self):
loc = jnp.array(np.random.randn(4))
log_scale = jnp.array(np.random.randn(3, 4))
dist = lsn.LogStddevNormal(loc=loc, log_scale=log_scale)
np.testing.assert_allclose(
dist[0].mean(), loc, rtol=1e-3) # Not slicing loc.
np.testing.assert_allclose(dist[0].log_scale, log_scale[0], rtol=1e-3)
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/log_stddev_normal_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `multinomial.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import multinomial
from distrax._src.utils import equivalence
from distrax._src.utils import math
import jax
import jax.numpy as jnp
import numpy as np
from scipy import stats
class MultinomialTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(multinomial.Multinomial)
self.total_count = np.asarray(
[4, 3], dtype=np.float32) # float dtype required for TFP
self.probs = 0.5 * np.asarray([0.1, 0.4, 0.2, 0.3]) # unnormalized
self.logits = np.log(self.probs)
@parameterized.named_parameters(
('from probs', False),
('from logits', True))
def test_parameters(self, from_logits):
if from_logits:
dist_params = {'logits': self.logits, 'total_count': self.total_count}
else:
dist_params = {'probs': self.probs, 'total_count': self.total_count}
dist = self.distrax_cls(**dist_params)
self.assertion_fn(atol=1e-6, rtol=1e-3)(
dist.logits, np.tile(math.normalize(logits=self.logits), (2, 1)))
self.assertion_fn(atol=1e-6, rtol=1e-3)(
dist.probs, np.tile(math.normalize(probs=self.probs), (2, 1)))
@parameterized.named_parameters(
('probs and logits', {
'total_count': 3, 'logits': [0.1, -0.2], 'probs': [0.6, 0.4]}),
('both probs and logits are None', {
'total_count': 3, 'logits': None, 'probs': None}),
('logits are 0d', {'total_count': 3, 'logits': 3.}),
('probs are 0d', {'total_count': 3, 'probs': 1.}),
('logits have wrong dim', {'total_count': 3, 'logits': np.ones((4, 1))}),
('probs have wrong dim', {'total_count': 3, 'probs': np.ones((4, 1))}),
('bool dtype', {
'total_count': 3, 'logits': [0.1, 0.], 'dtype': jnp.bool_}),
('complex64 dtype', {
'total_count': 3, 'logits': [0.1, 0.], 'dtype': jnp.complex64}),
('complex128 dtype', {
'total_count': 3, 'logits': [0.1, 0.], 'dtype': jnp.complex128}),
)
def test_raises_on_invalid_inputs(self, dist_params):
with self.assertRaises(ValueError):
self.distrax_cls(**dist_params)
@parameterized.named_parameters(
('1d logits', {'logits': [0.0, 1.0, -0.5]}),
('1d probs', {'probs': [0.2, 0.5, 0.3]}),
('2d logits', {'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]}),
('2d probs', {'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]}),
)
def test_event_shape(self, dist_params):
dist_params = {k: jnp.asarray(v) for k, v in dist_params.items()}
dist_params.update({'total_count': self.total_count})
super()._test_event_shape((), dist_params)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape',
{'logits': [0.0, 1.0, -0.5]},
()),
('1d probs, no shape',
{'probs': [0.2, 0.5, 0.3]},
()),
('1d logits, int shape',
{'logits': [0.0, 1.0, -0.5]},
1),
('1d probs, int shape',
{'probs': [0.2, 0.5, 0.3]},
1),
('1d logits, 1-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(1,)),
('1d probs, 1-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(1,)),
('1d logits, 2-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(5, 4)),
('1d probs, 2-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(5, 4)),
('2d logits, no shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
()),
('2d probs, no shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
()),
('2d logits, int shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
4),
('2d probs, int shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
4),
('2d logits, 1-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5,)),
('2d probs, 1-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5,)),
('2d logits, 2-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5, 4)),
('2d probs, 2-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5, 4)),
)
def test_sample_shape_with_int_total_count(
self, dist_params, sample_shape):
dist_params = {k: jnp.asarray(v) for k, v in dist_params.items()}
dist_params.update({
'total_count': 3,
})
super()._test_sample_shape(
dist_args=(),
dist_kwargs=dist_params,
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape',
{'logits': [0.0, 1.0, -0.5]},
()),
('1d probs, no shape',
{'probs': [0.2, 0.5, 0.3]},
()),
('1d logits, int shape',
{'logits': [0.0, 1.0, -0.5]},
1),
('1d probs, int shape',
{'probs': [0.2, 0.5, 0.3]},
1),
('1d logits, 1-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(1,)),
('1d probs, 1-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(1,)),
('1d logits, 2-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(5, 4)),
('1d probs, 2-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(5, 4)),
('2d logits, no shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
()),
('2d probs, no shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
()),
('2d logits, int shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
4),
('2d probs, int shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
4),
('2d logits, 1-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5,)),
('2d probs, 1-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5,)),
('2d logits, 2-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5, 4)),
('2d probs, 2-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5, 4)),
)
def test_sample_shape_with_1d_total_count(
self, dist_params, sample_shape):
dist_params = {k: jnp.asarray(v) for k, v in dist_params.items()}
dist_params.update({
'total_count': np.asarray([4, 3], dtype=np.float32),
})
super()._test_sample_shape(
dist_args=(),
dist_kwargs=dist_params,
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape',
{'logits': [0.0, 1.0, -0.5]},
()),
('1d probs, no shape',
{'probs': [0.2, 0.5, 0.3]},
()),
('1d logits, int shape',
{'logits': [0.0, 1.0, -0.5]},
1),
('1d probs, int shape',
{'probs': [0.2, 0.5, 0.3]},
1),
('1d logits, 1-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(1,)),
('1d probs, 1-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(1,)),
('1d logits, 2-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(5, 4)),
('1d probs, 2-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(5, 4)),
('2d logits, no shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
()),
('2d probs, no shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
()),
('2d logits, int shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
4),
('2d probs, int shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
4),
('2d logits, 1-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5,)),
('2d probs, 1-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5,)),
('2d logits, 2-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5, 4)),
('2d probs, 2-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5, 4)),
)
def test_sample_shape_with_2d_total_count(
self, dist_params, sample_shape):
dist_params = {k: jnp.asarray(v) for k, v in dist_params.items()}
total_count = np.asarray(
[[4, 3], [5, 4], [3, 2], [1, 4]], dtype=np.float32)
dist_params.update({'total_count': total_count})
super()._test_sample_shape(
dist_args=(),
dist_kwargs=dist_params,
sample_shape=sample_shape)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape',
{'logits': [0.0, 1.0, -0.5]},
()),
('1d probs, no shape',
{'probs': [0.2, 0.5, 0.3]},
()),
('1d logits, int shape',
{'logits': [0.0, 1.0, -0.5]},
1),
('1d probs, int shape',
{'probs': [0.2, 0.5, 0.3]},
1),
('1d logits, 1-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(1,)),
('1d probs, 1-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(1,)),
('1d logits, 2-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(5, 4)),
('1d probs, 2-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(5, 4)),
('2d logits, no shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
()),
('2d probs, no shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
()),
('2d logits, int shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
4),
('2d probs, int shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
4),
('2d logits, 1-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5,)),
('2d probs, 1-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5,)),
('2d logits, 2-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5, 4)),
('2d probs, 2-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5, 4)),
)
def test_sum_samples(
self, dist_params, sample_shape):
dist_params = {k: jnp.asarray(v) for k, v in dist_params.items()}
total_count = np.asarray(
[[4, 3], [5, 4], [3, 2], [1, 4]], dtype=np.float32)
dist_params.update({'total_count': total_count})
dist = self.distrax_cls(**dist_params)
sample_fn = self.variant(
lambda key: dist.sample(seed=key, sample_shape=sample_shape))
samples = sample_fn(self.key)
sum_samples = jnp.sum(samples, axis=-1)
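    # Each multinomial draw is a vector of per-category counts, so summing over
    # the last axis must recover (a broadcast of) `total_count` exactly.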
self.assertion_fn(atol=1e-6, rtol=1e-3)(
np.asarray(sum_samples, dtype=np.float32),
np.broadcast_to(total_count, sum_samples.shape))
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape',
{'logits': [0.0, 1.0, -0.5]},
()),
('1d probs, no shape',
{'probs': [0.2, 0.5, 0.3]},
()),
('1d logits, int shape',
{'logits': [0.0, 1.0, -0.5]},
1),
('1d probs, int shape',
{'probs': [0.2, 0.5, 0.3]},
1),
('1d logits, 1-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(1,)),
('1d probs, 1-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(1,)),
('1d logits, 2-tuple shape',
{'logits': [0.0, 1.0, -0.5]},
(5, 4)),
('1d probs, 2-tuple shape',
{'probs': [0.2, 0.5, 0.3]},
(5, 4)),
('2d logits, no shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
()),
('2d probs, no shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
()),
('2d logits, int shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
4),
('2d probs, int shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
4),
('2d logits, 1-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5,)),
('2d probs, 1-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5,)),
('2d logits, 2-tuple shape',
{'logits': [[0.0, 1.0, -0.5], [-0.1, 0.3, 0.0]]},
(5, 4)),
('2d probs, 2-tuple shape',
{'probs': [[0.1, 0.4, 0.5], [0.5, 0.25, 0.25]]},
(5, 4)),
)
def test_sample_and_log_prob(self, dist_params, sample_shape):
dist_params = {k: jnp.asarray(v) for k, v in dist_params.items()}
total_count = np.asarray(
[[4, 3], [5, 4], [3, 2], [1, 4]], dtype=np.float32)
dist_params.update({'total_count': total_count})
super()._test_sample_and_log_prob(
dist_args=(),
dist_kwargs=dist_params,
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(atol=1e-6, rtol=1e-3))
@chex.all_variants
@parameterized.named_parameters(
('int32', jnp.int32),
('int64', jnp.int64),
('uint32', jnp.uint32),
('uint64', jnp.uint64),
('float32', jnp.float32),
('float64', jnp.float64))
def test_sample_dtype(self, dtype):
dist_params = {
'logits': self.logits, 'dtype': dtype, 'total_count': self.total_count}
dist = self.distrax_cls(**dist_params)
samples = self.variant(dist.sample)(seed=self.key)
self.assertEqual(samples.dtype, dist.dtype)
chex.assert_type(samples, dtype)
@chex.all_variants
def test_sample_extreme_probs(self):
dist_params = {
'probs': np.asarray([1., 0., 0., 0.]), 'total_count': 10}
dist = self.distrax_cls(**dist_params)
sample_fn = self.variant(
lambda key: dist.sample(seed=key, sample_shape=100))
samples = sample_fn(self.key)
np.testing.assert_equal(np.unique(samples[..., 0]), 10)
np.testing.assert_equal(np.unique(samples[..., 1:]), 0)
@chex.all_variants
@parameterized.named_parameters(
('1d logits, 1 input',
{'logits': [0.0, 0.5, -0.5]},
[2, 1, 0]),
('1d logits, 2 inputs',
{'logits': [0.0, 0.5, -0.5]},
[[1, 2, 0], [0, 1, 2]]),
('2d logits, 2 inputs',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
[[1, 0, 2], [1, 1, 1]]),
('2d logits, rank-3 inputs',
{'logits': [[0.0, 0.5, -0.5], [-0.1, 0.1, 0.1]]},
np.asarray([[1, 2, 0], [1, 0, 2]])[None, ...]),
('1d probs, 1 input',
{'probs': [0.3, 0.2, 0.5]},
[1, 2, 0]),
('1d probs, 2 inputs',
{'probs': [0.3, 0.2, 0.5]},
[[1, 0, 2], [1, 1, 1]]),
('2d probs, 2 inputs',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
[[1, 2, 0], [2, 1, 0]]),
('2d probs, rank-3 inputs',
{'probs': [[0.2, 0.4, 0.4], [0.1, 0.2, 0.7]]},
np.asarray([[1, 0, 2], [1, 1, 1]])[None, ...]),
)
def test_log_prob(self, dist_params, value):
dist_params = {k: jnp.asarray(v) for k, v in dist_params.items()}
dist_params.update({'total_count': 3})
value = jnp.asarray(value)
super()._test_attribute(
attribute_string='log_prob',
dist_kwargs=dist_params,
call_args=(value,),
assertion_fn=self.assertion_fn(atol=1e-6, rtol=1e-3))
@chex.all_variants(with_jit=False, with_pmap=False)
def test_log_prob_extreme_probs(self):
dist_params = {
'probs': np.array([0.0, 1.0, 0.0]),
'total_count': 3,
}
value = np.array([[0, 3, 0], [1, 1, 1]])
expected_result = np.asarray([0., -np.inf])
dist = self.distrax_cls(**dist_params)
np.testing.assert_allclose(
self.variant(dist.log_prob)(value), expected_result, atol=1e-5)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('from 2d logits',
{'logits': np.asarray([[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]])}),
('from 2d probs',
{'probs': np.asarray([[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]])}),
)
def test_entropy(self, dist_params):
# The TFP Multinomial does not implement `entropy`, so we use scipy for
# the tests.
dist_params.update({
'total_count': np.asarray([3, 10]),
})
dist = self.distrax_cls(**dist_params)
entropy = []
for probs, counts in zip(dist.probs, dist.total_count):
entropy.append(stats.multinomial(n=counts, p=probs).entropy())
self.assertion_fn(atol=1e-6, rtol=1e-3)(
self.variant(dist.entropy)(), np.asarray(entropy))
@chex.all_variants(with_pmap=False)
def test_entropy_extreme_probs(self):
dist_params = {
'probs': np.asarray([1.0, 0.0, 0.0]),
'total_count': np.asarray([3, 10]),
}
dist = self.distrax_cls(**dist_params)
expected_result = np.asarray([0., 0.])
np.testing.assert_allclose(
self.variant(dist.entropy)(), expected_result, atol=3e-4)
@chex.all_variants(with_pmap=False)
def test_entropy_scalar(self):
# The TFP Multinomial does not implement `entropy`, so we use scipy for
# the tests.
probs = np.asarray([0.1, 0.5, 0.4])
total_count = 5
scipy_entropy = stats.multinomial(n=total_count, p=probs).entropy()
distrax_entropy_fn = self.variant(
lambda x, y: multinomial.Multinomial._entropy_scalar(total_count, x, y))
self.assertion_fn(atol=1e-6, rtol=1e-3)(
distrax_entropy_fn(probs, np.log(probs)), scipy_entropy)
@chex.all_variants(with_pmap=False)
def test_entropy_scalar_extreme_probs(self):
probs = np.asarray([1., 0., 0.])
total_count = 5
expected_result = 0.
distrax_entropy_fn = self.variant(
lambda x, y: multinomial.Multinomial._entropy_scalar(total_count, x, y))
np.testing.assert_allclose(
distrax_entropy_fn(probs, np.log(probs)), expected_result, atol=1e-5)
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('variance; from 2d logits',
'variance', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]}),
('variance; from 2d probs',
'variance', {'probs': [[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]]}),
('mean; from 2d logits',
'mean', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]}),
('mean; from 2d probs',
'mean', {'probs': [[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]]}),
('covariance; from 2d logits',
'covariance', {'logits': [[0.0, 0.5, -0.5], [-0.2, 0.3, 0.5]]}),
('covariance; from 2d probs',
'covariance', {'probs': [[0.1, 0.5, 0.4], [0.2, 0.4, 0.4]]}),
)
def test_method(self, function_string, dist_params):
dist_params = {k: jnp.asarray(v) for k, v in dist_params.items()}
total_count = np.asarray(
[[4, 3], [5, 4], [3, 2], [1, 4]], dtype=np.float32)
dist_params.update({'total_count': total_count})
super()._test_attribute(
attribute_string=function_string,
dist_kwargs=dist_params,
assertion_fn=self.assertion_fn(atol=1e-6, rtol=1e-3))
def test_jittable(self):
super()._test_jittable(
dist_kwargs={
'probs': np.asarray([1.0, 0.0, 0.0]),
'total_count': np.asarray([3, 10])
},
assertion_fn=self.assertion_fn(atol=3e-4, rtol=1e-3))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
)
def test_slice(self, slice_):
logits = jnp.array(np.random.randn(3, 4, 5))
probs = jax.nn.softmax(jnp.array(np.random.randn(3, 4, 5)), axis=-1)
total_count = jnp.full((3, 4), fill_value=2)
dist1 = self.distrax_cls(total_count=total_count, logits=logits)
dist2 = self.distrax_cls(total_count=total_count, probs=probs)
self.assertion_fn(atol=1e-6, rtol=1e-3)(
dist2[slice_].total_count, total_count[slice_])
self.assertion_fn(atol=1e-6, rtol=1e-3)(
jax.nn.softmax(dist1[slice_].logits, axis=-1),
jax.nn.softmax(logits[slice_], axis=-1))
self.assertion_fn(atol=1e-6, rtol=1e-3)(dist2[slice_].probs, probs[slice_])
def test_slice_ellipsis(self):
logits = jnp.array(np.random.randn(4, 4, 5))
probs = jax.nn.softmax(jnp.array(np.random.randn(4, 4, 5)), axis=-1)
total_count_value = 2
total_count = jnp.full((4, 4), fill_value=total_count_value)
dist1 = self.distrax_cls(total_count=total_count_value, logits=logits)
dist2 = self.distrax_cls(total_count=total_count_value, probs=probs)
self.assertion_fn(atol=1e-6, rtol=1e-3)(
dist1[..., -1].total_count, total_count[..., -1])
self.assertion_fn(atol=1e-6, rtol=1e-3)(
dist2[..., -1].total_count, total_count[..., -1])
self.assertion_fn(atol=1e-6, rtol=1e-3)(
jax.nn.softmax(dist1[..., -1].logits, axis=-1),
jax.nn.softmax(logits[:, -1], axis=-1))
self.assertion_fn(atol=1e-6, rtol=1e-3)(dist2[..., -1].probs, probs[:, -1])
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/multinomial_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized distribution."""
from typing import cast, Optional, Tuple
import chex
from distrax._src.distributions import distribution as base_distribution
from distrax._src.utils import conversion
from distrax._src.utils import math
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
DistributionLike = base_distribution.DistributionLike
DistributionT = base_distribution.DistributionT
EventT = base_distribution.EventT
class Quantized(
base_distribution.Distribution[Array, Tuple[int, ...], jnp.dtype],):
"""Distribution representing the quantization `Y = ceil(X)`.
Given an input distribution `p(x)` over a univariate random variable `X`,
sampling from a `Quantized` distribution amounts to sampling `x ~ p(x)` and
then setting `y = ceil(x)`. The returned samples are integer-valued and of the
same `dtype` as the base distribution.
"""
equiv_tfp_cls = tfd.QuantizedDistribution
def __init__(self,
distribution: DistributionLike,
low: Optional[Numeric] = None,
high: Optional[Numeric] = None,
eps: Optional[Numeric] = None):
"""Initializes a Quantized distribution.
Args:
distribution: The base distribution to be quantized.
low: Lowest possible quantized value, such that samples are `y >=
ceil(low)`. Its shape must broadcast with the shape of samples from
`distribution` and must not result in additional batch dimensions after
broadcasting.
high: Highest possible quantized value, such that samples are `y <=
floor(high)`. Its shape must broadcast with the shape of samples from
`distribution` and must not result in additional batch dimensions after
broadcasting.
      eps: An optional minimum gap to enforce between the two log-probability
        terms whose difference gives the log prob (internally called "big" and
        "small"). Useful for avoiding NaNs in computing log_probs when the two
        terms are too close.
"""
self._dist: base_distribution.Distribution[Array, Tuple[
int, ...], jnp.dtype] = conversion.as_distribution(distribution)
self._eps = eps
if self._dist.event_shape:
raise ValueError(f'The base distribution must be univariate, but its '
f'`event_shape` is {self._dist.event_shape}.')
dtype = self._dist.dtype
if low is None:
self._low = None
else:
self._low = jnp.asarray(jnp.ceil(low), dtype=dtype)
if len(self._low.shape) > len(self._dist.batch_shape):
raise ValueError('The parameter `low` must not result in additional '
'batch dimensions.')
if high is None:
self._high = None
else:
self._high = jnp.asarray(jnp.floor(high), dtype=dtype)
if len(self._high.shape) > len(self._dist.batch_shape):
raise ValueError('The parameter `high` must not result in additional '
'batch dimensions.')
super().__init__()
@property
def distribution(
self
) -> base_distribution.Distribution[Array, Tuple[int, ...], jnp.dtype]:
"""Base distribution `p(x)`."""
return self._dist
@property
def low(self) -> Optional[Array]:
"""Lowest value that quantization returns."""
if self._low is None:
return None
return jnp.broadcast_to(self._low, self.batch_shape + self.event_shape)
@property
def high(self) -> Optional[Array]:
"""Highest value that quantization returns."""
if self._high is None:
return None
return jnp.broadcast_to(self._high, self.batch_shape + self.event_shape)
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of event of distribution samples."""
event_shape = self.distribution.event_shape
# TODO(b/149413467): Remove explicit casting when resolved.
return cast(Tuple[int, ...], event_shape)
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self.distribution.batch_shape
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
samples = self.distribution.sample(seed=key, sample_shape=n)
samples = jnp.ceil(samples)
# Apply overflow and underflow conditions.
if self.low is not None:
samples = jnp.where(samples < self.low, self.low, samples)
if self.high is not None:
samples = jnp.where(samples > self.high, self.high, samples)
return samples
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples = self._sample_n(key, n)
log_cdf = self.distribution.log_cdf(samples)
log_cdf_m1 = self.distribution.log_cdf(samples - 1.)
log_sf = self.distribution.log_survival_function(samples)
log_sf_m1 = self.distribution.log_survival_function(samples - 1.)
if self.high is not None:
# `samples - 1.` is definitely lower than `high`.
log_cdf = jnp.where(samples < self.high, log_cdf, 0.)
log_sf = jnp.where(samples < self.high, log_sf, -jnp.inf)
if self.low is not None:
# `samples` is definitely greater than or equal to `low`.
log_cdf_m1 = jnp.where(samples - 1. < self.low, -jnp.inf, log_cdf_m1)
log_sf_m1 = jnp.where(samples - 1. < self.low, 0., log_sf_m1)
# Use the survival function instead of the CDF when its value is smaller,
# which happens to the right of the median of the distribution.
big = jnp.where(log_sf < log_cdf, log_sf_m1, log_cdf)
small = jnp.where(log_sf < log_cdf, log_sf, log_cdf_m1)
log_probs = math.log_expbig_minus_expsmall(big, small)
return samples, log_probs
def log_prob(self, value: EventT) -> Array:
"""Calculates the log probability of an event.
This implementation differs slightly from the one in TFP, as it returns
`-jnp.inf` on non-integer values instead of returning the log prob of the
    floor of the input. It also returns `-jnp.inf`
on inputs that are outside the support of the distribution (as opposed to
`nan`, like TFP does). On other integer values, both implementations are
identical.
Similar to TFP, the log prob is computed using either the CDF or the
survival function to improve numerical stability. With infinite precision
the two computations would be equal.
Args:
value: An event.
Returns:
The log probability log P(value).
"""
log_cdf = self.log_cdf(value)
log_cdf_m1 = self.log_cdf(value - 1.)
log_sf = self.log_survival_function(value)
log_sf_m1 = self.log_survival_function(value - 1.)
# Use the survival function instead of the CDF when its value is smaller,
# which happens to the right of the median of the distribution.
big = jnp.where(log_sf < log_cdf, log_sf_m1, log_cdf)
small = jnp.where(log_sf < log_cdf, log_sf, log_cdf_m1)
if self._eps is not None:
      # Use stop_gradient so that gradients do not flow through `small` when
      # the gap is clamped to `eps`.
big = jnp.where(big - small > self._eps, big,
jax.lax.stop_gradient(small) + self._eps)
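    # `log_expbig_minus_expsmall(big, small)` computes `log(exp(big) -
    # exp(small))` in a numerically stable way, i.e. the log of the probability
    # mass assigned to the integer `value`.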
log_probs = math.log_expbig_minus_expsmall(big, small)
# Return -inf when evaluating on non-integer value.
is_integer = jnp.where(value > jnp.floor(value), False, True)
log_probs = jnp.where(is_integer, log_probs, -jnp.inf)
# Return -inf and not NaN when outside of [low, high].
# If the CDF is used, `value > high` is already treated correctly;
# to fix the return value for `value < low` we test whether `log_cdf` is
# finite; `log_sf_m1` will be `0.` in this regime.
# If the survival function is used the reverse case applies; to fix the
# case `value > high` we test whether `log_sf_m1` is finite; `log_cdf` will
# be `0.` in this regime.
is_outside = jnp.logical_or(jnp.isinf(log_cdf), jnp.isinf(log_sf_m1))
log_probs = jnp.where(is_outside, -jnp.inf, log_probs)
return log_probs
def prob(self, value: EventT) -> Array:
"""Calculates the probability of an event.
This implementation differs slightly from the one in TFP, as it returns 0
on non-integer values instead of returning the prob of the floor of the
input. It is identical for integer values.
Similar to TFP, the probability is computed using either the CDF or the
survival function to improve numerical stability. With infinite precision
the two computations would be equal.
Args:
value: An event.
Returns:
The probability P(value).
"""
cdf = self.cdf(value)
cdf_m1 = self.cdf(value - 1.)
sf = self.survival_function(value)
sf_m1 = self.survival_function(value - 1.)
# Use the survival function instead of the CDF when its value is smaller,
# which happens to the right of the median of the distribution.
probs = jnp.where(sf < cdf, sf_m1 - sf, cdf - cdf_m1)
# Return 0. when evaluating on non-integer value.
is_integer = jnp.where(value > jnp.floor(value), False, True)
probs = jnp.where(is_integer, probs, 0.)
return probs
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
# The log CDF of a quantized distribution is piecewise constant on half-open
# intervals:
# ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ...
# with log CDF(n) <= log CDF(n+1), because the distribution only has mass on
# integer values. Therefore: P[Y <= value] = P[Y <= floor(value)].
y = jnp.floor(value)
result = self.distribution.log_cdf(y)
# Update result outside of the interval [low, high].
if self.low is not None:
result = jnp.where(y < self.low, -jnp.inf, result)
if self.high is not None:
result = jnp.where(y < self.high, result, 0.)
return result
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
# The CDF of a quantized distribution is piecewise constant on half-open
# intervals:
# ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ...
# with CDF(n) <= CDF(n+1), because the distribution only has mass on integer
# values. Therefore: P[Y <= value] = P[Y <= floor(value)].
y = jnp.floor(value)
result = self.distribution.cdf(y)
# Update result outside of the interval [low, high].
if self.low is not None:
result = jnp.where(y < self.low, 0., result)
if self.high is not None:
result = jnp.where(y < self.high, result, 1.)
return result
def log_survival_function(self, value: EventT) -> Array:
"""Calculates the log of the survival function of an event.
This implementation differs slightly from TFP, in that it returns the
correct log of the survival function for non-integer values, that is, it
always equates to `log(1 - CDF(value))`. It is identical for integer values.
Args:
value: An event.
Returns:
The log of the survival function `log P[Y > value]`.
"""
# The log of the survival function of a quantized distribution is piecewise
# constant on half-open intervals:
# ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ...
# with log sf(n) >= log sf(n+1), because the distribution only has mass on
# integer values. Therefore: log P[Y > value] = log P[Y > floor(value)].
y = jnp.floor(value)
result = self.distribution.log_survival_function(y)
# Update result outside of the interval [low, high].
if self._low is not None:
result = jnp.where(y < self._low, 0., result)
if self._high is not None:
result = jnp.where(y < self._high, result, -jnp.inf)
return result
def survival_function(self, value: EventT) -> Array:
"""Calculates the survival function of an event.
This implementation differs slightly from TFP, in that it returns the
correct survival function for non-integer values, that is, it always
equates to `1 - CDF(value)`. It is identical for integer values.
Args:
value: An event.
Returns:
The survival function `P[Y > value]`.
"""
# The survival function of a quantized distribution is piecewise
# constant on half-open intervals:
# ... [n-2 n-1) [n-1 n) [n n+1) [n+1 n+2) ...
# with sf(n) >= sf(n+1), because the distribution only has mass on
# integer values. Therefore: P[Y > value] = P[Y > floor(value)].
y = jnp.floor(value)
result = self.distribution.survival_function(y)
# Update result outside of the interval [low, high].
if self._low is not None:
result = jnp.where(y < self._low, 1., result)
if self._high is not None:
result = jnp.where(y < self._high, result, 0.)
return result
def __getitem__(self, index) -> 'Quantized':
"""See `Distribution.__getitem__`."""
index = base_distribution.to_batch_shape_index(self.batch_shape, index)
low = None if self._low is None else self.low[index]
high = None if self._high is None else self.high[index]
return Quantized(distribution=self.distribution[index], low=low, high=high)
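# Minimal usage sketch (illustrative only; the base distribution, bounds and
# seed below are arbitrary choices, not part of the library API):
if __name__ == '__main__':
  from distrax._src.distributions import normal as _normal
  _dist = Quantized(_normal.Normal(loc=0., scale=3.), low=-5., high=5.)
  _samples = _dist.sample(seed=jax.random.PRNGKey(0), sample_shape=4)
  # Samples are integer-valued (stored as floats) and clipped to [-5, 5];
  # non-integer values and values outside [low, high] get log_prob == -inf.
  print(_samples)
  print(_dist.log_prob(jnp.array([0., 1.5, 7.])))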
| distrax-master | distrax/_src/distributions/quantized.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `straight_through.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import one_hot_categorical
from distrax._src.distributions import straight_through
from distrax._src.utils import equivalence
from distrax._src.utils import math
import jax
import jax.numpy as jnp
import numpy as np
class StraightThroughTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(
straight_through.straight_through_wrapper(
one_hot_categorical.OneHotCategorical))
@chex.all_variants
@parameterized.named_parameters(
('1d logits, no shape', {'logits': [0.0, 1.0, -0.5]}, ()),
('1d probs, no shape', {'probs': [0.2, 0.5, 0.3]}, ()),
('1d logits, int shape', {'logits': [0.0, 1.0, -0.5]}, 1),
('1d probs, int shape', {'probs': [0.2, 0.5, 0.3]}, 1),
('1d logits, 1-tuple shape', {'logits': [0.0, 1.0, -0.5]}, (1,)),
('1d probs, 1-tuple shape', {'probs': [0.2, 0.5, 0.3]}, (1,)),
('1d logits, 2-tuple shape', {'logits': [0.0, 50., -0.5]}, (5, 4)),
('1d probs, 2-tuple shape', {'probs': [0.01, 0.99, 0.]}, (5, 4)),
('2d logits, no shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 0.0]]}, ()),
('2d probs, no shape', {'probs': [[0.1, 0.4, 0.5],
[0.5, 0.25, 0.25]]}, ()),
('2d logits, int shape', {'logits': [[0.0, 50.0, -0.5],
[-0.1, -0.3, 0.2]]}, 4),
('2d probs, int shape', {'probs': [[0.005, 0.005, 0.99],
[0.99, 0., 0.01]]}, 4),
('2d logits, 1-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 200.0]]}, (5,)),
('2d probs, 1-tuple shape', {'probs': [[0., 0.01, 0.99],
[0., 0.99, 0.01]]}, (5,)),
('2d logits, 2-tuple shape', {'logits': [[0.0, 1.0, -0.5],
[-0.1, 0.3, 1000.0]]}, (5, 4)),
('2d probs, 2-tuple shape', {'probs': [[0.01, 0.99, 0.],
[0.99, 0., 0.01]]}, (5, 4)),
)
def test_sample(self, dist_params, sample_shape):
def loss(dist_params, dist_cls, sample_shape):
"""Loss on sample, used both for distrax and TFP."""
# Sample.
dist = dist_cls(**dist_params)
sample_fn = dist.sample
def sample_fn_wrapper(seed, sample_shape):
"""To test with pmap that requires positional arguments."""
return sample_fn(seed=seed, sample_shape=sample_shape)
if hasattr(self, 'variant'):
sample_fn_wrapper = self.variant(static_argnums=(1,))(sample_fn_wrapper)
sample = sample_fn_wrapper(self.key, sample_shape)
return jnp.sum((sample)**2).astype(jnp.float32), sample
# TFP softmax gradient.
def straight_through_tfp_loss(dist_params, dist_cls, sample_shape):
"""Loss on a straight-through gradient of the tfp sample."""
# Distrax normalises the distribution parameters. We want to make sure
# that they are normalised for tfp too, or the gradient might differ.
try:
dist_params['logits'] = math.normalize(logits=dist_params['logits'])
except KeyError:
dist_params['probs'] = math.normalize(probs=dist_params['probs'])
# Sample.
dist = dist_cls(**dist_params)
sample_fn = dist.sample
def sample_fn_wrapper(seed, sample_shape):
"""To test with pmap that requires positional arguments."""
return sample_fn(seed=seed, sample_shape=sample_shape)
if hasattr(self, 'variant'):
sample_fn_wrapper = self.variant(static_argnums=(1,))(sample_fn_wrapper)
sample = sample_fn_wrapper(self.key, sample_shape)
# Straight-through gradient.
def _pad(probs, shape):
if isinstance(shape, int):
return probs
while len(probs.shape) < len(shape):
probs = probs[None]
return probs
probs = dist.probs_parameter()
padded_probs = _pad(probs, sample_shape)
sample += padded_probs - jax.lax.stop_gradient(padded_probs)
return jnp.sum((sample)**2).astype(jnp.float32), sample
# Straight-through gradient and sample.
sample_grad, sample = jax.grad(loss, has_aux=True)(dist_params,
self.distrax_cls,
sample_shape)
# TFP gradient (zero) and sample.
tfp_sample_grad, tfp_sample = jax.grad(loss, has_aux=True)(dist_params,
self.tfp_cls,
sample_shape)
# TFP straight-through gradient and sample.
tfp_st_sample_grad, tfp_st_sample = jax.grad(straight_through_tfp_loss,
has_aux=True)(dist_params,
self.tfp_cls,
sample_shape)
# TEST: the samples have the same size, and the straight-through gradient
# doesn't affect the tfp sample.
chex.assert_equal_shape((sample, tfp_sample))
self.assertion_fn(rtol=2e-3)(tfp_sample, tfp_st_sample)
# TEST: the TFP gradient is zero.
assert (jnp.asarray(*tfp_sample_grad.values()) == 0).all()
# TEST: the TFP straight-through gradient is non zero.
assert (jnp.asarray(*tfp_st_sample_grad.values()) != 0).any()
    # TEST: the TFP straight-through gradient equals the distrax one whenever
    # the distrax and TFP samples coincide. Due to stochasticity the samples
    # can differ, so the test deliberately uses skewed distributions in its
    # parametrization to make the samples match most of the time.
sample_grad_v = jnp.stack(jnp.array(*sample_grad.values()))
tfp_st_sample_grad_v = jnp.stack(jnp.array(*tfp_st_sample_grad.values()))
if np.all(sample == tfp_st_sample):
self.assertion_fn(rtol=2e-3)(sample_grad_v, tfp_st_sample_grad_v)
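# Minimal sketch of the trick exercised above (illustrative helper, not used by
# the test): the straight-through estimator returns the discrete one-hot sample
# in the forward pass while routing gradients through the softmax probabilities.
def _straight_through_sample_sketch(logits, key):
  probs = jax.nn.softmax(logits, axis=-1)
  one_hot_sample = jax.nn.one_hot(
      jax.random.categorical(key, logits), logits.shape[-1])
  # Value equals `one_hot_sample`; the gradient w.r.t. `logits` equals that of
  # `probs`, because the stop_gradient term cancels in the forward pass only.
  return one_hot_sample + probs - jax.lax.stop_gradient(probs)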
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/straight_through_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `joint.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions.categorical import Categorical
from distrax._src.distributions.joint import Joint
from distrax._src.distributions.mvn_diag import MultivariateNormalDiag
from distrax._src.distributions.normal import Normal
import jax
import jax.numpy as jnp
import numpy as np
import tree
def _make_nested_distributions_and_inputs(batch_shape=None, shift=0.0):
distributions = dict(
categoricals=[
Categorical(np.array([0 + shift, 1, 2])),
Categorical(np.array([2 + shift, 0, 1])),
],
normals=(
Normal(np.array(1) + shift, np.array(2) + shift),
Normal(np.array(2) + shift, np.array(1) + shift),
Normal(np.array(5) + shift, np.array(4) + shift),
),
multivariate=MultivariateNormalDiag(np.array([5, 6, 7]) + shift,
np.array([3, 3, 3]) + shift))
inputs = dict(
categoricals=[
np.array(0),
np.array(1),
],
normals=(
np.array(2.5),
np.array(3.1),
np.array(4.2),
),
multivariate=np.array([5.1, 5.2, 5.3]))
if isinstance(batch_shape, tuple):
def _add_batch_shape_to_tensor(x):
for dim in reversed(batch_shape):
x = jnp.repeat(x[None, ...], dim, axis=0)
return x
def _add_batch_shape_to_distribution(d):
if isinstance(d, Categorical):
return Categorical(_add_batch_shape_to_tensor(d.logits_parameter()))
elif isinstance(d, Normal):
return Normal(_add_batch_shape_to_tensor(d.loc),
_add_batch_shape_to_tensor(d.scale))
elif isinstance(d, MultivariateNormalDiag):
return MultivariateNormalDiag(_add_batch_shape_to_tensor(d.loc),
_add_batch_shape_to_tensor(d.scale_diag))
distributions = tree.map_structure(_add_batch_shape_to_distribution,
distributions)
inputs = tree.map_structure(_add_batch_shape_to_tensor, inputs)
return distributions, inputs
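# Reference sketch (hypothetical helper, not used by the tests below): `Joint`
# treats a pytree of distributions as a single distribution, and the
# log-probability of a matching pytree of events is the sum of the per-leaf
# log-probabilities, which is what the assertions below check against.
def _reference_joint_log_prob(distributions, inputs):
  flat_dists = tree.flatten(distributions)
  flat_inputs = tree.flatten(inputs)
  return sum(d.log_prob(x) for d, x in zip(flat_dists, flat_inputs))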
class JointTest(parameterized.TestCase):
@chex.all_variants
@parameterized.named_parameters(
('categorical', Categorical, (np.array([0, 1, 2]),), np.array(0)),
('normal', Normal, (np.array([0, 1, 2]), np.array([1, 2, 3])),
np.array([0, 0, 0])),
('mvn', MultivariateNormalDiag,
(np.array([[0, 1, 2], [3, 4, 5]]), np.array([[1, 2, 3], [4, 5, 6]])),
np.array([[0, 0, 0], [0, 0, 0]])),
)
def test_single_distribution(self, fn, params, x):
dist = fn(*params)
joint = Joint(dist)
key = jax.random.PRNGKey(0)
subkey, = jax.random.split(key, 1)
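    # Joint is expected to split the seed once per leaf distribution (a single
    # leaf here), so mirror that split to obtain bitwise-identical samples.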
with self.subTest('sample'):
actual = self.variant(joint.sample)(seed=key)
expected = dist.sample(seed=subkey)
np.testing.assert_allclose(actual, expected, rtol=1e-6)
with self.subTest('log_prob'):
actual = self.variant(joint.log_prob)(x)
expected = dist.log_prob(x)
np.testing.assert_allclose(actual, expected, rtol=3e-5)
with self.subTest('sample_and_log_prob'):
actual_sample, actual_log_prob = self.variant(joint.sample_and_log_prob)(
seed=key)
expected_sample, expected_log_prob = dist.sample_and_log_prob(seed=subkey)
np.testing.assert_allclose(actual_sample, expected_sample, rtol=3e-5)
np.testing.assert_allclose(actual_log_prob, expected_log_prob, rtol=3e-5)
@chex.all_variants
def test_distribution_tuple(self):
distributions = (
Categorical(np.array([0, 1, 2])),
MultivariateNormalDiag(np.array([1, 2, 3]), np.array([2, 3, 4])))
inputs = (np.array(0), np.array([0.1, 0.2, 0.3]))
joint = Joint(distributions)
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
with self.subTest('sample'):
actuals = self.variant(joint.sample)(seed=key)
assert isinstance(actuals, tuple)
for actual, dist, subkey in zip(actuals, distributions, subkeys):
expected = dist.sample(seed=subkey)
np.testing.assert_allclose(actual, expected, rtol=1e-6)
with self.subTest('log_prob'):
actual = self.variant(joint.log_prob)(inputs)
log_probs = [dist.log_prob(x) for dist, x in zip(distributions, inputs)]
expected = sum(log_probs)
np.testing.assert_array_equal(actual, expected)
with self.subTest('sample_and_log_prob'):
actual_sample, actual_log_prob = self.variant(joint.sample_and_log_prob)(
seed=key)
assert isinstance(actual_sample, tuple)
samples = []
log_probs = []
for dist, subkey in zip(distributions, subkeys):
sample, log_prob = dist.sample_and_log_prob(seed=subkey)
samples.append(sample)
log_probs.append(log_prob)
expected_sample = tuple(samples)
expected_log_prob = sum(log_probs)
for actual, expected in zip(actual_sample, expected_sample):
np.testing.assert_allclose(actual, expected, rtol=1e-6)
np.testing.assert_array_equal(actual_log_prob, expected_log_prob)
@chex.all_variants
def test_distribution_list(self):
distributions = [
Categorical(np.array([0, 1, 2])),
MultivariateNormalDiag(np.array([1, 2, 3]), np.array([2, 3, 4]))]
inputs = [np.array(0), np.array([0.1, 0.2, 0.3])]
joint = Joint(distributions)
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
with self.subTest('sample'):
actuals = self.variant(joint.sample)(seed=key)
assert isinstance(actuals, list)
for actual, dist, subkey in zip(actuals, distributions, subkeys):
expected = dist.sample(seed=subkey)
np.testing.assert_allclose(actual, expected, rtol=1e-6)
with self.subTest('log_prob'):
actual = self.variant(joint.log_prob)(inputs)
log_probs = [dist.log_prob(x) for dist, x in zip(distributions, inputs)]
expected = sum(log_probs)
np.testing.assert_array_equal(actual, expected)
with self.subTest('sample_and_log_prob'):
actual_sample, actual_log_prob = self.variant(joint.sample_and_log_prob)(
seed=key)
assert isinstance(actual_sample, list)
expected_sample = []
log_probs = []
for dist, subkey in zip(distributions, subkeys):
sample, log_prob = dist.sample_and_log_prob(seed=subkey)
expected_sample.append(sample)
log_probs.append(log_prob)
expected_log_prob = sum(log_probs)
for actual, expected in zip(actual_sample, expected_sample):
np.testing.assert_allclose(actual, expected, rtol=1e-6)
np.testing.assert_array_equal(actual_log_prob, expected_log_prob)
@chex.all_variants
def test_distributions_with_batch_shape(self):
distributions = [
Categorical(np.array([[0, 1, 2], [3, 4, 5]])),
MultivariateNormalDiag(
np.array([[0, 1, 2, 3, 4], [2, 3, 4, 5, 6]]),
np.array([[1, 2, 3, 5, 6], [2, 3, 4, 5, 6]]))]
inputs = [np.array([0, 1]), np.zeros((2, 5))]
joint = Joint(distributions)
assert joint.batch_shape == distributions[0].batch_shape
assert joint.batch_shape == distributions[1].batch_shape
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
with self.subTest('sample'):
actuals = self.variant(joint.sample)(seed=key)
assert isinstance(actuals, list)
assert actuals[0].shape == (2,)
assert actuals[1].shape == (2, 5)
for actual, dist, subkey in zip(actuals, distributions, subkeys):
expected = dist.sample(seed=subkey)
np.testing.assert_allclose(actual, expected, rtol=1e-6)
with self.subTest('log_prob'):
actual = self.variant(joint.log_prob)(inputs)
assert actual.shape == (2,)
log_probs = [dist.log_prob(x) for dist, x in zip(distributions, inputs)]
expected = sum(log_probs)
np.testing.assert_allclose(actual, expected, rtol=1e-6)
with self.subTest('sample_and_log_prob'):
actual_sample, actual_log_prob = self.variant(joint.sample_and_log_prob)(
seed=key)
assert isinstance(actual_sample, list)
assert actual_sample[0].shape == (2,)
assert actual_sample[1].shape == (2, 5)
assert actual_log_prob.shape == (2,)
expected_sample = []
log_probs = []
for dist, subkey in zip(distributions, subkeys):
sample, log_prob = dist.sample_and_log_prob(seed=subkey)
expected_sample.append(sample)
log_probs.append(log_prob)
expected_log_prob = sum(log_probs)
for actual, expected in zip(actual_sample, expected_sample):
np.testing.assert_allclose(actual, expected, rtol=1e-6)
np.testing.assert_allclose(actual_log_prob, expected_log_prob, rtol=1e-6)
@chex.all_variants
def test_nested_distributions(self):
distributions, inputs = _make_nested_distributions_and_inputs()
joint = Joint(distributions)
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 6)
with self.subTest('sample'):
actuals = self.variant(joint.sample)(seed=key)
assert isinstance(actuals, dict)
assert isinstance(actuals['categoricals'], list)
assert isinstance(actuals['normals'], tuple)
assert isinstance(actuals['multivariate'], jnp.ndarray)
flat_actuals = tree.flatten(actuals)
flat_dists = tree.flatten(distributions)
for actual, dist, subkey in zip(flat_actuals, flat_dists, subkeys):
expected = dist.sample(seed=subkey)
np.testing.assert_allclose(actual, expected, rtol=1e-6)
with self.subTest('log_prob'):
actual = self.variant(joint.log_prob)(inputs)
flat_dists = tree.flatten(distributions)
flat_inputs = tree.flatten(inputs)
log_probs = [dist.log_prob(x) for dist, x in zip(flat_dists, flat_inputs)]
expected = sum(log_probs)
np.testing.assert_array_equal(actual, expected)
with self.subTest('sample_and_log_prob'):
actual_sample, actual_log_prob = self.variant(joint.sample_and_log_prob)(
seed=key)
assert isinstance(actual_sample, dict)
assert isinstance(actual_sample['categoricals'], list)
assert isinstance(actual_sample['normals'], tuple)
assert isinstance(actual_sample['multivariate'], jnp.ndarray)
expected_sample = []
log_probs = []
flat_dists = tree.flatten(distributions)
for dist, subkey in zip(flat_dists, subkeys):
sample, log_prob = dist.sample_and_log_prob(seed=subkey)
expected_sample.append(sample)
log_probs.append(log_prob)
expected_log_prob = sum(log_probs)
flat_actuals = tree.flatten(actual_sample)
for actual, expected in zip(flat_actuals, expected_sample):
np.testing.assert_allclose(actual, expected, rtol=1e-6)
np.testing.assert_allclose(actual_log_prob, expected_log_prob, rtol=1e-6)
@chex.all_variants(with_pmap=False)
def test_entropy(self):
distributions, _ = _make_nested_distributions_and_inputs()
joint = Joint(distributions)
actual = self.variant(joint.entropy)()
flat_dists = tree.flatten(distributions)
expected = sum(dist.entropy() for dist in flat_dists)
np.testing.assert_allclose(actual, expected, rtol=1e-6)
@chex.all_variants(with_pmap=False)
def test_mode(self):
distributions, _ = _make_nested_distributions_and_inputs()
joint = Joint(distributions)
actual = self.variant(joint.mode)()
expected = tree.map_structure(lambda d: d.mode(), distributions)
for actual, expected in zip(tree.flatten(actual), tree.flatten(expected)):
np.testing.assert_array_equal(actual, expected)
@chex.all_variants(with_pmap=False)
def test_mean(self):
distributions, _ = _make_nested_distributions_and_inputs()
del distributions['categoricals'] # Mean is not defined for these.
joint = Joint(distributions)
actual = self.variant(joint.mean)()
expected = tree.map_structure(lambda d: d.mean(), distributions)
for actual, expected in zip(tree.flatten(actual), tree.flatten(expected)):
np.testing.assert_array_equal(actual, expected)
@chex.all_variants(with_pmap=False)
def test_median(self):
distributions, _ = _make_nested_distributions_and_inputs()
del distributions['categoricals'] # Median is not defined for these.
joint = Joint(distributions)
actual = self.variant(joint.median)()
expected = tree.map_structure(lambda d: d.median(), distributions)
for actual, expected in zip(tree.flatten(actual), tree.flatten(expected)):
np.testing.assert_array_equal(actual, expected)
@chex.all_variants
def test_kl_divergence(self):
dists_a, _ = _make_nested_distributions_and_inputs(shift=0.0)
dists_b, _ = _make_nested_distributions_and_inputs(shift=1.0)
joint_a = Joint(dists_a)
joint_b = Joint(dists_b)
actual = self.variant(joint_a.kl_divergence)(joint_b)
kls = []
for dist_a, dist_b in zip(tree.flatten(dists_a), tree.flatten(dists_b)):
kls.append(dist_a.kl_divergence(dist_b))
expected = sum(kls)
np.testing.assert_allclose(actual, expected, rtol=1e-4)
@chex.all_variants
def test_log_cdf(self):
distributions, inputs = _make_nested_distributions_and_inputs()
joint = Joint(distributions)
actual = self.variant(joint.log_cdf)(inputs)
flat_dists = tree.flatten(distributions)
flat_inputs = tree.flatten(inputs)
expected = sum(dist.log_cdf(x) for dist, x in zip(flat_dists, flat_inputs))
np.testing.assert_allclose(actual, expected, rtol=1e-6)
def test_distributions_property(self):
distributions, _ = _make_nested_distributions_and_inputs()
joint = Joint(distributions)
tree.assert_same_structure(joint.distributions, distributions)
def test_event_shape_property(self):
distributions, _ = _make_nested_distributions_and_inputs()
joint = Joint(distributions)
all_event_shapes = joint.event_shape
for dist, event_shape in zip(tree.flatten(distributions),
tree.flatten_up_to(distributions,
all_event_shapes)):
np.testing.assert_equal(dist.event_shape, event_shape)
def test_dtype_property(self):
distributions, _ = _make_nested_distributions_and_inputs()
joint = Joint(distributions)
all_dtypes = joint.dtype
for dist, dtype in zip(tree.flatten(distributions),
tree.flatten(all_dtypes)):
np.testing.assert_equal(dist.dtype, dtype)
@chex.all_variants
@parameterized.named_parameters(
('1d batch first element', (10,), 0),
('1d batch last element', (10,), -1),
('1d batch first two elements', (10,), slice(0, 2)),
('1d batch first and third elements', (10,), slice(0, 4, 2)),
('2d batch first element', (10, 7), 0),
('2d batch last element', (10, 7), -1),
('2d batch first two elements', (10, 7), slice(0, 2)),
('2d batch first and third elements', (10, 7), slice(0, 4, 2)),
)
def test_indexing(self, batch_shape, index):
distributions, inputs = _make_nested_distributions_and_inputs(
batch_shape=batch_shape)
inputs = tree.map_structure(lambda x: x[index], inputs)
joint = Joint(distributions)
joint_indexed = joint[index]
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 6)
with self.subTest('batch shape'):
for dist, indexed in zip(tree.flatten(distributions),
tree.flatten(joint_indexed.distributions)):
assert dist[index].batch_shape == indexed.batch_shape
with self.subTest('event shape'):
for dist, indexed in zip(tree.flatten(distributions),
tree.flatten(joint_indexed.distributions)):
assert dist[index].event_shape == indexed.event_shape
with self.subTest('sample'):
all_samples = self.variant(joint_indexed.sample)(seed=key)
for dist, subkey, actual in zip(tree.flatten(distributions),
subkeys,
tree.flatten(all_samples)):
expected = dist[index].sample(seed=subkey)
np.testing.assert_allclose(actual, expected, rtol=1e-4)
with self.subTest('sample_and_log_prob'):
actual_samples, actual_log_probs = self.variant(
joint_indexed.sample_and_log_prob)(seed=key)
expected_outputs = [
dist[index].sample_and_log_prob(seed=subkey)
for dist, subkey in zip(tree.flatten(distributions), subkeys)]
expected_samples = [sample for sample, _ in expected_outputs]
expected_log_probs = sum(lp for _, lp in expected_outputs)
for actual, expected in zip(tree.flatten(actual_samples),
expected_samples):
np.testing.assert_allclose(actual, expected, rtol=1e-4)
np.testing.assert_allclose(
actual_log_probs, expected_log_probs, rtol=1e-6)
with self.subTest('log_prob'):
actual = self.variant(joint_indexed.log_prob)(inputs)
expected = sum(dist[index].log_prob(x) for dist, x in zip(
tree.flatten(distributions), tree.flatten(inputs)))
np.testing.assert_allclose(actual, expected, rtol=1e-6)
def test_raise_on_mismatched_batch_shape(self):
distributions = dict(
unbatched=Categorical(np.zeros((3,))),
batched=Normal(np.zeros((3, 4, 5)), np.ones((3, 4, 5))))
with self.assertRaises(ValueError):
Joint(distributions)
@chex.all_variants
def test_raise_on_incompatible_distributions_kl(self):
distributions, _ = _make_nested_distributions_and_inputs()
incompatible = dict(
categoricals=distributions['normals'],
normals=distributions['categoricals'],
multivariate=distributions['multivariate'])
joint_a = Joint(distributions)
joint_b = Joint(incompatible)
with self.assertRaises(ValueError):
self.variant(joint_a.kl_divergence)(joint_b)
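# Illustrative sketch (not part of the original tests; names taken from the
# imports of this file): the behaviour exercised in this class follows from
# `Joint` treating its components as independent, so the joint log-density is
# the sum of the component log-densities:
#
#   joint = Joint({'x': Normal(loc=0., scale=1.),
#                  'k': Categorical(logits=np.zeros(3))})
#   value = {'x': np.array(0.5), 'k': np.array(1)}
#   joint.log_prob(value)  # == Normal(0., 1.).log_prob(0.5)
#                          #    + Categorical(logits=np.zeros(3)).log_prob(1)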
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/joint_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deterministic distribution."""
from typing import Optional, Tuple, Union
import chex
from distrax._src.distributions import distribution
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
Array = chex.Array
Numeric = chex.Numeric
PRNGKey = chex.PRNGKey
EventT = distribution.EventT
class Deterministic(distribution.Distribution):
"""Scalar Deterministic distribution on the real line."""
equiv_tfp_cls = tfd.Deterministic
def __init__(self,
loc: Numeric,
atol: Optional[Numeric] = None,
rtol: Optional[Numeric] = None):
"""Initializes a Deterministic distribution.
Args:
loc: Batch of points on which the distribution is supported.
atol: Absolute tolerance for comparing closeness to `loc`. It must be
broadcastable with `loc`, and it must not lead to additional batch
dimensions after broadcasting.
rtol: Relative tolerance for comparing closeness to `loc`. It must be
broadcastable with `loc`, and it must not lead to additional batch
dimensions after broadcasting.
"""
super().__init__()
self._loc = jnp.asarray(loc)
self._atol = jnp.asarray(0. if atol is None else atol)
self._rtol = jnp.asarray(0. if rtol is None else rtol)
if len(self._rtol.shape) > len(self._loc.shape):
raise ValueError(f'The parameter `rtol` cannot have more dimensions than '
f'`loc`, but their shapes are {self._rtol.shape} and '
f'{self._loc.shape}, respectively.')
if len(self._atol.shape) > len(self._loc.shape):
raise ValueError(f'The parameter `atol` cannot have more dimensions than '
f'`loc`, but their shapes are {self._atol.shape} and '
f'{self._loc.shape}, respectively.')
@property
def event_shape(self) -> Tuple[int, ...]:
"""Shape of the events."""
return ()
@property
def batch_shape(self) -> Tuple[int, ...]:
"""Shape of batch of distribution samples."""
return self._loc.shape
@property
def loc(self) -> Array:
"""Point(s) on which this distribution is supported."""
return self._loc
@property
def atol(self) -> Array:
"""Absolute tolerance for comparing closeness to `loc`."""
return jnp.broadcast_to(self._atol, self.batch_shape)
@property
def rtol(self) -> Array:
"""Relative tolerance for comparing closeness to `loc`."""
return jnp.broadcast_to(self._rtol, self.batch_shape)
@property
def slack(self) -> Array:
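    """Tolerance for comparing closeness to `loc`, i.e. `atol + rtol * |loc|`."""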
return jnp.where(
self.rtol == 0,
self.atol,
self.atol + self.rtol * jnp.abs(self.loc))
def _sample_n(self, key: PRNGKey, n: int) -> Array:
"""See `Distribution._sample_n`."""
del key # unused
loc = jnp.expand_dims(self.loc, axis=0)
return jnp.repeat(loc, n, axis=0)
def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:
"""See `Distribution._sample_n_and_log_prob`."""
samples = self._sample_n(key, n)
log_prob = jnp.zeros_like(samples)
return samples, log_prob
def log_prob(self, value: EventT) -> Array:
"""See `Distribution.log_prob`."""
return jnp.log(self.prob(value))
def prob(self, value: EventT) -> Array:
"""See `Distribution.prob`."""
return jnp.where(
jnp.abs(value - self.loc) <= self.slack, 1., 0.)
def entropy(self) -> Array:
"""Calculates the Shannon entropy (in nats)."""
return jnp.zeros(self.batch_shape, jnp.float_)
def mean(self) -> Array:
"""Calculates the mean."""
return self.loc
def mode(self) -> Array:
"""Calculates the mode."""
return self.mean()
def variance(self) -> Array:
"""Calculates the variance."""
return jnp.zeros(self.batch_shape, jnp.float_)
def stddev(self) -> Array:
"""Calculates the standard deviation."""
return self.variance()
def log_cdf(self, value: EventT) -> Array:
"""See `Distribution.log_cdf`."""
return jnp.log(self.cdf(value))
def cdf(self, value: EventT) -> Array:
"""See `Distribution.cdf`."""
return jnp.where(value >= self.loc - self.slack, 1., 0.)
def __getitem__(self, index) -> 'Deterministic':
"""See `Distribution.__getitem__`."""
index = distribution.to_batch_shape_index(self.batch_shape, index)
return Deterministic(
loc=self.loc[index], atol=self.atol[index], rtol=self.rtol[index])
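# Illustrative sketch (not part of the library source): how `atol`/`rtol` feed
# into `prob` via the `slack` property above. Values within
# `atol + rtol * |loc|` of `loc` get probability 1, everything else 0:
#
#   dist = Deterministic(loc=jnp.array([0., 10.]), atol=0.1, rtol=0.05)
#   dist.prob(jnp.array([0.05, 10.7]))      # -> [1., 0.]  (slack: 0.1, 0.6)
#   dist.log_prob(jnp.array([0.05, 10.7]))  # -> [0., -inf]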
def _kl_divergence_deterministic_deterministic(
dist1: Union[Deterministic, tfd.Deterministic],
dist2: Union[Deterministic, tfd.Deterministic],
*unused_args, **unused_kwargs,
) -> Array:
"""KL divergence `KL(dist1 || dist2)` between two Deterministic distributions.
Note that the KL divergence is infinite if the support of `dist1` is not a
subset of the support of `dist2`.
Args:
dist1: A Deterministic distribution.
dist2: A Deterministic distribution.
Returns:
Batchwise `KL(dist1 || dist2)`.
"""
slack2 = dist2.atol + dist2.rtol * jnp.abs(dist2.loc)
return - jnp.log(jnp.where(jnp.abs(dist1.loc - dist2.loc) <= slack2, 1., 0.))
# Register the KL functions with TFP.
tfd.RegisterKL(Deterministic, Deterministic)(
_kl_divergence_deterministic_deterministic)
tfd.RegisterKL(Deterministic, Deterministic.equiv_tfp_cls)(
_kl_divergence_deterministic_deterministic)
tfd.RegisterKL(Deterministic.equiv_tfp_cls, Deterministic)(
_kl_divergence_deterministic_deterministic)
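# Illustrative sketch (not part of the library source): with the registrations
# above, `dist1.kl_divergence(dist2)` resolves to the function defined here.
# The KL is 0 when `dist1`'s point mass falls within `dist2`'s slack, and +inf
# otherwise:
#
#   d1 = Deterministic(loc=0.)
#   d2 = Deterministic(loc=0.3, atol=0.5)
#   d1.kl_divergence(d2)  # -> 0.0  (|0.0 - 0.3| <= 0.5)
#   d2.kl_divergence(d1)  # -> inf  (|0.3 - 0.0| > 0.0, d1 has zero slack)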
| distrax-master | distrax/_src/distributions/deterministic.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `normal.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import normal
from distrax._src.utils import equivalence
import jax
import jax.numpy as jnp
import numpy as np
class NormalTest(equivalence.EquivalenceTest):
def setUp(self):
super().setUp()
self._init_distr_cls(normal.Normal)
@parameterized.named_parameters(
('1d std normal', (0, 1)),
('2d std normal', (np.zeros(2), np.ones(2))),
('rank 2 std normal', (np.zeros((3, 2)), np.ones((3, 2)))),
('broadcasted loc', (0, np.ones(3))),
('broadcasted scale', (np.ones(3), 1)),
)
def test_event_shape(self, distr_params):
super()._test_event_shape(distr_params, dict())
@chex.all_variants
@parameterized.named_parameters(
('1d std normal, no shape', (0, 1), ()),
('1d std normal, int shape', (0, 1), 1),
('1d std normal, 1-tuple shape', (0, 1), (1,)),
('1d std normal, 2-tuple shape', (0, 1), (2, 2)),
('2d std normal, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std normal, int shape', ([0, 0], [1, 1]), 1),
('2d std normal, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std normal, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
('rank 2 std normal, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_shape(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_shape(distr_params, dict(), sample_shape)
@chex.all_variants
@jax.numpy_rank_promotion('raise')
@parameterized.named_parameters(
('1d std normal, no shape', (0, 1), ()),
('1d std normal, int shape', (0, 1), 1),
('1d std normal, 1-tuple shape', (0, 1), (1,)),
('1d std normal, 2-tuple shape', (0, 1), (2, 2)),
('2d std normal, no shape', (np.zeros(2), np.ones(2)), ()),
('2d std normal, int shape', ([0, 0], [1, 1]), 1),
('2d std normal, 1-tuple shape', (np.zeros(2), np.ones(2)), (1,)),
('2d std normal, 2-tuple shape', ([0, 0], [1, 1]), (2, 2)),
('rank 2 std normal, 2-tuple shape', (np.zeros((3, 2)), np.ones(
(3, 2))), (2, 2)),
('broadcasted loc', (0, np.ones(3)), (2, 2)),
('broadcasted scale', (np.ones(3), 1), ()),
)
def test_sample_and_log_prob(self, distr_params, sample_shape):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_sample_and_log_prob(
dist_args=distr_params,
dist_kwargs=dict(),
sample_shape=sample_shape,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants
@parameterized.named_parameters(
('1d dist, 1d value', (0, 1), 1),
('1d dist, 2d value', (0., 1.), np.array([1, 2])),
('2d dist, 1d value', (np.zeros(2), np.ones(2)), 1),
('2d broadcasted dist, 1d value', (np.zeros(2), 1), 1),
('2d dist, 2d value', (np.zeros(2), np.ones(2)), np.array([1, 2])),
('1d dist, 1d value, edge case', (0, 1), 200),
)
def test_method_with_input(self, distr_params, value):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
value = np.asarray(value, dtype=np.float32)
for method in ['log_prob', 'prob', 'cdf', 'log_cdf', 'survival_function',
'log_survival_function']:
with self.subTest(method):
super()._test_attribute(
attribute_string=method,
dist_args=distr_params,
dist_kwargs={},
call_args=(value,),
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('entropy', (0., 1.), 'entropy'),
('mean', (0, 1), 'mean'),
('mean from 1d params', ([-1, 1], [1, 2]), 'mean'),
('variance', (0, 1), 'variance'),
('variance from np params', (np.ones(2), np.ones(2)), 'variance'),
('stddev', (0, 1), 'stddev'),
('stddev from rank 2 params', (np.ones((2, 3)), np.ones(
(2, 3))), 'stddev'),
('mode', (0, 1), 'mode'),
)
def test_method(self, distr_params, function_string):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
super()._test_attribute(
function_string,
distr_params,
assertion_fn=self.assertion_fn(rtol=1e-2))
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('no broadcast', ([0., 1., -0.5], [0.5, 1., 1.5])),
('broadcasted loc', (0.5, [0.5, 1., 1.5])),
('broadcasted scale', ([0., 1., -0.5], 0.8)),
)
def test_median(self, distr_params):
distr_params = (np.asarray(distr_params[0], dtype=np.float32),
np.asarray(distr_params[1], dtype=np.float32))
dist = self.distrax_cls(*distr_params)
self.assertion_fn(rtol=1e-2)(self.variant(dist.median)(), dist.mean())
@chex.all_variants(with_pmap=False)
@parameterized.named_parameters(
('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
def test_with_two_distributions(self, function_string, mode_string):
super()._test_with_two_distributions(
attribute_string=function_string,
mode_string=mode_string,
dist1_kwargs={
'loc': np.random.randn(4, 1, 2),
'scale': np.asarray([[0.8, 0.2], [0.1, 1.2], [1.4, 3.1]]),
},
dist2_kwargs={
'loc': np.random.randn(3, 2),
'scale': 0.1 + np.random.rand(4, 1, 2),
},
assertion_fn=self.assertion_fn(rtol=3e-2))
def test_jittable(self):
super()._test_jittable((np.zeros((3,)), np.ones((3,))))
@parameterized.named_parameters(
('single element', 2),
('range', slice(-1)),
('range_2', (slice(None), slice(-1))),
('ellipsis', (Ellipsis, -1)),
)
def test_slice(self, slice_):
loc = jnp.array(np.random.randn(3, 4, 5))
scale = jnp.array(np.random.randn(3, 4, 5))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(rtol=1e-2)(dist[slice_].mean(), loc[slice_])
def test_slice_different_parameterization(self):
loc = jnp.array(np.random.randn(4))
scale = jnp.array(np.random.randn(3, 4))
dist = self.distrax_cls(loc=loc, scale=scale)
self.assertion_fn(rtol=1e-2)(dist[0].mean(), loc) # Not slicing loc.
self.assertion_fn(rtol=1e-2)(dist[0].stddev(), scale[0])
def test_vmap_inputs(self):
def log_prob_sum(dist, x):
return dist.log_prob(x).sum()
dist = normal.Normal(
jnp.arange(3 * 4 * 5).reshape((3, 4, 5)), jnp.ones((3, 4, 5)))
x = jnp.zeros((3, 4, 5))
with self.subTest('no vmap'):
actual = log_prob_sum(dist, x)
expected = dist.log_prob(x).sum()
self.assertion_fn()(actual, expected)
with self.subTest('axis=0'):
actual = jax.vmap(log_prob_sum, in_axes=0)(dist, x)
expected = dist.log_prob(x).sum(axis=(1, 2))
self.assertion_fn()(actual, expected)
with self.subTest('axis=1'):
actual = jax.vmap(log_prob_sum, in_axes=1)(dist, x)
expected = dist.log_prob(x).sum(axis=(0, 2))
self.assertion_fn()(actual, expected)
def test_vmap_outputs(self):
def summed_dist(loc, scale):
return normal.Normal(loc.sum(keepdims=True), scale.sum(keepdims=True))
loc = jnp.arange((3 * 4 * 5)).reshape((3, 4, 5))
scale = jnp.ones((3, 4, 5))
actual = jax.vmap(summed_dist)(loc, scale)
expected = normal.Normal(
loc.sum(axis=(1, 2), keepdims=True),
scale.sum(axis=(1, 2), keepdims=True))
np.testing.assert_equal(actual.batch_shape, expected.batch_shape)
np.testing.assert_equal(actual.event_shape, expected.event_shape)
x = jnp.array([[[1]], [[2]], [[3]]])
self.assertion_fn(rtol=1e-6)(actual.log_prob(x), expected.log_prob(x))
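# Illustrative note (not part of the original tests; shapes assumed): the two
# vmap tests above rely on distrax distributions being valid `jax.vmap`
# inputs and outputs, so mapping over a leading batch axis works directly:
#
#   dist = normal.Normal(jnp.zeros((3, 4)), jnp.ones((3, 4)))
#   x = jnp.zeros((3, 4))
#   jax.vmap(lambda d, v: d.log_prob(v))(dist, x).shape  # -> (3, 4)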
if __name__ == '__main__':
absltest.main()
| distrax-master | distrax/_src/distributions/normal_test.py |