python_code | repo_name | file_path
---|---|---
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transporter architecture in Sonnet/TF 1: https://arxiv.org/abs/1906.11883."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sonnet as snt
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
nest = contrib_framework.nest
# The paper submission used BatchNorm, but we have since found that Layer and
# Instance norm can be considerably more stable.
_NORMALIZATION_CTORS = {
"layer": snt.LayerNorm,
"instance": functools.partial(snt.LayerNorm, axis=[1, 2]),
"batch": snt.BatchNormV2,
}
def _connect_module_with_kwarg_if_supported(module,
input_tensor,
kwarg_name,
kwarg_value):
"""Connects a module to some input, plus a kwarg= if supported by module."""
if snt.supports_kwargs(module, kwarg_name) == "supported":
kwargs = {kwarg_name: kwarg_value}
else:
kwargs = {}
return module(input_tensor, **kwargs)
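# Illustrative sketch added for exposition (not part of the original file).
# It shows why the helper above is needed: `snt.BatchNormV2` accepts an
# `is_training` kwarg while `snt.LayerNorm` does not, so the helper forwards
# the kwarg only when the connected module supports it. The tensor shape is an
# assumption for illustration, and this assumes a Sonnet 1 version whose
# LayerNorm accepts arbitrary-rank inputs (as the Encoder below relies on).
def _example_norm_connection_sketch():
  features = tf.zeros([2, 8, 8, 16])
  batch_norm = _NORMALIZATION_CTORS["batch"]()
  layer_norm = _NORMALIZATION_CTORS["layer"]()
  # `is_training` is forwarded to BatchNormV2, which supports it...
  normed_batch = _connect_module_with_kwarg_if_supported(
      batch_norm, features, "is_training", True)
  # ...and silently dropped for LayerNorm, which does not.
  normed_layer = _connect_module_with_kwarg_if_supported(
      layer_norm, features, "is_training", True)
  return normed_batch, normed_layer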
class Transporter(snt.AbstractModule):
"""Sonnet module implementing the Transporter architecture."""
def __init__(
self,
encoder,
keypointer,
decoder,
name="transporter"):
"""Initialize the Transporter module.
Args:
encoder: `snt.AbstractModule` mapping images to features (see `Encoder`)
keypointer: `snt.AbstractModule` mapping images to keypoint masks (see
`KeyPointer`)
decoder: `snt.AbstractModule` decoding features to images (see `Decoder`)
name: `str` module name
"""
super(Transporter, self).__init__(name=name)
self._encoder = encoder
self._decoder = decoder
self._keypointer = keypointer
def _build(self, image_a, image_b, is_training):
"""Reconstructs image_b using feature transport from image_a.
    This approach matches the NeurIPS submission.
Args:
image_a: Tensor of shape [B, H, W, C] containing a batch of images.
image_b: Tensor of shape [B, H, W, C] containing a batch of images.
is_training: `bool` indication whether the model is in training mode.
Returns:
A dict containing keys:
'reconstructed_image_b': Reconstruction of image_b, with the same shape.
'features_a': Tensor of shape [B, F_h, F_w, N] of the extracted features
for `image_a`.
'features_b': Tensor of shape [B, F_h, F_w, N] of the extracted features
for `image_b`.
'keypoints_a': The result of the keypointer module on image_a, with stop
gradients applied.
'keypoints_b': The result of the keypointer module on image_b.
"""
# Process both images. All gradients related to image_a are stopped.
image_a_features = tf.stop_gradient(
self._encoder(image_a, is_training=is_training))
image_a_keypoints = nest.map_structure(
tf.stop_gradient, self._keypointer(image_a, is_training=is_training))
image_b_features = self._encoder(image_b, is_training=is_training)
image_b_keypoints = self._keypointer(image_b, is_training=is_training)
# Transport features
num_keypoints = image_a_keypoints["heatmaps"].shape[-1]
transported_features = image_a_features
for k in range(num_keypoints):
mask_a = image_a_keypoints["heatmaps"][..., k, None]
mask_b = image_b_keypoints["heatmaps"][..., k, None]
      # Suppress features from image_a around both keypoint locations.
transported_features = (
(1 - mask_a) * (1 - mask_b) * transported_features)
      # Copy features from image_b around its keypoint locations.
transported_features += (mask_b * image_b_features)
reconstructed_image_b = self._decoder(
transported_features, is_training=is_training)
return {
"reconstructed_image_b": reconstructed_image_b,
"features_a": image_a_features,
"features_b": image_b_features,
"keypoints_a": image_a_keypoints,
"keypoints_b": image_b_keypoints,
}
def reconstruction_loss(image, predicted_image, loss_type="l2"):
"""Returns the reconstruction loss between the image and the predicted_image.
Args:
image: target image tensor of shape [B, H, W, C]
predicted_image: reconstructed image as returned by the model
loss_type: `str` reconstruction loss, either `l2` (default) or `l1`.
Returns:
The reconstruction loss
"""
if loss_type == "l2":
return tf.reduce_mean(tf.square(image - predicted_image))
elif loss_type == "l1":
return tf.reduce_mean(tf.abs(image - predicted_image))
else:
raise ValueError("Unknown loss type: {}".format(loss_type))
class Encoder(snt.AbstractModule):
"""Encoder module mapping an image to features.
  The encoder is a standard convolutional network with ReLU activations.
"""
def __init__(
self,
filters=(16, 16, 32, 32),
kernel_sizes=(7, 3, 3, 3),
strides=(1, 1, 2, 1),
norm_type="batch",
name="encoder"):
"""Initialize the Encoder.
Args:
filters: tuple of `int`. The ith layer of the encoder will
consist of `filters[i]` filters.
kernel_sizes: tuple of `int` kernel sizes for each layer
strides: tuple of `int` strides for each layer
norm_type: string, one of 'instance', 'layer', 'batch'.
name: `str` name of the module.
"""
super(Encoder, self).__init__(name=name)
if len({len(filters), len(kernel_sizes), len(strides)}) != 1:
raise ValueError(
"length of filters/kernel_sizes/strides lists must be the same")
self._filters = filters
self._kernels = kernel_sizes
self._strides = strides
self._norm_ctor = _NORMALIZATION_CTORS[norm_type]
def _build(self, image, is_training):
"""Connect the Encoder.
Args:
image: A batch of images of shape [B, H, W, C]
is_training: `bool` indicating if the model is in training mode.
Returns:
A tensor of features of shape [B, F_h, F_w, N] where F_h and F_w are the
      height and width of the feature map and N = `self._filters[-1]` (the
      number of filters in the final layer).
"""
regularizers = {"w": contrib_layers.l2_regularizer(1.0)}
features = image
for l in range(len(self._filters)):
with tf.variable_scope("conv_{}".format(l + 1)):
conv = snt.Conv2D(
self._filters[l],
self._kernels[l],
self._strides[l],
padding=snt.SAME,
regularizers=regularizers,
name="conv_{}".format(l+1))
norm_module = self._norm_ctor(name="normalization")
features = conv(features)
features = _connect_module_with_kwarg_if_supported(
norm_module, features, "is_training", is_training)
features = tf.nn.relu(features)
return features
class KeyPointer(snt.AbstractModule):
"""Module for extracting keypoints from an image."""
def __init__(self,
num_keypoints,
gauss_std,
keypoint_encoder,
custom_getter=None,
name="key_pointer"):
"""Iniitialize the keypointer.
Args:
num_keypoints: `int` number of keypoints to extract
gauss_std: `float` size of the keypoints, relative to the image dimensions
normalized to the range [-1, 1]
keypoint_encoder: sonnet Module which produces a feature map. Must accept
an is_training kwarg. When used in the Transporter, the output spatial
resolution of this encoder should match the output spatial resolution
of the other encoder, although these two encoders should not share
weights.
custom_getter: optional custom getter for variables in this module.
name: `str` name of the module
"""
super(KeyPointer, self).__init__(name=name, custom_getter=custom_getter)
self._num_keypoints = num_keypoints
self._gauss_std = gauss_std
self._keypoint_encoder = keypoint_encoder
def _build(self, image, is_training):
"""Compute the gaussian keypoints for the image.
Args:
image: Image tensor of shape [B, H, W, C]
is_training: `bool` whether the model is in training or evaluation mode
Returns:
a dict with keys:
'centers': A tensor of shape [B, K, 2] of the center locations for each
of the K keypoints.
'heatmaps': A tensor of shape [B, F_h, F_w, K] of gaussian maps over the
keypoints, where [F_h, F_w] is the size of the keypoint_encoder
feature maps.
"""
conv = snt.Conv2D(
self._num_keypoints, [1, 1],
stride=1,
regularizers={"w": contrib_layers.l2_regularizer(1.0)},
name="conv_1/conv_1")
image_features = self._keypoint_encoder(image, is_training=is_training)
keypoint_features = conv(image_features)
return get_keypoint_data_from_feature_map(
keypoint_features, self._gauss_std)
def get_keypoint_data_from_feature_map(feature_map, gauss_std):
"""Returns keypoint information from a feature map.
Args:
feature_map: [B, H, W, K] Tensor, should be activations from a convnet.
gauss_std: float, the standard deviation of the gaussians to be put around
the keypoints.
Returns:
a dict with keys:
'centers': A tensor of shape [B, K, 2] of the center locations for each
of the K keypoints.
'heatmaps': A tensor of shape [B, H, W, K] of gaussian maps over the
keypoints.
"""
gauss_mu = _get_keypoint_mus(feature_map)
map_size = feature_map.shape.as_list()[1:3]
gauss_maps = _get_gaussian_maps(gauss_mu, map_size, 1.0 / gauss_std)
return {
"centers": gauss_mu,
"heatmaps": gauss_maps,
}
def _get_keypoint_mus(keypoint_features):
"""Returns the keypoint center points.
Args:
keypoint_features: A tensor of shape [B, F_h, F_w, K] where K is the number
of keypoints to extract.
Returns:
A tensor of shape [B, K, 2] of the y, x center points of each keypoint. Each
center point are in the range [-1, 1]^2. Note: the first element is the y
coordinate, the second is the x coordinate.
"""
gauss_y = _get_coord(keypoint_features, 1)
gauss_x = _get_coord(keypoint_features, 2)
gauss_mu = tf.stack([gauss_y, gauss_x], axis=2)
return gauss_mu
def _get_coord(features, axis):
"""Returns the keypoint coordinate encoding for the given axis.
Args:
features: A tensor of shape [B, F_h, F_w, K] where K is the number of
keypoints to extract.
axis: `int` which axis to extract the coordinate for. Has to be axis 1 or 2.
Returns:
A tensor of shape [B, K] containing the keypoint centers along the given
axis. The location is given in the range [-1, 1].
"""
if axis != 1 and axis != 2:
raise ValueError("Axis needs to be 1 or 2.")
other_axis = 1 if axis == 2 else 2
axis_size = features.shape[axis]
# Compute the normalized weight for each row/column along the axis
g_c_prob = tf.reduce_mean(features, axis=other_axis)
g_c_prob = tf.nn.softmax(g_c_prob, axis=1)
# Linear combination of the interval [-1, 1] using the normalized weights to
# give a single coordinate in the same interval [-1, 1]
scale = tf.cast(tf.linspace(-1.0, 1.0, axis_size), tf.float32)
scale = tf.reshape(scale, [1, axis_size, 1])
coordinate = tf.reduce_sum(g_c_prob * scale, axis=1)
return coordinate
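# Worked example added for exposition (the numbers are illustrative
# assumptions): `_get_coord` is a soft-argmax along one spatial axis. Suppose
# the averaged activations for one keypoint along an axis of size 5 are
# [0, 0, 0, 0, 10]. The softmax weights are approximately [0, 0, 0, 0, 1],
# the candidate positions are linspace(-1, 1, 5) = [-1.0, -0.5, 0.0, 0.5, 1.0],
# and the weighted sum gives a coordinate close to +1.0, i.e. the keypoint
# sits near the high end of that axis.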
def _get_gaussian_maps(mu, map_size, inv_std, power=2):
"""Transforms the keypoint center points to a gaussian masks."""
mu_y, mu_x = mu[:, :, 0:1], mu[:, :, 1:2]
y = tf.cast(tf.linspace(-1.0, 1.0, map_size[0]), tf.float32)
x = tf.cast(tf.linspace(-1.0, 1.0, map_size[1]), tf.float32)
mu_y, mu_x = tf.expand_dims(mu_y, -1), tf.expand_dims(mu_x, -1)
y = tf.reshape(y, [1, 1, map_size[0], 1])
x = tf.reshape(x, [1, 1, 1, map_size[1]])
g_y = tf.pow(y - mu_y, power)
g_x = tf.pow(x - mu_x, power)
dist = (g_y + g_x) * tf.pow(inv_std, power)
g_yx = tf.exp(-dist)
g_yx = tf.transpose(g_yx, perm=[0, 2, 3, 1])
return g_yx
class Decoder(snt.AbstractModule):
"""Decoder reconstruction network.
  The decoder is a standard convolutional network with ReLU activations.
"""
def __init__(self, initial_filters, output_size,
output_channels=3,
norm_type="batch",
name="decoder"):
"""Initialize the decoder.
Args:
initial_filters: `int` number of initial filters used in the decoder
output_size: tuple of `int` height and width of the reconstructed image
output_channels: `int` number of output channels, for RGB use 3 (default)
norm_type: string, one of 'instance', 'layer', 'batch'.
name: `str` name of the module
"""
super(Decoder, self).__init__(name=name)
self._initial_filters = initial_filters
self._output_height = output_size[0]
self._output_width = output_size[1]
self._output_channels = output_channels
self._norm_ctor = _NORMALIZATION_CTORS[norm_type]
def _build(self, features, is_training):
"""Connect the Decoder.
Args:
features: Tensor of shape [B, F_h, F_w, N]
is_training: `bool` whether the module is in training mode.
Returns:
A reconstructed image tensor of shape [B, output_height, output_width,
output_channels]
"""
height, width = features.shape.as_list()[1:3]
filters = self._initial_filters
regularizers = {"w": contrib_layers.l2_regularizer(1.0)}
layer = 0
while height <= self._output_height:
layer += 1
with tf.variable_scope("conv_{}".format(layer)):
conv1 = snt.Conv2D(
filters,
[3, 3],
stride=1,
regularizers=regularizers,
name="conv_{}".format(layer))
norm_module = self._norm_ctor(name="normalization")
features = conv1(features)
features = _connect_module_with_kwarg_if_supported(
norm_module, features, "is_training", is_training)
features = tf.nn.relu(features)
if height == self._output_height:
layer += 1
with tf.variable_scope("conv_{}".format(layer)):
conv2 = snt.Conv2D(
self._output_channels,
[3, 3],
stride=1,
regularizers=regularizers,
name="conv_{}".format(layer))
features = conv2(features)
break
else:
layer += 1
with tf.variable_scope("conv_{}".format(layer)):
conv2 = snt.Conv2D(
filters,
[3, 3],
stride=1,
regularizers=regularizers,
name="conv_{}".format(layer))
norm_module = self._norm_ctor(name="normalization")
features = conv2(features)
features = _connect_module_with_kwarg_if_supported(
norm_module, features, "is_training", is_training)
features = tf.nn.relu(features)
height *= 2
width *= 2
features = tf.image.resize(features, [height, width])
        if filters >= 8:
          filters //= 2  # Integer division keeps the filter count an int.
assert height == self._output_height
assert width == self._output_width
return features
| deepmind-research-master | transporter/transporter.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single file script for doing a quick evaluation of a model.
This script is called by run.sh.
Usage:
user@host:/path/to/deepmind_research$ unsupervised_adversarial_training/run.sh
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import app
from absl import flags
import cleverhans
from cleverhans import attacks
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
UAT_HUB_URL = ('https://tfhub.dev/deepmind/unsupervised-adversarial-training/'
'cifar10/wrn_106/1')
FLAGS = flags.FLAGS
flags.DEFINE_enum('attack_fn_name', 'fgsm', ['fgsm', 'none'],
'Name of the attack method to use.')
flags.DEFINE_float('epsilon_attack', 8.0 / 255,
'Maximum allowable perturbation size, between 0 and 1.')
flags.DEFINE_integer('num_steps', 20, 'Number of attack iterations.')
flags.DEFINE_integer('num_batches', 100, 'Number of batches to evaluate.')
flags.DEFINE_integer('batch_size', 32, 'Batch size.')
flags.DEFINE_integer('skip_batches', 0,
'Controls index of start image. This can be used to '
'evaluate the model on different subsets of the test set.')
flags.DEFINE_float('learning_rate', 0.003, 'Attack optimizer learning rate.')
def _top_1_accuracy(logits, labels):
return tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
def make_classifier():
model = hub.Module(UAT_HUB_URL)
def classifier(x):
x = _cifar_meanstd_normalize(x)
model_input = dict(x=x, decay_rate=0.1, prefix='default')
return model(model_input)
return classifier
def eval_cifar():
"""Evaluate an adversarially trained model."""
attack_fn_name = FLAGS.attack_fn_name
total_batches = FLAGS.num_batches
batch_size = FLAGS.batch_size
# Note that a `classifier` is a function mapping [0,1]-scaled image Tensors
# to a logit Tensor. In particular, it includes *both* the preprocessing
# function, and the neural network.
classifier = make_classifier()
cleverhans_model = cleverhans.model.CallableModelWrapper(classifier, 'logits')
_, data_test = tf.keras.datasets.cifar10.load_data()
data = _build_dataset(data_test, batch_size=batch_size, shuffle=False)
# Generate adversarial images.
  if attack_fn_name == 'fgsm':
    # Note: despite the flag name, this runs an iterative (PGD-style)
    # Madry et al. attack with `num_steps` gradient steps.
    attack = attacks.MadryEtAl(cleverhans_model)
num_cifar_classes = 10
adv_x = attack.generate(data.image,
eps=FLAGS.epsilon_attack,
eps_iter=FLAGS.learning_rate,
nb_iter=FLAGS.num_steps,
y=tf.one_hot(data.label, depth=num_cifar_classes))
elif attack_fn_name == 'none':
adv_x = data.image
logits = classifier(adv_x)
probs = tf.nn.softmax(logits)
adv_acc = _top_1_accuracy(logits, data.label)
with tf.train.SingularMonitoredSession() as sess:
total_acc = 0.
for _ in range(FLAGS.skip_batches):
sess.run(data.image)
for _ in range(total_batches):
_, _, adv_acc_val = sess.run([probs, data.label, adv_acc])
total_acc += adv_acc_val
print('Batch accuracy: {}'.format(adv_acc_val))
print('Total accuracy against {}: {}'.format(
FLAGS.attack_fn_name, total_acc / total_batches))
########## Utilities ##########
# Defines a dataset sample.
Sample = collections.namedtuple('Sample', ['image', 'label'])
def _build_dataset(raw_data, batch_size=32, shuffle=False):
"""Builds a dataset from raw NumPy tensors.
Args:
raw_data: Pair (images, labels) of numpy arrays. `images` should have shape
(N, H, W, C) with values in [0, 255], and `labels` should have shape
(N,) or (N, 1) indicating class indices.
batch_size: int, batch size
    shuffle: bool, whether to shuffle the data (default: False).
Returns:
(image_tensor, label_tensor), which iterate over the dataset, which are
(batch_size, H, W, C) tf.float32 and (batch_size,) tf.int32 Tensors
respectively
"""
images, labels = raw_data
labels = np.squeeze(labels)
samples = Sample(images.astype(np.float32) / 255., labels.astype(np.int64))
data = tf.data.Dataset.from_tensor_slices(samples)
if shuffle:
data = data.shuffle(1000)
return data.repeat().batch(batch_size).make_one_shot_iterator().get_next()
def _cifar_meanstd_normalize(image):
"""Mean + stddev whitening for CIFAR-10 used in ResNets.
Args:
image: Numpy array or TF Tensor, with values in [0, 255]
Returns:
image: Numpy array or TF Tensor, shifted and scaled by mean/stdev on
CIFAR-10 dataset.
"""
# Channel-wise means and std devs calculated from the CIFAR-10 training set
cifar_means = [125.3, 123.0, 113.9]
cifar_devs = [63.0, 62.1, 66.7]
rescaled_means = [x / 255. for x in cifar_means]
rescaled_devs = [x / 255. for x in cifar_devs]
image = (image - rescaled_means) / rescaled_devs
return image
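# Worked example added for exposition (values follow from the constants
# above): a mid-grey pixel with value 0.5 in every channel is mapped to
# approximately
#   red:   (0.5 - 125.3 / 255) / (63.0 / 255) ~ 0.03
#   green: (0.5 - 123.0 / 255) / (62.1 / 255) ~ 0.07
#   blue:  (0.5 - 113.9 / 255) / (66.7 / 255) ~ 0.20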
def main(unused_argv):
eval_cifar()
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | unsupervised_adversarial_training/quick_eval_cifar.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Loads images from the 80M@200K training set and saves them in PNG format.
Usage:
cd /path/to/deepmind_research
python -m unsupervised_adversarial_training.save_example_images \
--data_bin_path=/path/to/tiny_images.bin
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import numpy as np
from PIL import Image
DIR_NAME = os.path.dirname(__file__)
FLAGS = flags.FLAGS
flags.DEFINE_string('data_bin_path', None,
'path to 80M Tiny Images data binary')
flags.DEFINE_string('idxs_path', os.path.join(DIR_NAME, 'tiny_200K_idxs.txt'),
'path to file of indices indicating subset of 80M dataset')
flags.DEFINE_string('output_dir', os.path.join(DIR_NAME, 'images'),
'path to output directory for images')
flags.mark_flag_as_required('data_bin_path')
CIFAR_LABEL_IDX_TO_NAME = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
DATASET_SIZE = 79302017
def _load_dataset_as_array(ds_path):
dataset = np.memmap(filename=ds_path, dtype=np.uint8, mode='r',
shape=(DATASET_SIZE, 3, 32, 32))
return dataset.transpose([0, 3, 2, 1])
def main(unused_argv):
dataset = _load_dataset_as_array(FLAGS.data_bin_path)
# Load the indices and labels of the 80M@200K training set
data_idxs, data_labels = np.loadtxt(
FLAGS.idxs_path,
delimiter=',',
dtype=[('index', np.uint64), ('label', np.uint8)],
unpack=True)
# Save images as PNG files
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
for i in range(100):
class_name = CIFAR_LABEL_IDX_TO_NAME[data_labels[i]]
file_name = 'im{}_{}.png'.format(i, class_name)
file_path = os.path.join(FLAGS.output_dir, file_name)
img = dataset[data_idxs[i]]
Image.fromarray(img).save(file_path)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | unsupervised_adversarial_training/save_example_images.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the PolyGen open-source version."""
from modules import FaceModel
from modules import VertexModel
import numpy as np
import tensorflow as tf
_BATCH_SIZE = 4
_TRANSFORMER_CONFIG = {
'num_layers': 2,
'hidden_size': 64,
'fc_size': 256
}
_CLASS_CONDITIONAL = True
_NUM_CLASSES = 4
_NUM_INPUT_VERTS = 50
_NUM_PAD_VERTS = 10
_NUM_INPUT_FACE_INDICES = 200
_QUANTIZATION_BITS = 8
_VERTEX_MODEL_USE_DISCRETE_EMBEDDINGS = True
_FACE_MODEL_DECODER_CROSS_ATTENTION = True
_FACE_MODEL_DISCRETE_EMBEDDINGS = True
_MAX_SAMPLE_LENGTH_VERTS = 10
_MAX_SAMPLE_LENGTH_FACES = 10
def _get_vertex_model_batch():
"""Returns batch with placeholders for vertex model inputs."""
return {
'class_label': tf.range(_BATCH_SIZE),
'vertices_flat': tf.placeholder(
dtype=tf.int32, shape=[_BATCH_SIZE, None]),
}
def _get_face_model_batch():
"""Returns batch with placeholders for face model inputs."""
return {
'vertices': tf.placeholder(
dtype=tf.float32, shape=[_BATCH_SIZE, None, 3]),
'vertices_mask': tf.placeholder(
dtype=tf.float32, shape=[_BATCH_SIZE, None]),
'faces': tf.placeholder(
dtype=tf.int32, shape=[_BATCH_SIZE, None]),
}
class VertexModelTest(tf.test.TestCase):
def setUp(self):
"""Defines a vertex model."""
super(VertexModelTest, self).setUp()
self.model = VertexModel(
decoder_config=_TRANSFORMER_CONFIG,
class_conditional=_CLASS_CONDITIONAL,
num_classes=_NUM_CLASSES,
max_num_input_verts=_NUM_INPUT_VERTS,
quantization_bits=_QUANTIZATION_BITS,
use_discrete_embeddings=_VERTEX_MODEL_USE_DISCRETE_EMBEDDINGS)
def test_model_runs(self):
"""Tests if the model runs without crashing."""
batch = _get_vertex_model_batch()
pred_dist = self.model(batch, is_training=False)
logits = pred_dist.logits
with self.session() as sess:
sess.run(tf.global_variables_initializer())
vertices_flat = np.random.randint(
2**_QUANTIZATION_BITS + 1,
size=[_BATCH_SIZE, _NUM_INPUT_VERTS * 3 + 1])
sess.run(logits, {batch['vertices_flat']: vertices_flat})
def test_sample_outputs_range(self):
"""Tests if the model produces samples in the correct range."""
context = {'class_label': tf.zeros((_BATCH_SIZE,), dtype=tf.int32)}
sample_dict = self.model.sample(
_BATCH_SIZE, max_sample_length=_MAX_SAMPLE_LENGTH_VERTS,
context=context)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
sample_dict_np = sess.run(sample_dict)
in_range = np.logical_and(
0 <= sample_dict_np['vertices'],
sample_dict_np['vertices'] <= 2**_QUANTIZATION_BITS).all()
self.assertTrue(in_range)
class FaceModelTest(tf.test.TestCase):
def setUp(self):
"""Defines a face model."""
super(FaceModelTest, self).setUp()
self.model = FaceModel(
encoder_config=_TRANSFORMER_CONFIG,
decoder_config=_TRANSFORMER_CONFIG,
class_conditional=False,
max_seq_length=_NUM_INPUT_FACE_INDICES,
decoder_cross_attention=_FACE_MODEL_DECODER_CROSS_ATTENTION,
use_discrete_vertex_embeddings=_FACE_MODEL_DISCRETE_EMBEDDINGS,
quantization_bits=_QUANTIZATION_BITS)
def test_model_runs(self):
"""Tests if the model runs without crashing."""
batch = _get_face_model_batch()
pred_dist = self.model(batch, is_training=False)
logits = pred_dist.logits
with self.session() as sess:
sess.run(tf.global_variables_initializer())
vertices = np.random.rand(_BATCH_SIZE, _NUM_INPUT_VERTS, 3) - 0.5
vertices_mask = np.ones([_BATCH_SIZE, _NUM_INPUT_VERTS])
faces = np.random.randint(
_NUM_INPUT_VERTS + 2, size=[_BATCH_SIZE, _NUM_INPUT_FACE_INDICES])
sess.run(
logits,
{batch['vertices']: vertices,
batch['vertices_mask']: vertices_mask,
batch['faces']: faces}
)
def test_sample_outputs_range(self):
"""Tests if the model produces samples in the correct range."""
context = _get_face_model_batch()
del context['faces']
sample_dict = self.model.sample(
context, max_sample_length=_MAX_SAMPLE_LENGTH_FACES)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
# Pad the vertices in order to test that the face model only outputs
# vertex indices in the unpadded range
vertices = np.pad(
np.random.rand(_BATCH_SIZE, _NUM_INPUT_VERTS, 3) - 0.5,
[[0, 0], [0, _NUM_PAD_VERTS], [0, 0]], mode='constant')
vertices_mask = np.pad(
np.ones([_BATCH_SIZE, _NUM_INPUT_VERTS]),
[[0, 0], [0, _NUM_PAD_VERTS]], mode='constant')
sample_dict_np = sess.run(
sample_dict,
{context['vertices']: vertices,
context['vertices_mask']: vertices_mask})
in_range = np.logical_and(
0 <= sample_dict_np['faces'],
sample_dict_np['faces'] <= _NUM_INPUT_VERTS + 1).all()
self.assertTrue(in_range)
if __name__ == '__main__':
tf.test.main()
| deepmind-research-master | polygen/model_test.py |
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['numpy', 'dm-sonnet==1.36', 'tensorflow==1.14',
'tensor2tensor==1.15', 'networkx', 'matplotlib', 'six']
setup(
name='polygen',
version='0.1',
description='A library for PolyGen: An Autoregressive Generative Model of 3D Meshes.',
url='https://github.com/deepmind/deepmind-research/polygen',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
| deepmind-research-master | polygen/setup.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh data utilities."""
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d # pylint: disable=unused-import
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import networkx as nx
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def random_shift(vertices, shift_factor=0.25):
"""Apply random shift to vertices."""
max_shift_pos = tf.cast(255 - tf.reduce_max(vertices, axis=0), tf.float32)
max_shift_pos = tf.maximum(max_shift_pos, 1e-9)
max_shift_neg = tf.cast(tf.reduce_min(vertices, axis=0), tf.float32)
max_shift_neg = tf.maximum(max_shift_neg, 1e-9)
shift = tfd.TruncatedNormal(
tf.zeros([1, 3]), shift_factor*255, -max_shift_neg,
max_shift_pos).sample()
shift = tf.cast(shift, tf.int32)
vertices += shift
return vertices
def make_vertex_model_dataset(ds, apply_random_shift=False):
"""Prepare dataset for vertex model training."""
def _vertex_model_map_fn(example):
vertices = example['vertices']
# Randomly shift vertices
if apply_random_shift:
vertices = random_shift(vertices)
# Re-order vertex coordinates as (z, y, x).
vertices_permuted = tf.stack(
[vertices[:, 2], vertices[:, 1], vertices[:, 0]], axis=-1)
# Flatten quantized vertices, reindex starting from 1, and pad with a
# zero stopping token.
vertices_flat = tf.reshape(vertices_permuted, [-1])
example['vertices_flat'] = tf.pad(vertices_flat + 1, [[0, 1]])
# Create mask to indicate valid tokens after padding and batching.
example['vertices_flat_mask'] = tf.ones_like(
example['vertices_flat'], dtype=tf.float32)
return example
return ds.map(_vertex_model_map_fn)
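# Worked example added for exposition (the vertex values are illustrative
# assumptions): a single quantized vertex with (x=3, y=7, z=1) is re-ordered
# to (z, y, x), flattened, shifted by +1 so that 0 can act as the stopping
# token, and padded with that stop token:
#
#   vertices            = [[3, 7, 1]]
#   vertices_permuted   = [[1, 7, 3]]
#   vertices_flat       = [2, 8, 4, 0]
#   vertices_flat_mask  = [1., 1., 1., 1.]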
def make_face_model_dataset(
ds, apply_random_shift=False, shuffle_vertices=True, quantization_bits=8):
"""Prepare dataset for face model training."""
def _face_model_map_fn(example):
vertices = example['vertices']
# Randomly shift vertices
if apply_random_shift:
vertices = random_shift(vertices)
example['num_vertices'] = tf.shape(vertices)[0]
# Optionally shuffle vertices and re-order faces to match
if shuffle_vertices:
permutation = tf.random_shuffle(tf.range(example['num_vertices']))
vertices = tf.gather(vertices, permutation)
face_permutation = tf.concat(
[tf.constant([0, 1], dtype=tf.int32), tf.argsort(permutation) + 2],
axis=0)
example['faces'] = tf.cast(
tf.gather(face_permutation, example['faces']), tf.int64)
def _dequantize_verts(verts, n_bits):
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts = tf.cast(verts, tf.float32)
verts = verts * (max_range - min_range) / range_quantize + min_range
return verts
# Vertices are quantized. So convert to floats for input to face model
example['vertices'] = _dequantize_verts(vertices, quantization_bits)
example['vertices_mask'] = tf.ones_like(
example['vertices'][..., 0], dtype=tf.float32)
example['faces_mask'] = tf.ones_like(example['faces'], dtype=tf.float32)
return example
return ds.map(_face_model_map_fn)
def read_obj_file(obj_file):
"""Read vertices and faces from already opened file."""
vertex_list = []
flat_vertices_list = []
flat_vertices_indices = {}
flat_triangles = []
for line in obj_file:
tokens = line.split()
if not tokens:
continue
line_type = tokens[0]
# We skip lines not starting with v or f.
if line_type == 'v':
vertex_list.append([float(x) for x in tokens[1:]])
elif line_type == 'f':
triangle = []
for i in range(len(tokens) - 1):
vertex_name = tokens[i + 1]
if vertex_name in flat_vertices_indices:
triangle.append(flat_vertices_indices[vertex_name])
continue
flat_vertex = []
for index in six.ensure_str(vertex_name).split('/'):
if not index:
continue
# obj triangle indices are 1 indexed, so subtract 1 here.
flat_vertex += vertex_list[int(index) - 1]
flat_vertex_index = len(flat_vertices_list)
flat_vertices_list.append(flat_vertex)
flat_vertices_indices[vertex_name] = flat_vertex_index
triangle.append(flat_vertex_index)
flat_triangles.append(triangle)
return np.array(flat_vertices_list, dtype=np.float32), flat_triangles
def read_obj(obj_path):
"""Open .obj file from the path provided and read vertices and faces."""
with open(obj_path) as obj_file:
return read_obj_file(obj_file)
def write_obj(vertices, faces, file_path, transpose=True, scale=1.):
"""Write vertices and faces to obj."""
if transpose:
vertices = vertices[:, [1, 2, 0]]
vertices *= scale
if faces is not None:
    if min(min(f) for f in faces) == 0:
f_add = 1
else:
f_add = 0
with open(file_path, 'w') as f:
for v in vertices:
f.write('v {} {} {}\n'.format(v[0], v[1], v[2]))
for face in faces:
line = 'f'
for i in face:
line += ' {}'.format(i + f_add)
line += '\n'
f.write(line)
def quantize_verts(verts, n_bits=8):
"""Convert vertices in [-1., 1.] to discrete values in [0, n_bits**2 - 1]."""
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts_quantize = (verts - min_range) * range_quantize / (
max_range - min_range)
return verts_quantize.astype('int32')
def dequantize_verts(verts, n_bits=8, add_noise=False):
"""Convert quantized vertices to floats."""
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts = verts.astype('float32')
verts = verts * (max_range - min_range) / range_quantize + min_range
if add_noise:
verts += np.random.uniform(size=verts.shape) * (1 / range_quantize)
return verts
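# Worked example added for exposition: with n_bits=8 the continuous range
# [-0.5, 0.5] maps onto the integers [0, 255] and back, e.g.
#
#   quantize_verts(np.array([-0.5, 0.0, 0.5]))  -> [0, 127, 255]
#   dequantize_verts(np.array([0, 127, 255]))   -> [-0.5, -0.002, 0.5]
#
# so a quantize/dequantize round trip is lossy by at most one quantization
# step of 1 / 255.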
def face_to_cycles(face):
"""Find cycles in face."""
g = nx.Graph()
for v in range(len(face) - 1):
g.add_edge(face[v], face[v + 1])
g.add_edge(face[-1], face[0])
return list(nx.cycle_basis(g))
def flatten_faces(faces):
"""Converts from list of faces to flat face array with stopping indices."""
if not faces:
return np.array([0])
else:
l = [f + [-1] for f in faces[:-1]]
l += [faces[-1] + [-2]]
return np.array([item for sublist in l for item in sublist]) + 2 # pylint: disable=g-complex-comprehension
def unflatten_faces(flat_faces):
"""Converts from flat face sequence to a list of separate faces."""
def group(seq):
g = []
for el in seq:
if el == 0 or el == -1:
yield g
g = []
else:
g.append(el - 1)
yield g
outputs = list(group(flat_faces - 1))[:-1]
# Remove empty faces
return [o for o in outputs if len(o) > 2]
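# Worked example added for exposition (the faces are illustrative
# assumptions): `flatten_faces` appends -1 ('new face') after each face but
# the last, appends -2 ('stop') after the last face, then shifts everything
# by +2 so that vertex index 0 becomes 2, the new-face token becomes 1 and
# the stop token becomes 0. `unflatten_faces` inverts this:
#
#   faces = [[0, 1, 2], [2, 3, 0]]
#   flatten_faces(faces)                                 -> [2, 3, 4, 1, 4, 5, 2, 0]
#   unflatten_faces(np.array([2, 3, 4, 1, 4, 5, 2, 0]))  -> [[0, 1, 2], [2, 3, 0]]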
def center_vertices(vertices):
"""Translate the vertices so that bounding box is centered at zero."""
vert_min = vertices.min(axis=0)
vert_max = vertices.max(axis=0)
vert_center = 0.5 * (vert_min + vert_max)
return vertices - vert_center
def normalize_vertices_scale(vertices):
"""Scale the vertices so that the long diagonal of the bounding box is one."""
vert_min = vertices.min(axis=0)
vert_max = vertices.max(axis=0)
extents = vert_max - vert_min
scale = np.sqrt(np.sum(extents**2))
return vertices / scale
def quantize_process_mesh(vertices, faces, tris=None, quantization_bits=8):
"""Quantize vertices, remove resulting duplicates and reindex faces."""
vertices = quantize_verts(vertices, quantization_bits)
vertices, inv = np.unique(vertices, axis=0, return_inverse=True)
# Sort vertices by z then y then x.
sort_inds = np.lexsort(vertices.T)
vertices = vertices[sort_inds]
# Re-index faces and tris to re-ordered vertices.
faces = [np.argsort(sort_inds)[inv[f]] for f in faces]
if tris is not None:
tris = np.array([np.argsort(sort_inds)[inv[t]] for t in tris])
# Merging duplicate vertices and re-indexing the faces causes some faces to
# contain loops (e.g [2, 3, 5, 2, 4]). Split these faces into distinct
# sub-faces.
sub_faces = []
for f in faces:
cliques = face_to_cycles(f)
for c in cliques:
c_length = len(c)
# Only append faces with more than two verts.
if c_length > 2:
d = np.argmin(c)
        # Cyclically permute faces so that the first index is the smallest.
sub_faces.append([c[(d + i) % c_length] for i in range(c_length)])
faces = sub_faces
if tris is not None:
tris = np.array([v for v in tris if len(set(v)) == len(v)])
# Sort faces by lowest vertex indices. If two faces have the same lowest
# index then sort by next lowest and so on.
faces.sort(key=lambda f: tuple(sorted(f)))
if tris is not None:
tris = tris.tolist()
tris.sort(key=lambda f: tuple(sorted(f)))
tris = np.array(tris)
# After removing degenerate faces some vertices are now unreferenced.
# Remove these.
num_verts = vertices.shape[0]
vert_connected = np.equal(
np.arange(num_verts)[:, None], np.hstack(faces)[None]).any(axis=-1)
vertices = vertices[vert_connected]
# Re-index faces and tris to re-ordered vertices.
vert_indices = (
np.arange(num_verts) - np.cumsum(1 - vert_connected.astype('int')))
faces = [vert_indices[f].tolist() for f in faces]
if tris is not None:
tris = np.array([vert_indices[t].tolist() for t in tris])
return vertices, faces, tris
def process_mesh(vertices, faces, quantization_bits=8):
"""Process mesh vertices and faces."""
# Transpose so that z-axis is vertical.
vertices = vertices[:, [2, 0, 1]]
# Translate the vertices so that bounding box is centered at zero.
vertices = center_vertices(vertices)
# Scale the vertices so that the long diagonal of the bounding box is equal
# to one.
vertices = normalize_vertices_scale(vertices)
# Quantize and sort vertices, remove resulting duplicates, sort and reindex
# faces.
vertices, faces, _ = quantize_process_mesh(
vertices, faces, quantization_bits=quantization_bits)
# Flatten faces and add 'new face' = 1 and 'stop' = 0 tokens.
faces = flatten_faces(faces)
# Discard degenerate meshes without faces.
return {
'vertices': vertices,
'faces': faces,
}
def load_process_mesh(mesh_obj_path, quantization_bits=8):
"""Load obj file and process."""
# Load mesh
vertices, faces = read_obj(mesh_obj_path)
return process_mesh(vertices, faces, quantization_bits)
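# Usage sketch added for exposition ('cube.obj' is an assumed local file):
#
#   mesh = load_process_mesh('cube.obj', quantization_bits=8)
#   plot_meshes([{
#       'vertices': dequantize_verts(mesh['vertices']),
#       'faces': unflatten_faces(mesh['faces']),
#   }])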
def plot_meshes(mesh_list,
ax_lims=0.3,
fig_size=4,
el=30,
rot_start=120,
vert_size=10,
vert_alpha=0.75,
n_cols=4):
"""Plots mesh data using matplotlib."""
n_plot = len(mesh_list)
n_cols = np.minimum(n_plot, n_cols)
n_rows = np.ceil(n_plot / n_cols).astype('int')
fig = plt.figure(figsize=(fig_size * n_cols, fig_size * n_rows))
for p_inc, mesh in enumerate(mesh_list):
for key in [
'vertices', 'faces', 'vertices_conditional', 'pointcloud', 'class_name'
]:
if key not in list(mesh.keys()):
mesh[key] = None
ax = fig.add_subplot(n_rows, n_cols, p_inc + 1, projection='3d')
if mesh['faces'] is not None:
if mesh['vertices_conditional'] is not None:
face_verts = np.concatenate(
[mesh['vertices_conditional'], mesh['vertices']], axis=0)
else:
face_verts = mesh['vertices']
collection = []
for f in mesh['faces']:
collection.append(face_verts[f])
plt_mesh = Poly3DCollection(collection)
plt_mesh.set_edgecolor((0., 0., 0., 0.3))
plt_mesh.set_facecolor((1, 0, 0, 0.2))
ax.add_collection3d(plt_mesh)
if mesh['vertices'] is not None:
ax.scatter3D(
mesh['vertices'][:, 0],
mesh['vertices'][:, 1],
mesh['vertices'][:, 2],
lw=0.,
s=vert_size,
c='g',
alpha=vert_alpha)
if mesh['vertices_conditional'] is not None:
ax.scatter3D(
mesh['vertices_conditional'][:, 0],
mesh['vertices_conditional'][:, 1],
mesh['vertices_conditional'][:, 2],
lw=0.,
s=vert_size,
c='b',
alpha=vert_alpha)
if mesh['pointcloud'] is not None:
ax.scatter3D(
mesh['pointcloud'][:, 0],
mesh['pointcloud'][:, 1],
mesh['pointcloud'][:, 2],
lw=0.,
s=2.5 * vert_size,
c='b',
alpha=1.)
ax.set_xlim(-ax_lims, ax_lims)
ax.set_ylim(-ax_lims, ax_lims)
ax.set_zlim(-ax_lims, ax_lims)
ax.view_init(el, rot_start)
display_string = ''
if mesh['faces'] is not None:
display_string += 'Num. faces: {}\n'.format(len(collection))
if mesh['vertices'] is not None:
num_verts = mesh['vertices'].shape[0]
if mesh['vertices_conditional'] is not None:
num_verts += mesh['vertices_conditional'].shape[0]
display_string += 'Num. verts: {}\n'.format(num_verts)
if mesh['class_name'] is not None:
display_string += 'Synset: {}'.format(mesh['class_name'])
if mesh['pointcloud'] is not None:
display_string += 'Num. pointcloud: {}\n'.format(
mesh['pointcloud'].shape[0])
ax.text2D(0.05, 0.8, display_string, transform=ax.transAxes)
plt.subplots_adjust(
left=0., right=1., bottom=0., top=1., wspace=0.025, hspace=0.025)
plt.show()
| deepmind-research-master | polygen/data_utils.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules and networks for mesh generation."""
import sonnet as snt
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import function
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
def dequantize_verts(verts, n_bits, add_noise=False):
"""Quantizes vertices and outputs integers with specified n_bits."""
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts = tf.cast(verts, tf.float32)
verts = verts * (max_range - min_range) / range_quantize + min_range
if add_noise:
verts += tf.random_uniform(tf.shape(verts)) * (1 / float(range_quantize))
return verts
def quantize_verts(verts, n_bits):
"""Dequantizes integer vertices to floats."""
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts_quantize = (
(verts - min_range) * range_quantize / (max_range - min_range))
return tf.cast(verts_quantize, tf.int32)
def top_k_logits(logits, k):
"""Masks logits such that logits not in top-k are small."""
if k == 0:
return logits
else:
values, _ = tf.math.top_k(logits, k=k)
k_largest = tf.reduce_min(values)
logits = tf.where(tf.less_equal(logits, k_largest),
tf.ones_like(logits)*-1e9, logits)
return logits
def top_p_logits(logits, p):
"""Masks logits using nucleus (top-p) sampling."""
if p == 1:
return logits
else:
logit_shape = tf.shape(logits)
seq, dim = logit_shape[1], logit_shape[2]
logits = tf.reshape(logits, [-1, dim])
sort_indices = tf.argsort(logits, axis=-1, direction='DESCENDING')
probs = tf.gather(tf.nn.softmax(logits), sort_indices, batch_dims=1)
cumprobs = tf.cumsum(probs, axis=-1, exclusive=True)
    # The exclusive cumsum means the top-1 candidate is never masked, so at
    # least one index can always be sampled.
sort_mask = tf.cast(tf.greater(cumprobs, p), logits.dtype)
batch_indices = tf.tile(
tf.expand_dims(tf.range(tf.shape(logits)[0]), axis=-1), [1, dim])
top_p_mask = tf.scatter_nd(
tf.stack([batch_indices, sort_indices], axis=-1), sort_mask,
tf.shape(logits))
logits -= top_p_mask * 1e9
return tf.reshape(logits, [-1, seq, dim])
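# Worked example added for exposition (the distribution is an illustrative
# assumption): with p = 0.9 and per-step probabilities [0.5, 0.3, 0.15, 0.05]
# (already sorted), the exclusive cumulative sums are [0.0, 0.5, 0.8, 0.95].
# Only the last entry exceeds p, so the logit of the 0.05 token is pushed
# down by 1e9 and sampling is restricted to the nucleus {0.5, 0.3, 0.15}.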
_function_cache = {} # For multihead_self_attention_memory_efficient
def multihead_self_attention_memory_efficient(x,
bias,
num_heads,
head_size=None,
cache=None,
epsilon=1e-6,
forget=True,
test_vars=None,
name=None):
"""Memory-efficient Multihead scaled-dot-product self-attention.
Based on Tensor2Tensor version but adds optional caching.
Returns multihead-self-attention(layer_norm(x))
Computes one attention head at a time to avoid exhausting memory.
If forget=True, then forget all forwards activations and recompute on
the backwards pass.
Args:
x: a Tensor with shape [batch, length, input_size]
bias: an attention bias tensor broadcastable to [batch, 1, length, length]
num_heads: an integer
head_size: an optional integer - defaults to input_size/num_heads
cache: Optional dict containing tensors which are the results of previous
attentions, used for fast decoding. Expects the dict to contain two
keys ('k' and 'v'), for the initial call the values for these keys
should be empty Tensors of the appropriate shape.
'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels]
epsilon: a float, for layer norm
forget: a boolean - forget forwards activations and recompute on backprop
test_vars: optional tuple of variables for testing purposes
name: an optional string
Returns:
A Tensor.
"""
io_size = x.get_shape().as_list()[-1]
if head_size is None:
assert io_size % num_heads == 0
head_size = io_size / num_heads
def forward_internal(x, wqkv, wo, attention_bias, norm_scale, norm_bias):
"""Forward function."""
n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias)
wqkv_split = tf.unstack(wqkv, num=num_heads)
wo_split = tf.unstack(wo, num=num_heads)
y = 0
if cache is not None:
cache_k = []
cache_v = []
for h in range(num_heads):
with tf.control_dependencies([y] if h > 0 else []):
combined = tf.nn.conv1d(n, wqkv_split[h], 1, 'SAME')
q, k, v = tf.split(combined, 3, axis=2)
if cache is not None:
k = tf.concat([cache['k'][:, h], k], axis=1)
v = tf.concat([cache['v'][:, h], v], axis=1)
cache_k.append(k)
cache_v.append(v)
o = common_attention.scaled_dot_product_attention_simple(
q, k, v, attention_bias)
y += tf.nn.conv1d(o, wo_split[h], 1, 'SAME')
if cache is not None:
cache['k'] = tf.stack(cache_k, axis=1)
cache['v'] = tf.stack(cache_v, axis=1)
return y
key = (
'multihead_self_attention_memory_efficient %s %s' % (num_heads, epsilon))
if not forget:
forward_fn = forward_internal
elif key in _function_cache:
forward_fn = _function_cache[key]
else:
@function.Defun(compiled=True)
def grad_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias, dy):
"""Custom gradient function."""
with tf.control_dependencies([dy]):
n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias)
wqkv_split = tf.unstack(wqkv, num=num_heads)
wo_split = tf.unstack(wo, num=num_heads)
deps = []
dwqkvs = []
dwos = []
dn = 0
for h in range(num_heads):
with tf.control_dependencies(deps):
combined = tf.nn.conv1d(n, wqkv_split[h], 1, 'SAME')
q, k, v = tf.split(combined, 3, axis=2)
o = common_attention.scaled_dot_product_attention_simple(
q, k, v, attention_bias)
partial_y = tf.nn.conv1d(o, wo_split[h], 1, 'SAME')
pdn, dwqkvh, dwoh = tf.gradients(
ys=[partial_y],
xs=[n, wqkv_split[h], wo_split[h]],
grad_ys=[dy])
dn += pdn
dwqkvs.append(dwqkvh)
dwos.append(dwoh)
deps = [dn, dwqkvh, dwoh]
dwqkv = tf.stack(dwqkvs)
dwo = tf.stack(dwos)
with tf.control_dependencies(deps):
dx, dnorm_scale, dnorm_bias = tf.gradients(
ys=[n], xs=[x, norm_scale, norm_bias], grad_ys=[dn])
return (dx, dwqkv, dwo, tf.zeros_like(attention_bias), dnorm_scale,
dnorm_bias)
@function.Defun(
grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)
def forward_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias):
return forward_internal(x, wqkv, wo, attention_bias, norm_scale,
norm_bias)
_function_cache[key] = forward_fn
if bias is not None:
bias = tf.squeeze(bias, 1)
with tf.variable_scope(name, default_name='multihead_attention', values=[x]):
if test_vars is not None:
wqkv, wo, norm_scale, norm_bias = list(test_vars)
else:
wqkv = tf.get_variable(
'wqkv', [num_heads, 1, io_size, 3 * head_size],
initializer=tf.random_normal_initializer(stddev=io_size**-0.5))
wo = tf.get_variable(
'wo', [num_heads, 1, head_size, io_size],
initializer=tf.random_normal_initializer(
stddev=(head_size * num_heads)**-0.5))
norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)
y = forward_fn(x, wqkv, wo, bias, norm_scale, norm_bias)
y.set_shape(x.get_shape()) # pytype: disable=attribute-error
return y
class TransformerEncoder(snt.AbstractModule):
"""Transformer encoder.
Sonnet Transformer encoder module as described in Vaswani et al. 2017. Uses
the Tensor2Tensor multihead_attention function for full self attention
(no masking). Layer norm is applied inside the residual path as in sparse
transformers (Child 2019).
This module expects inputs to be already embedded, and does not add position
embeddings.
"""
def __init__(self,
hidden_size=256,
fc_size=1024,
num_heads=4,
layer_norm=True,
num_layers=8,
dropout_rate=0.2,
re_zero=True,
memory_efficient=False,
name='transformer_encoder'):
"""Initializes TransformerEncoder.
Args:
hidden_size: Size of embedding vectors.
fc_size: Size of fully connected layer.
num_heads: Number of attention heads.
layer_norm: If True, apply layer normalization
num_layers: Number of Transformer blocks, where each block contains a
multi-head attention layer and a MLP.
dropout_rate: Dropout rate applied immediately after the ReLU in each
fully-connected layer.
re_zero: If True, alpha scale residuals with zero init.
memory_efficient: If True, recompute gradients for memory savings.
name: Name of variable scope
"""
super(TransformerEncoder, self).__init__(name=name)
self.hidden_size = hidden_size
self.num_heads = num_heads
self.layer_norm = layer_norm
self.fc_size = fc_size
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.re_zero = re_zero
self.memory_efficient = memory_efficient
def _build(self, inputs, is_training=False):
"""Passes inputs through Transformer encoder network.
Args:
inputs: Tensor of shape [batch_size, sequence_length, embed_size]. Zero
embeddings are masked in self-attention.
is_training: If True, dropout is applied.
Returns:
output: Tensor of shape [batch_size, sequence_length, embed_size].
"""
if is_training:
dropout_rate = self.dropout_rate
else:
dropout_rate = 0.
# Identify elements with all zeros as padding, and create bias to mask
# out padding elements in self attention.
encoder_padding = common_attention.embedding_to_padding(inputs)
encoder_self_attention_bias = (
common_attention.attention_bias_ignore_padding(encoder_padding))
x = inputs
for layer_num in range(self.num_layers):
with tf.variable_scope('layer_{}'.format(layer_num)):
# Multihead self-attention from Tensor2Tensor.
res = x
if self.memory_efficient:
res = multihead_self_attention_memory_efficient(
res,
bias=encoder_self_attention_bias,
num_heads=self.num_heads,
head_size=self.hidden_size // self.num_heads,
forget=True if is_training else False,
name='self_attention'
)
else:
if self.layer_norm:
res = common_layers.layer_norm(res, name='self_attention')
res = common_attention.multihead_attention(
res,
memory_antecedent=None,
bias=encoder_self_attention_bias,
total_key_depth=self.hidden_size,
total_value_depth=self.hidden_size,
output_depth=self.hidden_size,
num_heads=self.num_heads,
dropout_rate=0.,
make_image_summary=False,
name='self_attention')
if self.re_zero:
res *= tf.get_variable('self_attention/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
# MLP
res = x
if self.layer_norm:
res = common_layers.layer_norm(res, name='fc')
res = tf.layers.dense(
res, self.fc_size, activation=tf.nn.relu, name='fc_1')
res = tf.layers.dense(res, self.hidden_size, name='fc_2')
if self.re_zero:
res *= tf.get_variable('fc/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
if self.layer_norm:
output = common_layers.layer_norm(x, name='output')
else:
output = x
return output
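# Usage sketch added for exposition (all sizes are illustrative assumptions,
# graph mode is assumed as in the rest of this file):
#
#   encoder = TransformerEncoder(hidden_size=64, fc_size=256, num_layers=2)
#   embeddings = tf.placeholder(tf.float32, [4, 100, 64])
#   outputs = encoder(embeddings, is_training=True)  # [4, 100, 64]
#
# The inputs must already include any position embeddings, and sequence
# elements whose embedding is entirely zero are treated as padding.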
class TransformerDecoder(snt.AbstractModule):
"""Transformer decoder.
Sonnet Transformer decoder module as described in Vaswani et al. 2017. Uses
the Tensor2Tensor multihead_attention function for masked self attention, and
non-masked cross attention attention. Layer norm is applied inside the
residual path as in sparse transformers (Child 2019).
This module expects inputs to be already embedded, and does not
add position embeddings.
"""
def __init__(self,
hidden_size=256,
fc_size=1024,
num_heads=4,
layer_norm=True,
num_layers=8,
dropout_rate=0.2,
re_zero=True,
memory_efficient=False,
name='transformer_decoder'):
"""Initializes TransformerDecoder.
Args:
hidden_size: Size of embedding vectors.
fc_size: Size of fully connected layer.
num_heads: Number of attention heads.
      layer_norm: If True, apply layer normalization. If memory_efficient is
        True, then layer norm is always applied.
num_layers: Number of Transformer blocks, where each block contains a
multi-head attention layer and a MLP.
dropout_rate: Dropout rate applied immediately after the ReLU in each
fully-connected layer.
re_zero: If True, alpha scale residuals with zero init.
memory_efficient: If True, recompute gradients for memory savings.
name: Name of variable scope
"""
super(TransformerDecoder, self).__init__(name=name)
self.hidden_size = hidden_size
self.num_heads = num_heads
self.layer_norm = layer_norm
self.fc_size = fc_size
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.re_zero = re_zero
self.memory_efficient = memory_efficient
def _build(self,
inputs,
sequential_context_embeddings=None,
is_training=False,
cache=None):
"""Passes inputs through Transformer decoder network.
Args:
inputs: Tensor of shape [batch_size, sequence_length, embed_size]. Zero
embeddings are masked in self-attention.
sequential_context_embeddings: Optional tensor with global context
(e.g image embeddings) of shape
[batch_size, context_seq_length, context_embed_size].
is_training: If True, dropout is applied.
cache: Optional dict containing tensors which are the results of previous
attentions, used for fast decoding. Expects the dict to contain two
keys ('k' and 'v'), for the initial call the values for these keys
should be empty Tensors of the appropriate shape.
'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels]
Returns:
output: Tensor of shape [batch_size, sequence_length, embed_size].
"""
if is_training:
dropout_rate = self.dropout_rate
else:
dropout_rate = 0.
# create bias to mask future elements for causal self-attention.
seq_length = tf.shape(inputs)[1]
decoder_self_attention_bias = common_attention.attention_bias_lower_triangle(
seq_length)
# If using sequential_context, identify elements with all zeros as padding,
# and create bias to mask out padding elements in self attention.
if sequential_context_embeddings is not None:
encoder_padding = common_attention.embedding_to_padding(
sequential_context_embeddings)
encoder_decoder_attention_bias = (
common_attention.attention_bias_ignore_padding(encoder_padding))
x = inputs
for layer_num in range(self.num_layers):
with tf.variable_scope('layer_{}'.format(layer_num)):
# If using cached decoding, access cache for current layer, and create
# bias that enables un-masked attention into the cache
if cache is not None:
layer_cache = cache[layer_num]
layer_decoder_bias = tf.zeros([1, 1, 1, 1])
# Otherwise use standard masked bias
else:
layer_cache = None
layer_decoder_bias = decoder_self_attention_bias
# Multihead self-attention from Tensor2Tensor.
res = x
if self.memory_efficient:
res = multihead_self_attention_memory_efficient(
res,
bias=layer_decoder_bias,
cache=layer_cache,
num_heads=self.num_heads,
head_size=self.hidden_size // self.num_heads,
forget=True if is_training else False,
name='self_attention'
)
else:
if self.layer_norm:
res = common_layers.layer_norm(res, name='self_attention')
res = common_attention.multihead_attention(
res,
memory_antecedent=None,
bias=layer_decoder_bias,
total_key_depth=self.hidden_size,
total_value_depth=self.hidden_size,
output_depth=self.hidden_size,
num_heads=self.num_heads,
cache=layer_cache,
dropout_rate=0.,
make_image_summary=False,
name='self_attention')
if self.re_zero:
res *= tf.get_variable('self_attention/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
# Optional cross attention into sequential context
if sequential_context_embeddings is not None:
res = x
if self.layer_norm:
res = common_layers.layer_norm(res, name='cross_attention')
res = common_attention.multihead_attention(
res,
memory_antecedent=sequential_context_embeddings,
bias=encoder_decoder_attention_bias,
total_key_depth=self.hidden_size,
total_value_depth=self.hidden_size,
output_depth=self.hidden_size,
num_heads=self.num_heads,
dropout_rate=0.,
make_image_summary=False,
name='cross_attention')
if self.re_zero:
res *= tf.get_variable('cross_attention/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
# FC layers
res = x
if self.layer_norm:
res = common_layers.layer_norm(res, name='fc')
res = tf.layers.dense(
res, self.fc_size, activation=tf.nn.relu, name='fc_1')
res = tf.layers.dense(res, self.hidden_size, name='fc_2')
if self.re_zero:
res *= tf.get_variable('fc/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
if self.layer_norm:
output = common_layers.layer_norm(x, name='output')
else:
output = x
return output
def create_init_cache(self, batch_size):
"""Creates empty cache dictionary for use in fast decoding."""
def compute_cache_shape_invariants(tensor):
"""Helper function to get dynamic shapes for cache tensors."""
shape_list = tensor.shape.as_list()
if len(shape_list) == 4:
return tf.TensorShape(
[shape_list[0], shape_list[1], None, shape_list[3]])
elif len(shape_list) == 3:
return tf.TensorShape([shape_list[0], None, shape_list[2]])
# Build cache
k = common_attention.split_heads(
tf.zeros([batch_size, 0, self.hidden_size]), self.num_heads)
v = common_attention.split_heads(
tf.zeros([batch_size, 0, self.hidden_size]), self.num_heads)
cache = [{'k': k, 'v': v} for _ in range(self.num_layers)]
shape_invariants = tf.nest.map_structure(
compute_cache_shape_invariants, cache)
return cache, shape_invariants
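# The cache returned by `create_init_cache` is meant to be threaded through a
# `tf.while_loop` during autoregressive decoding, with one decoder call per
# generated token (see `VertexModel.sample` below for the full pattern). The
# sketch below shows a single cached decoding step. It is illustrative only:
# the constructor arguments are assumptions and need not match the defaults
# used elsewhere.
def _example_cached_decoder_step():
  """Minimal sketch of one cached TransformerDecoder step (illustration only)."""
  decoder = TransformerDecoder(
      hidden_size=128, fc_size=512, num_heads=4, num_layers=2)
  batch_size = 2
  # Empty cache: keys/values start with zero length along the time dimension.
  cache, _ = decoder.create_init_cache(batch_size)
  # Only the embedding of the newest position is passed in; earlier positions
  # are represented by the (growing) cache entries.
  newest_input = tf.zeros([batch_size, 1, 128])
  # The attention layers append this step's keys/values to the cache.
  output = decoder(newest_input, cache=cache, is_training=False)
  return output  # [batch_size, 1, hidden_size]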
def conv_residual_block(inputs,
output_channels=None,
downsample=False,
kernel_size=3,
re_zero=True,
dropout_rate=0.,
name='conv_residual_block'):
"""Convolutional block with residual connections for 2D or 3D inputs.
Args:
inputs: Input tensor of shape [batch_size, height, width, channels] or
[batch_size, height, width, depth, channels].
output_channels: Number of output channels.
downsample: If True, downsample by 1/2 in this block.
kernel_size: Spatial size of convolutional kernels.
    re_zero: If True, scale residuals by a learned alpha initialized to zero.
dropout_rate: Dropout rate applied after second ReLU in residual path.
name: Name for variable scope.
Returns:
outputs: Output tensor of shape [batch_size, height, width, output_channels]
or [batch_size, height, width, depth, output_channels].
"""
with tf.variable_scope(name):
input_shape = inputs.get_shape().as_list()
num_dims = len(input_shape) - 2
if num_dims == 2:
conv = tf.layers.conv2d
elif num_dims == 3:
conv = tf.layers.conv3d
input_channels = input_shape[-1]
if output_channels is None:
output_channels = input_channels
if downsample:
shortcut = conv(
inputs,
filters=output_channels,
strides=2,
kernel_size=kernel_size,
padding='same',
name='conv_shortcut')
else:
shortcut = inputs
res = inputs
res = tf.nn.relu(res)
res = conv(
res, filters=input_channels, kernel_size=kernel_size, padding='same',
name='conv_1')
res = tf.nn.relu(res)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
if downsample:
out_strides = 2
else:
out_strides = 1
res = conv(
res,
filters=output_channels,
kernel_size=kernel_size,
padding='same',
strides=out_strides,
name='conv_2')
if re_zero:
res *= tf.get_variable('alpha', initializer=0.)
return shortcut + res
class ResNet(snt.AbstractModule):
"""ResNet architecture for 2D image or 3D voxel inputs."""
def __init__(self,
num_dims,
hidden_sizes=(64, 256),
num_blocks=(2, 2),
dropout_rate=0.1,
re_zero=True,
name='res_net'):
"""Initializes ResNet.
Args:
num_dims: Number of spatial dimensions. 2 for images or 3 for voxels.
hidden_sizes: Sizes of hidden layers in resnet blocks.
num_blocks: Number of resnet blocks at each size.
      dropout_rate: Dropout rate passed to the residual blocks, applied after
        the second ReLU in each residual path.
      re_zero: If True, scale residuals by a learned alpha initialized to
        zero.
name: Name of variable scope
"""
super(ResNet, self).__init__(name=name)
self.num_dims = num_dims
self.hidden_sizes = hidden_sizes
self.num_blocks = num_blocks
self.dropout_rate = dropout_rate
self.re_zero = re_zero
def _build(self, inputs, is_training=False):
"""Passes inputs through resnet.
Args:
inputs: Tensor of shape [batch_size, height, width, channels] or
[batch_size, height, width, depth, channels].
is_training: If True, dropout is applied.
Returns:
      output: Tensor of shape [batch_size, height', width', output_size] or
        [batch_size, height', width', depth', output_size], where the spatial
        dimensions are reduced by the downsampling in the network.
"""
if is_training:
dropout_rate = self.dropout_rate
else:
dropout_rate = 0.
# Initial projection with large kernel as in original resnet architecture
if self.num_dims == 3:
conv = tf.layers.conv3d
elif self.num_dims == 2:
conv = tf.layers.conv2d
x = conv(
inputs,
filters=self.hidden_sizes[0],
kernel_size=7,
strides=2,
padding='same',
name='conv_input')
if self.num_dims == 2:
x = tf.layers.max_pooling2d(
x, strides=2, pool_size=3, padding='same', name='pool_input')
for d, (hidden_size,
blocks) in enumerate(zip(self.hidden_sizes, self.num_blocks)):
with tf.variable_scope('resolution_{}'.format(d)):
        # Downsample at the start of each collection of blocks, except the
        # first.
x = conv_residual_block(
x,
            downsample=(d > 0),
dropout_rate=dropout_rate,
output_channels=hidden_size,
re_zero=self.re_zero,
name='block_1_downsample')
for i in range(blocks - 1):
x = conv_residual_block(
x,
dropout_rate=dropout_rate,
output_channels=hidden_size,
re_zero=self.re_zero,
name='block_{}'.format(i + 2))
return x
class VertexModel(snt.AbstractModule):
"""Autoregressive generative model of quantized mesh vertices.
Operates on flattened vertex sequences with a stopping token:
  [z_0, y_0, x_0, z_1, y_1, x_1, ..., z_n, y_n, x_n, STOP]
Input vertex coordinates are embedded and tagged with learned coordinate and
position indicators. A transformer decoder outputs logits for a quantized
vertex distribution.
"""
def __init__(self,
decoder_config,
quantization_bits,
class_conditional=False,
num_classes=55,
max_num_input_verts=2500,
use_discrete_embeddings=True,
name='vertex_model'):
"""Initializes VertexModel.
Args:
decoder_config: Dictionary with TransformerDecoder config
      quantization_bits: Number of quantization bits used in mesh
        preprocessing.
class_conditional: If True, then condition on learned class embeddings.
num_classes: Number of classes to condition on.
max_num_input_verts: Maximum number of vertices. Used for learned position
embeddings.
use_discrete_embeddings: If True, use discrete rather than continuous
vertex embeddings.
name: Name of variable scope
"""
super(VertexModel, self).__init__(name=name)
self.embedding_dim = decoder_config['hidden_size']
self.class_conditional = class_conditional
self.num_classes = num_classes
self.max_num_input_verts = max_num_input_verts
self.quantization_bits = quantization_bits
self.use_discrete_embeddings = use_discrete_embeddings
with self._enter_variable_scope():
self.decoder = TransformerDecoder(**decoder_config)
@snt.reuse_variables
def _embed_class_label(self, labels):
"""Embeds class label with learned embedding matrix."""
init_dict = {'embeddings': tf.glorot_uniform_initializer}
return snt.Embed(
vocab_size=self.num_classes,
embed_dim=self.embedding_dim,
initializers=init_dict,
densify_gradients=True,
name='class_label')(labels)
@snt.reuse_variables
def _prepare_context(self, context, is_training=False):
"""Prepare class label context."""
if self.class_conditional:
global_context_embedding = self._embed_class_label(context['class_label'])
else:
global_context_embedding = None
return global_context_embedding, None
@snt.reuse_variables
def _embed_inputs(self, vertices, global_context_embedding=None):
"""Embeds flat vertices and adds position and coordinate information."""
    # Get input shapes
input_shape = tf.shape(vertices)
batch_size, seq_length = input_shape[0], input_shape[1]
# Coord indicators (x, y, z)
coord_embeddings = snt.Embed(
vocab_size=3,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='coord_embeddings')(tf.mod(tf.range(seq_length), 3))
# Position embeddings
pos_embeddings = snt.Embed(
vocab_size=self.max_num_input_verts,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='coord_embeddings')(tf.floordiv(tf.range(seq_length), 3))
# Discrete vertex value embeddings
if self.use_discrete_embeddings:
vert_embeddings = snt.Embed(
vocab_size=2**self.quantization_bits + 1,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='value_embeddings')(vertices)
# Continuous vertex value embeddings
else:
vert_embeddings = tf.layers.dense(
dequantize_verts(vertices[..., None], self.quantization_bits),
self.embedding_dim,
use_bias=True,
name='value_embeddings')
# Step zero embeddings
if global_context_embedding is None:
zero_embed = tf.get_variable(
'embed_zero', shape=[1, 1, self.embedding_dim])
zero_embed_tiled = tf.tile(zero_embed, [batch_size, 1, 1])
else:
zero_embed_tiled = global_context_embedding[:, None]
# Aggregate embeddings
embeddings = vert_embeddings + (coord_embeddings + pos_embeddings)[None]
embeddings = tf.concat([zero_embed_tiled, embeddings], axis=1)
return embeddings
@snt.reuse_variables
def _project_to_logits(self, inputs):
"""Projects transformer outputs to logits for predictive distribution."""
return tf.layers.dense(
inputs,
2**self.quantization_bits + 1, # + 1 for stopping token
use_bias=True,
kernel_initializer=tf.zeros_initializer(),
name='project_to_logits')
@snt.reuse_variables
def _create_dist(self,
vertices,
global_context_embedding=None,
sequential_context_embeddings=None,
temperature=1.,
top_k=0,
top_p=1.,
is_training=False,
cache=None):
"""Outputs categorical dist for quantized vertex coordinates."""
# Embed inputs
decoder_inputs = self._embed_inputs(vertices, global_context_embedding)
if cache is not None:
decoder_inputs = decoder_inputs[:, -1:]
# pass through decoder
outputs = self.decoder(
decoder_inputs, cache=cache,
sequential_context_embeddings=sequential_context_embeddings,
is_training=is_training)
# Get logits and optionally process for sampling
logits = self._project_to_logits(outputs)
logits /= temperature
logits = top_k_logits(logits, top_k)
logits = top_p_logits(logits, top_p)
cat_dist = tfd.Categorical(logits=logits)
return cat_dist
def _build(self, batch, is_training=False):
"""Pass batch through vertex model and get log probabilities under model.
Args:
batch: Dictionary containing:
'vertices_flat': int32 vertex tensors of shape [batch_size, seq_length].
is_training: If True, use dropout.
Returns:
pred_dist: tfd.Categorical predictive distribution with batch shape
[batch_size, seq_length].
"""
global_context, seq_context = self._prepare_context(
batch, is_training=is_training)
pred_dist = self._create_dist(
batch['vertices_flat'][:, :-1], # Last element not used for preds
global_context_embedding=global_context,
sequential_context_embeddings=seq_context,
is_training=is_training)
return pred_dist
def sample(self,
num_samples,
context=None,
max_sample_length=None,
temperature=1.,
top_k=0,
top_p=1.,
recenter_verts=True,
only_return_complete=True):
"""Autoregressive sampling with caching.
Args:
num_samples: Number of samples to produce.
context: Dictionary of context, such as class labels. See _prepare_context
for details.
max_sample_length: Maximum length of sampled vertex sequences. Sequences
that do not complete are truncated.
temperature: Scalar softmax temperature > 0.
top_k: Number of tokens to keep for top-k sampling.
top_p: Proportion of probability mass to keep for top-p sampling.
recenter_verts: If True, center vertex samples around origin. This should
be used if model is trained using shift augmentations.
only_return_complete: If True, only return completed samples. Otherwise
return all samples along with completed indicator.
Returns:
outputs: Output dictionary with fields:
'completed': Boolean tensor of shape [num_samples]. If True then
corresponding sample completed within max_sample_length.
'vertices': Tensor of samples with shape [num_samples, num_verts, 3].
'num_vertices': Tensor indicating number of vertices for each example
in padded vertex samples.
'vertices_mask': Tensor of shape [num_samples, num_verts] that masks
corresponding invalid elements in 'vertices'.
"""
# Obtain context for decoder
global_context, seq_context = self._prepare_context(
context, is_training=False)
# num_samples is the minimum value of num_samples and the batch size of
# context inputs (if present).
if global_context is not None:
num_samples = tf.minimum(num_samples, tf.shape(global_context)[0])
global_context = global_context[:num_samples]
if seq_context is not None:
seq_context = seq_context[:num_samples]
elif seq_context is not None:
num_samples = tf.minimum(num_samples, tf.shape(seq_context)[0])
seq_context = seq_context[:num_samples]
def _loop_body(i, samples, cache):
"""While-loop body for autoregression calculation."""
cat_dist = self._create_dist(
samples,
global_context_embedding=global_context,
sequential_context_embeddings=seq_context,
cache=cache,
temperature=temperature,
top_k=top_k,
top_p=top_p)
next_sample = cat_dist.sample()
samples = tf.concat([samples, next_sample], axis=1)
return i + 1, samples, cache
def _stopping_cond(i, samples, cache):
"""Stopping condition for sampling while-loop."""
del i, cache # Unused
return tf.reduce_any(tf.reduce_all(tf.not_equal(samples, 0), axis=-1))
# Initial values for loop variables
samples = tf.zeros([num_samples, 0], dtype=tf.int32)
max_sample_length = max_sample_length or self.max_num_input_verts
cache, cache_shape_invariants = self.decoder.create_init_cache(num_samples)
_, v, _ = tf.while_loop(
cond=_stopping_cond,
body=_loop_body,
loop_vars=(0, samples, cache),
shape_invariants=(tf.TensorShape([]), tf.TensorShape([None, None]),
cache_shape_invariants),
maximum_iterations=max_sample_length * 3 + 1,
back_prop=False,
parallel_iterations=1)
# Check if samples completed. Samples are complete if the stopping token
# is produced.
completed = tf.reduce_any(tf.equal(v, 0), axis=-1)
    # Get the number of vertices in the sample. This requires finding the
    # index of the stopping token. For complete samples, argmax gives the
    # index of the first zero (stopping) token.
stop_index_completed = tf.argmax(
tf.cast(tf.equal(v, 0), tf.int32), axis=-1, output_type=tf.int32)
# For incomplete samples the stopping index is just the maximum index.
stop_index_incomplete = (
max_sample_length * 3 * tf.ones_like(stop_index_completed))
stop_index = tf.where(
completed, stop_index_completed, stop_index_incomplete)
num_vertices = tf.floordiv(stop_index, 3)
# Convert to 3D vertices by reshaping and re-ordering x -> y -> z
v = v[:, :(tf.reduce_max(num_vertices) * 3)] - 1
verts_dequantized = dequantize_verts(v, self.quantization_bits)
vertices = tf.reshape(verts_dequantized, [num_samples, -1, 3])
vertices = tf.stack(
[vertices[..., 2], vertices[..., 1], vertices[..., 0]], axis=-1)
    # Pad samples to max sample length. This is required in order to
    # concatenate samples across different replicator instances. Pad with
    # stopping tokens for incomplete samples.
pad_size = max_sample_length - tf.shape(vertices)[1]
vertices = tf.pad(vertices, [[0, 0], [0, pad_size], [0, 0]])
# 3D Vertex mask
vertices_mask = tf.cast(
tf.range(max_sample_length)[None] < num_vertices[:, None], tf.float32)
if recenter_verts:
vert_max = tf.reduce_max(
vertices - 1e10 * (1. - vertices_mask)[..., None], axis=1,
keepdims=True)
vert_min = tf.reduce_min(
vertices + 1e10 * (1. - vertices_mask)[..., None], axis=1,
keepdims=True)
vert_centers = 0.5 * (vert_max + vert_min)
vertices -= vert_centers
vertices *= vertices_mask[..., None]
if only_return_complete:
vertices = tf.boolean_mask(vertices, completed)
num_vertices = tf.boolean_mask(num_vertices, completed)
vertices_mask = tf.boolean_mask(vertices_mask, completed)
completed = tf.boolean_mask(completed, completed)
# Outputs
outputs = {
'completed': completed,
'vertices': vertices,
'num_vertices': num_vertices,
'vertices_mask': vertices_mask,
}
return outputs
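# A typical training objective treats the distribution returned by the vertex
# model as a per-token predictive distribution and minimizes the negative
# log-likelihood of the observed flattened vertex sequence. The sketch below
# is illustrative only: the decoder_config values and the
# 'vertices_flat_mask' padding mask key are assumptions, not the exact setup
# used in the paper.
def _example_vertex_model_loss(batch):
  """Minimal sketch of a masked NLL loss for the vertex model (illustration).

  Args:
    batch: Dictionary with 'vertices_flat' int32 tokens of shape
      [batch_size, seq_length] and an assumed float32 padding mask
      'vertices_flat_mask' of the same shape.

  Returns:
    Scalar loss tensor.
  """
  vertex_model = VertexModel(
      decoder_config=dict(
          hidden_size=128, fc_size=512, num_heads=4, num_layers=3),
      quantization_bits=8,
      max_num_input_verts=1000)
  pred_dist = vertex_model(batch, is_training=True)
  # Per-token log-probabilities, masked so that padding does not contribute.
  log_probs = pred_dist.log_prob(batch['vertices_flat'])
  return -tf.reduce_sum(log_probs * batch['vertices_flat_mask'])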
class ImageToVertexModel(VertexModel):
"""Generative model of quantized mesh vertices with image conditioning.
Operates on flattened vertex sequences with a stopping token:
  [z_0, y_0, x_0, z_1, y_1, x_1, ..., z_n, y_n, x_n, STOP]
Input vertex coordinates are embedded and tagged with learned coordinate and
position indicators. A transformer decoder outputs logits for a quantized
vertex distribution. Image inputs are encoded and used to condition the
vertex decoder.
"""
def __init__(self,
res_net_config,
decoder_config,
quantization_bits,
use_discrete_embeddings=True,
max_num_input_verts=2500,
               name='image_to_vertex_model'):
    """Initializes ImageToVertexModel.
Args:
res_net_config: Dictionary with ResNet config.
decoder_config: Dictionary with TransformerDecoder config.
      quantization_bits: Number of quantization bits used in mesh
        preprocessing.
use_discrete_embeddings: If True, use discrete rather than continuous
vertex embeddings.
max_num_input_verts: Maximum number of vertices. Used for learned position
embeddings.
name: Name of variable scope
"""
super(ImageToVertexModel, self).__init__(
decoder_config=decoder_config,
quantization_bits=quantization_bits,
max_num_input_verts=max_num_input_verts,
use_discrete_embeddings=use_discrete_embeddings,
name=name)
with self._enter_variable_scope():
self.res_net = ResNet(num_dims=2, **res_net_config)
@snt.reuse_variables
def _prepare_context(self, context, is_training=False):
# Pass images through encoder
image_embeddings = self.res_net(
context['image'] - 0.5, is_training=is_training)
# Add 2D coordinate grid embedding
processed_image_resolution = tf.shape(image_embeddings)[1]
x = tf.linspace(-1., 1., processed_image_resolution)
image_coords = tf.stack(tf.meshgrid(x, x), axis=-1)
image_coord_embeddings = tf.layers.dense(
image_coords,
self.embedding_dim,
use_bias=True,
name='image_coord_embeddings')
image_embeddings += image_coord_embeddings[None]
# Reshape spatial grid to sequence
batch_size = tf.shape(image_embeddings)[0]
sequential_context_embedding = tf.reshape(
image_embeddings, [batch_size, -1, self.embedding_dim])
return None, sequential_context_embedding
class VoxelToVertexModel(VertexModel):
"""Generative model of quantized mesh vertices with voxel conditioning.
Operates on flattened vertex sequences with a stopping token:
  [z_0, y_0, x_0, z_1, y_1, x_1, ..., z_n, y_n, x_n, STOP]
Input vertex coordinates are embedded and tagged with learned coordinate and
position indicators. A transformer decoder outputs logits for a quantized
  vertex distribution. Voxel inputs are encoded and used to condition the
vertex decoder.
"""
def __init__(self,
res_net_config,
decoder_config,
quantization_bits,
use_discrete_embeddings=True,
max_num_input_verts=2500,
name='voxel_to_vertex_model'):
"""Initializes VoxelToVertexModel.
Args:
res_net_config: Dictionary with ResNet config.
decoder_config: Dictionary with TransformerDecoder config.
quantization_bits: Integer number of bits used for vertex quantization.
use_discrete_embeddings: If True, use discrete rather than continuous
vertex embeddings.
max_num_input_verts: Maximum number of vertices. Used for learned position
embeddings.
name: Name of variable scope
"""
super(VoxelToVertexModel, self).__init__(
decoder_config=decoder_config,
quantization_bits=quantization_bits,
max_num_input_verts=max_num_input_verts,
use_discrete_embeddings=use_discrete_embeddings,
name=name)
with self._enter_variable_scope():
self.res_net = ResNet(num_dims=3, **res_net_config)
@snt.reuse_variables
def _prepare_context(self, context, is_training=False):
# Embed binary input voxels
voxel_embeddings = snt.Embed(
vocab_size=2,
        embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='voxel_embeddings')(context['voxels'])
# Pass embedded voxels through voxel encoder
voxel_embeddings = self.res_net(
voxel_embeddings, is_training=is_training)
# Add 3D coordinate grid embedding
processed_voxel_resolution = tf.shape(voxel_embeddings)[1]
x = tf.linspace(-1., 1., processed_voxel_resolution)
voxel_coords = tf.stack(tf.meshgrid(x, x, x), axis=-1)
voxel_coord_embeddings = tf.layers.dense(
voxel_coords,
self.embedding_dim,
use_bias=True,
name='voxel_coord_embeddings')
voxel_embeddings += voxel_coord_embeddings[None]
# Reshape spatial grid to sequence
batch_size = tf.shape(voxel_embeddings)[0]
sequential_context_embedding = tf.reshape(
voxel_embeddings, [batch_size, -1, self.embedding_dim])
return None, sequential_context_embedding
class FaceModel(snt.AbstractModule):
"""Autoregressive generative model of n-gon meshes.
Operates on sets of input vertices as well as flattened face sequences with
new face and stopping tokens:
[f_0^0, f_0^1, f_0^2, NEW, f_1^0, f_1^1, ..., STOP]
Input vertices are encoded using a Transformer encoder.
Input face sequences are embedded and tagged with learned position indicators,
as well as their corresponding vertex embeddings. A transformer decoder
outputs a pointer which is compared to each vertex embedding to obtain a
distribution over vertex indices.
"""
def __init__(self,
encoder_config,
decoder_config,
class_conditional=True,
num_classes=55,
decoder_cross_attention=True,
use_discrete_vertex_embeddings=True,
quantization_bits=8,
max_seq_length=5000,
name='face_model'):
"""Initializes FaceModel.
Args:
encoder_config: Dictionary with TransformerEncoder config.
decoder_config: Dictionary with TransformerDecoder config.
class_conditional: If True, then condition on learned class embeddings.
num_classes: Number of classes to condition on.
      decoder_cross_attention: If True, then use cross attention from decoder
        queries into encoder outputs.
use_discrete_vertex_embeddings: If True, use discrete vertex embeddings.
quantization_bits: Number of quantization bits for discrete vertex
embeddings.
max_seq_length: Maximum face sequence length. Used for learned position
embeddings.
name: Name of variable scope
"""
super(FaceModel, self).__init__(name=name)
self.embedding_dim = decoder_config['hidden_size']
self.class_conditional = class_conditional
self.num_classes = num_classes
self.max_seq_length = max_seq_length
self.decoder_cross_attention = decoder_cross_attention
self.use_discrete_vertex_embeddings = use_discrete_vertex_embeddings
self.quantization_bits = quantization_bits
with self._enter_variable_scope():
self.decoder = TransformerDecoder(**decoder_config)
self.encoder = TransformerEncoder(**encoder_config)
@snt.reuse_variables
def _embed_class_label(self, labels):
"""Embeds class label with learned embedding matrix."""
init_dict = {'embeddings': tf.glorot_uniform_initializer}
return snt.Embed(
vocab_size=self.num_classes,
embed_dim=self.embedding_dim,
initializers=init_dict,
densify_gradients=True,
name='class_label')(labels)
@snt.reuse_variables
def _prepare_context(self, context, is_training=False):
"""Prepare class label and vertex context."""
if self.class_conditional:
global_context_embedding = self._embed_class_label(context['class_label'])
else:
global_context_embedding = None
vertex_embeddings = self._embed_vertices(
context['vertices'], context['vertices_mask'],
is_training=is_training)
if self.decoder_cross_attention:
sequential_context_embeddings = (
vertex_embeddings *
tf.pad(context['vertices_mask'], [[0, 0], [2, 0]],
constant_values=1)[..., None])
else:
sequential_context_embeddings = None
return (vertex_embeddings, global_context_embedding,
sequential_context_embeddings)
@snt.reuse_variables
def _embed_vertices(self, vertices, vertices_mask, is_training=False):
"""Embeds vertices with transformer encoder."""
# num_verts = tf.shape(vertices)[1]
if self.use_discrete_vertex_embeddings:
vertex_embeddings = 0.
verts_quantized = quantize_verts(vertices, self.quantization_bits)
for c in range(3):
vertex_embeddings += snt.Embed(
vocab_size=256,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='coord_{}'.format(c))(verts_quantized[..., c])
else:
vertex_embeddings = tf.layers.dense(
vertices, self.embedding_dim, use_bias=True, name='vertex_embeddings')
vertex_embeddings *= vertices_mask[..., None]
# Pad vertex embeddings with learned embeddings for stopping and new face
# tokens
stopping_embeddings = tf.get_variable(
'stopping_embeddings', shape=[1, 2, self.embedding_dim])
stopping_embeddings = tf.tile(stopping_embeddings,
[tf.shape(vertices)[0], 1, 1])
vertex_embeddings = tf.concat(
[stopping_embeddings, vertex_embeddings], axis=1)
# Pass through Transformer encoder
vertex_embeddings = self.encoder(vertex_embeddings, is_training=is_training)
return vertex_embeddings
@snt.reuse_variables
def _embed_inputs(self, faces_long, vertex_embeddings,
global_context_embedding=None):
"""Embeds face sequences and adds within and between face positions."""
# Face value embeddings are gathered vertex embeddings
face_embeddings = tf.gather(vertex_embeddings, faces_long, batch_dims=1)
# Position embeddings
pos_embeddings = snt.Embed(
vocab_size=self.max_seq_length,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='coord_embeddings')(tf.range(tf.shape(faces_long)[1]))
# Step zero embeddings
batch_size = tf.shape(face_embeddings)[0]
if global_context_embedding is None:
zero_embed = tf.get_variable(
'embed_zero', shape=[1, 1, self.embedding_dim])
zero_embed_tiled = tf.tile(zero_embed, [batch_size, 1, 1])
else:
zero_embed_tiled = global_context_embedding[:, None]
# Aggregate embeddings
embeddings = face_embeddings + pos_embeddings[None]
embeddings = tf.concat([zero_embed_tiled, embeddings], axis=1)
return embeddings
@snt.reuse_variables
def _project_to_pointers(self, inputs):
"""Projects transformer outputs to pointer vectors."""
return tf.layers.dense(
inputs,
self.embedding_dim,
use_bias=True,
kernel_initializer=tf.zeros_initializer(),
name='project_to_pointers'
)
@snt.reuse_variables
def _create_dist(self,
vertex_embeddings,
vertices_mask,
faces_long,
global_context_embedding=None,
sequential_context_embeddings=None,
temperature=1.,
top_k=0,
top_p=1.,
is_training=False,
cache=None):
"""Outputs categorical dist for vertex indices."""
# Embed inputs
decoder_inputs = self._embed_inputs(
faces_long, vertex_embeddings, global_context_embedding)
# Pass through Transformer decoder
if cache is not None:
decoder_inputs = decoder_inputs[:, -1:]
decoder_outputs = self.decoder(
decoder_inputs,
cache=cache,
sequential_context_embeddings=sequential_context_embeddings,
is_training=is_training)
# Get pointers
pred_pointers = self._project_to_pointers(decoder_outputs)
# Get logits and mask
logits = tf.matmul(pred_pointers, vertex_embeddings, transpose_b=True)
logits /= tf.sqrt(float(self.embedding_dim))
f_verts_mask = tf.pad(
vertices_mask, [[0, 0], [2, 0]], constant_values=1.)[:, None]
logits *= f_verts_mask
logits -= (1. - f_verts_mask) * 1e9
logits /= temperature
logits = top_k_logits(logits, top_k)
logits = top_p_logits(logits, top_p)
return tfd.Categorical(logits=logits)
def _build(self, batch, is_training=False):
"""Pass batch through face model and get log probabilities.
Args:
batch: Dictionary containing:
        'vertices': Dequantized vertex tensor of shape
          [batch_size, num_vertices, 3].
'faces': int32 tensor of shape [batch_size, seq_length] with flattened
faces.
'vertices_mask': float32 tensor with shape
[batch_size, num_vertices] that masks padded elements in 'vertices'.
is_training: If True, use dropout.
Returns:
pred_dist: tfd.Categorical predictive distribution with batch shape
[batch_size, seq_length].
"""
vertex_embeddings, global_context, seq_context = self._prepare_context(
batch, is_training=is_training)
pred_dist = self._create_dist(
vertex_embeddings,
batch['vertices_mask'],
batch['faces'][:, :-1],
global_context_embedding=global_context,
sequential_context_embeddings=seq_context,
is_training=is_training)
return pred_dist
def sample(self,
context,
max_sample_length=None,
temperature=1.,
top_k=0,
top_p=1.,
only_return_complete=True):
"""Sample from face model using caching.
Args:
context: Dictionary of context, including 'vertices' and 'vertices_mask'.
See _prepare_context for details.
      max_sample_length: Maximum length of sampled face sequences. Sequences
        that do not complete are truncated.
temperature: Scalar softmax temperature > 0.
top_k: Number of tokens to keep for top-k sampling.
top_p: Proportion of probability mass to keep for top-p sampling.
only_return_complete: If True, only return completed samples. Otherwise
return all samples along with completed indicator.
Returns:
outputs: Output dictionary with fields:
'completed': Boolean tensor of shape [num_samples]. If True then
corresponding sample completed within max_sample_length.
        'faces': Tensor of face index samples with shape
          [num_samples, max_sample_length].
        'num_face_indices': Tensor indicating the number of face indices for
          each example in the padded face samples.
"""
vertex_embeddings, global_context, seq_context = self._prepare_context(
context, is_training=False)
num_samples = tf.shape(vertex_embeddings)[0]
def _loop_body(i, samples, cache):
"""While-loop body for autoregression calculation."""
pred_dist = self._create_dist(
vertex_embeddings,
context['vertices_mask'],
samples,
global_context_embedding=global_context,
sequential_context_embeddings=seq_context,
cache=cache,
temperature=temperature,
top_k=top_k,
top_p=top_p)
next_sample = pred_dist.sample()[:, -1:]
samples = tf.concat([samples, next_sample], axis=1)
return i + 1, samples, cache
def _stopping_cond(i, samples, cache):
"""Stopping conditions for autoregressive calculation."""
del i, cache # Unused
return tf.reduce_any(tf.reduce_all(tf.not_equal(samples, 0), axis=-1))
# While loop sampling with caching
samples = tf.zeros([num_samples, 0], dtype=tf.int32)
max_sample_length = max_sample_length or self.max_seq_length
cache, cache_shape_invariants = self.decoder.create_init_cache(num_samples)
_, f, _ = tf.while_loop(
cond=_stopping_cond,
body=_loop_body,
loop_vars=(0, samples, cache),
shape_invariants=(tf.TensorShape([]), tf.TensorShape([None, None]),
cache_shape_invariants),
back_prop=False,
parallel_iterations=1,
maximum_iterations=max_sample_length)
# Record completed samples
complete_samples = tf.reduce_any(tf.equal(f, 0), axis=-1)
# Find number of faces
sample_length = tf.shape(f)[-1]
# Get largest new face (1) index as stopping point for incomplete samples.
max_one_ind = tf.reduce_max(
tf.range(sample_length)[None] * tf.cast(tf.equal(f, 1), tf.int32),
axis=-1)
zero_inds = tf.cast(
tf.argmax(tf.cast(tf.equal(f, 0), tf.int32), axis=-1), tf.int32)
num_face_indices = tf.where(complete_samples, zero_inds, max_one_ind) + 1
# Mask faces beyond stopping token with zeros
# This mask has a -1 in order to replace the last new face token with zero
faces_mask = tf.cast(
tf.range(sample_length)[None] < num_face_indices[:, None] - 1, tf.int32)
f *= faces_mask
# This is the real mask
faces_mask = tf.cast(
tf.range(sample_length)[None] < num_face_indices[:, None], tf.int32)
# Pad to maximum size with zeros
pad_size = max_sample_length - sample_length
f = tf.pad(f, [[0, 0], [0, pad_size]])
if only_return_complete:
f = tf.boolean_mask(f, complete_samples)
num_face_indices = tf.boolean_mask(num_face_indices, complete_samples)
context = tf.nest.map_structure(
lambda x: tf.boolean_mask(x, complete_samples), context)
complete_samples = tf.boolean_mask(complete_samples, complete_samples)
# outputs
outputs = {
'context': context,
'completed': complete_samples,
'faces': f,
'num_face_indices': num_face_indices,
}
return outputs
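# End-to-end sampling couples the two models: the vertex samples (and their
# validity masks) produced by `VertexModel.sample` form the context consumed
# by `FaceModel.sample`. The sketch below shows the expected flow only; the
# config dictionaries are placeholder assumptions, and class conditioning is
# disabled so that no labels are required.
def _example_sample_mesh(num_samples=4):
  """Minimal sketch of unconditionally sampling vertices, then faces."""
  vertex_model = VertexModel(
      decoder_config=dict(
          hidden_size=128, fc_size=512, num_heads=4, num_layers=3),
      quantization_bits=8,
      class_conditional=False)
  face_model = FaceModel(
      encoder_config=dict(
          hidden_size=128, fc_size=512, num_heads=4, num_layers=3),
      decoder_config=dict(
          hidden_size=128, fc_size=512, num_heads=4, num_layers=3),
      class_conditional=False,
      max_seq_length=500)
  vertex_samples = vertex_model.sample(
      num_samples, max_sample_length=200, top_p=0.9,
      recenter_verts=False, only_return_complete=False)
  # The face model consumes the sampled vertices and their validity mask.
  face_samples = face_model.sample(
      context=vertex_samples, max_sample_length=500, top_p=0.9,
      only_return_complete=False)
  return vertex_samples, face_samples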
| deepmind-research-master | polygen/modules.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Haiku module implementing hierarchical attention over memory."""
import functools
import inspect
from typing import Optional, NamedTuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
_EPSILON = 1e-3
class HierarchicalMemory(NamedTuple):
"""Structure of the hierarchical memory.
Where 'B' is batch size, 'M' is number of memories, 'C' is chunk size, and 'D'
is memory dimension.
"""
keys: jnp.ndarray # [B, M, D]
contents: jnp.ndarray # [B, M, C, D]
steps_since_last_write: jnp.ndarray # [B], steps since last memory write
accumulator: jnp.ndarray # [B, C, D], accumulates experiences before write
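# A fresh memory can be represented by all-zero arrays with the shapes listed
# above. The helper below is a convenience sketch, not part of the original
# API; the zero initialization is an assumption about how memory is reset at
# the start of an episode.
def _example_zero_memory(batch_size: int,
                         num_memories: int,
                         chunk_size: int,
                         mem_dim: int) -> HierarchicalMemory:
  """Returns an all-zero HierarchicalMemory with the documented shapes."""
  return HierarchicalMemory(
      keys=jnp.zeros([batch_size, num_memories, mem_dim]),
      contents=jnp.zeros([batch_size, num_memories, chunk_size, mem_dim]),
      steps_since_last_write=jnp.zeros([batch_size], dtype=jnp.int32),
      accumulator=jnp.zeros([batch_size, chunk_size, mem_dim]))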
def sinusoid_position_encoding(
sequence_length: int,
hidden_size: int,
min_timescale: float = 2.,
max_timescale: float = 1e4,
) -> jnp.ndarray:
"""Creates sinusoidal encodings.
Args:
sequence_length: length [L] of sequence to be position encoded.
hidden_size: dimension [D] of the positional encoding vectors.
min_timescale: minimum timescale for the frequency.
max_timescale: maximum timescale for the frequency.
Returns:
An array of shape [L, D]
"""
freqs = np.arange(0, hidden_size, min_timescale)
inv_freq = max_timescale**(-freqs / hidden_size)
pos_seq = np.arange(sequence_length - 1, -1, -1.0)
sinusoid_inp = np.einsum("i,j->ij", pos_seq, inv_freq)
pos_emb = np.concatenate(
[np.sin(sinusoid_inp), np.cos(sinusoid_inp)], axis=-1)
return pos_emb
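# For example, encoding a chunk of 5 timesteps with hidden size 12 produces a
# [5, 12] array; the attention module below adds these encodings to memory
# contents before within-memory attention. The check below is a small,
# self-contained illustration only.
def _example_position_encoding_shape():
  """Illustrative shape check for the sinusoidal encodings (sketch only)."""
  pos_emb = sinusoid_position_encoding(sequence_length=5, hidden_size=12)
  chex.assert_shape(pos_emb, (5, 12))
  return pos_emb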
class HierarchicalMemoryAttention(hk.Module):
"""Multi-head attention over hierarchical memory."""
def __init__(self,
feature_size: int,
k: int,
num_heads: int = 1,
memory_position_encoding: bool = True,
init_scale: float = 2.,
name: Optional[str] = None) -> None:
"""Constructor.
Args:
feature_size: size of feature dimension of attention-over-memories
embedding.
k: number of memories to sample.
num_heads: number of attention heads.
memory_position_encoding: whether to add positional encodings to memories
during within memory attention.
      init_scale: scale factor for VarianceScaling weight initializers.
name: module name.
"""
super().__init__(name=name)
self._size = feature_size
self._k = k
self._num_heads = num_heads
self._weights = None
self._memory_position_encoding = memory_position_encoding
self._init_scale = init_scale
@property
def num_heads(self):
return self._num_heads
@hk.transparent
def _singlehead_linear(self,
inputs: jnp.ndarray,
hidden_size: int,
name: str):
linear = hk.Linear(
hidden_size,
with_bias=False,
w_init=hk.initializers.VarianceScaling(scale=self._init_scale),
name=name)
out = linear(inputs)
return out
def __call__(
self,
queries: jnp.ndarray,
hm_memory: HierarchicalMemory,
hm_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Do hierarchical attention over the stored memories.
Args:
queries: Tensor [B, Q, E] Query(ies) in, for batch size B, query length
Q, and embedding dimension E.
hm_memory: Hierarchical Memory.
hm_mask: Optional boolean mask tensor of shape [B, Q, M]. Where false,
the corresponding query timepoints cannot attend to the corresponding
memory chunks. This can be used for enforcing causal attention on the
learner, not attending to memories from prior episodes, etc.
Returns:
Value updates for each query slot: [B, Q, D]
"""
# some shape checks
batch_size, query_length, _ = queries.shape
(memory_batch_size, num_memories,
memory_chunk_size, mem_embbedding_size) = hm_memory.contents.shape
assert batch_size == memory_batch_size
chex.assert_shape(hm_memory.keys,
(batch_size, num_memories, mem_embbedding_size))
chex.assert_shape(hm_memory.accumulator,
(memory_batch_size, memory_chunk_size,
mem_embbedding_size))
chex.assert_shape(hm_memory.steps_since_last_write,
(memory_batch_size,))
if hm_mask is not None:
chex.assert_type(hm_mask, bool)
chex.assert_shape(hm_mask,
(batch_size, query_length, num_memories))
query_head = self._singlehead_linear(queries, self._size, "query")
key_head = self._singlehead_linear(
jax.lax.stop_gradient(hm_memory.keys), self._size, "key")
# What times in the input [t] attend to what times in the memories [T].
logits = jnp.einsum("btd,bTd->btT", query_head, key_head)
scaled_logits = logits / np.sqrt(self._size)
# Mask last dimension, replacing invalid logits with large negative values.
# This allows e.g. enforcing causal attention on learner, or blocking
# attention across episodes
if hm_mask is not None:
masked_logits = jnp.where(hm_mask, scaled_logits, -1e6)
else:
masked_logits = scaled_logits
# identify the top-k memories and their relevance weights
top_k_logits, top_k_indices = jax.lax.top_k(masked_logits, self._k)
weights = jax.nn.softmax(top_k_logits)
# set up the within-memory attention
assert self._size % self._num_heads == 0
mha_key_size = self._size // self._num_heads
attention_layer = hk.MultiHeadAttention(
key_size=mha_key_size,
model_size=self._size,
num_heads=self._num_heads,
w_init_scale=self._init_scale,
name="within_mem_attn")
# position encodings
augmented_contents = hm_memory.contents
if self._memory_position_encoding:
position_embs = sinusoid_position_encoding(
memory_chunk_size, mem_embbedding_size)
augmented_contents += position_embs[None, None, :, :]
def _within_memory_attention(sub_inputs, sub_memory_contents, sub_weights,
sub_top_k_indices):
top_k_contents = sub_memory_contents[sub_top_k_indices, :, :]
      # Now we go deeper, with another vmap over **tokens**, because each
      # token can attend to a different set of memories.
def do_attention(sub_sub_inputs, sub_sub_top_k_contents):
tiled_inputs = jnp.tile(sub_sub_inputs[None, None, :],
reps=(self._k, 1, 1))
sub_attention_results = attention_layer(
query=tiled_inputs,
key=sub_sub_top_k_contents,
value=sub_sub_top_k_contents)
return sub_attention_results
do_attention = hk_vmap(do_attention, in_axes=0, split_rng=False)
attention_results = do_attention(sub_inputs, top_k_contents)
attention_results = jnp.squeeze(attention_results, axis=2)
# Now collapse results across k memories
attention_results = sub_weights[:, :, None] * attention_results
attention_results = jnp.sum(attention_results, axis=1)
return attention_results
# vmap across batch
batch_within_memory_attention = hk_vmap(_within_memory_attention,
in_axes=0, split_rng=False)
outputs = batch_within_memory_attention(
queries,
jax.lax.stop_gradient(augmented_contents),
weights,
top_k_indices)
return outputs
@functools.wraps(hk.vmap)
def hk_vmap(*args, **kwargs):
"""Helper function to support older versions of Haiku."""
# Older versions of Haiku did not have split_rng, but the behavior has always
# been equivalent to split_rng=False.
if "split_rng" not in inspect.signature(hk.vmap).parameters:
kwargs.setdefault("split_rng", False)
    if kwargs["split_rng"]:
raise ValueError("The installed version of Haiku only supports "
"`split_rng=False`, please upgrade Haiku.")
del kwargs["split_rng"]
return hk.vmap(*args, **kwargs)
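# Putting it together: the module is used inside an `hk.transform`, with the
# memory passed in as a HierarchicalMemory of matching shapes (here built with
# the zero-memory helper sketched above). The sizes below are arbitrary and
# chosen only for illustration; they mirror the shapes used in the unit tests.
def _example_htm_attention_forward():
  """Minimal sketch of a forward pass through HierarchicalMemoryAttention."""
  batch_size, query_length, embed_dim = 2, 3, 12
  num_memories, chunk_size = 7, 5

  def forward(queries, memory):
    attention = HierarchicalMemoryAttention(
        feature_size=embed_dim, k=4, num_heads=3)
    return attention(queries, memory)

  queries = jnp.zeros([batch_size, query_length, embed_dim])
  memory = _example_zero_memory(
      batch_size, num_memories, chunk_size, embed_dim)
  forward_fn = hk.transform(forward)
  params = forward_fn.init(jax.random.PRNGKey(0), queries, memory)
  # Output has shape [batch_size, query_length, embed_dim].
  return forward_fn.apply(params, jax.random.PRNGKey(0), queries, memory)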
| deepmind-research-master | hierarchical_transformer_memory/hierarchical_attention/htm_attention.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for hierarchical_attention.htm_attention."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import numpy as np
from hierarchical_transformer_memory.hierarchical_attention import htm_attention
def _build_queries_and_memory(query_length, num_memories, mem_chunk_size,
batch_size=2, embedding_size=12):
"""Builds dummy queries + memory contents for tests."""
queries = np.random.random([batch_size, query_length, embedding_size])
memory_contents = np.random.random(
[batch_size, num_memories, mem_chunk_size, embedding_size])
# summary key = average across chunk
memory_keys = np.mean(memory_contents, axis=2)
# to accumulate newest memories before writing
memory_accumulator = np.zeros_like(memory_contents[:, -1, :, :])
memory = htm_attention.HierarchicalMemory(
keys=memory_keys,
contents=memory_contents,
accumulator=memory_accumulator,
steps_since_last_write=np.zeros([batch_size,], dtype=np.int32))
return queries, memory
class HierarchicalAttentionTest(parameterized.TestCase):
@parameterized.parameters([
{
'query_length': 1,
'num_memories': 7,
'mem_chunk_size': 5,
'mem_k': 4,
},
{
'query_length': 9,
'num_memories': 7,
'mem_chunk_size': 5,
'mem_k': 4,
},
])
@hk.testing.transform_and_run
def test_output_shapes(self, query_length, num_memories, mem_chunk_size,
mem_k):
np.random.seed(0)
batch_size = 2
embedding_size = 12
num_heads = 3
queries, memory = _build_queries_and_memory(
query_length=query_length, num_memories=num_memories,
mem_chunk_size=mem_chunk_size, embedding_size=embedding_size)
hm_att = htm_attention.HierarchicalMemoryAttention(
feature_size=embedding_size,
k=mem_k,
num_heads=num_heads)
results = hm_att(queries, memory)
self.assertEqual(results.shape,
(batch_size, query_length, embedding_size))
self.assertTrue(np.all(np.isfinite(results)))
@hk.testing.transform_and_run
def test_masking(self):
np.random.seed(0)
batch_size = 2
embedding_size = 12
num_heads = 3
query_length = 5
num_memories = 7
mem_chunk_size = 6
mem_k = 4
queries, memory = _build_queries_and_memory(
query_length=query_length, num_memories=num_memories,
mem_chunk_size=mem_chunk_size, embedding_size=embedding_size)
hm_att = htm_attention.HierarchicalMemoryAttention(
feature_size=embedding_size,
k=mem_k,
num_heads=num_heads)
# get a random boolean mask
mask = np.random.binomial(
1, 0.5, [batch_size, query_length, num_memories]).astype(bool)
results = hm_att(queries, memory, hm_mask=mask)
self.assertEqual(results.shape,
(batch_size, query_length, embedding_size))
self.assertTrue(np.all(np.isfinite(results)))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | hierarchical_transformer_memory/hierarchical_attention/htm_attention_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A pycolab environment for going to the ballet.
A pycolab-based environment for testing memory for sequences of events. The
environment contains some number of "dancer" characters in (implicit) 3 x 3
squares within a larger 9 x 9 room. The agent starts in the center of the room.
At the beginning of an episode, the dancers each do a dance solo of a fixed
length, separated by empty time of a fixed length. The agent's actions do
nothing during the dances. After the last dance ends, the agent must go up to a
dancer, identified using language describing the dance. The agent is rewarded +1
for approaching the correct dancer, 0 otherwise.
The room is upsampled at a size of 9 pixels per square to render a view for the
agent, which is cropped in egocentric perspective, i.e. the agent is always in
the center of its view (see https://arxiv.org/abs/1910.00571).
"""
from absl import app
from absl import flags
from absl import logging
import dm_env
import numpy as np
from pycolab import cropping
from hierarchical_transformer_memory.pycolab_ballet import ballet_environment_core as ballet_core
FLAGS = flags.FLAGS
UPSAMPLE_SIZE = 9 # pixels per game square
SCROLL_CROP_SIZE = 11 # in game squares
DANCER_SHAPES = [
"triangle", "empty_square", "plus", "inverse_plus", "ex", "inverse_ex",
"circle", "empty_circle", "tee", "upside_down_tee",
"h", "u", "upside_down_u", "vertical_stripes", "horizontal_stripes"
]
COLORS = {
"red": np.array([255, 0, 0]),
"green": np.array([0, 255, 0]),
"blue": np.array([0, 0, 255]),
"purple": np.array([128, 0, 128]),
"orange": np.array([255, 165, 0]),
"yellow": np.array([255, 255, 0]),
"brown": np.array([128, 64, 0]),
"pink": np.array([255, 64, 255]),
"cyan": np.array([0, 255, 255]),
"dark_green": np.array([0, 100, 0]),
"dark_red": np.array([100, 0, 0]),
"dark_blue": np.array([0, 0, 100]),
"olive": np.array([100, 100, 0]),
"teal": np.array([0, 100, 100]),
"lavender": np.array([215, 200, 255]),
"peach": np.array([255, 210, 170]),
"rose": np.array([255, 205, 230]),
"light_green": np.array([200, 255, 200]),
"light_yellow": np.array([255, 255, 200]),
}
def _generate_template(object_name):
"""Generates a template object image, given a name with color and shape."""
object_color, object_type = object_name.split()
template = np.zeros((UPSAMPLE_SIZE, UPSAMPLE_SIZE))
half = UPSAMPLE_SIZE // 2
if object_type == "triangle":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if (j <= half and i >= 2 * (half - j)) or (j > half and i >= 2 *
(j - half)):
template[i, j] = 1.
elif object_type == "square":
template[:, :] = 1.
elif object_type == "empty_square":
template[:2, :] = 1.
template[-2:, :] = 1.
template[:, :2] = 1.
template[:, -2:] = 1.
elif object_type == "plus":
template[:, half - 1:half + 2] = 1.
template[half - 1:half + 2, :] = 1.
elif object_type == "inverse_plus":
template[:, :] = 1.
template[:, half - 1:half + 2] = 0.
template[half - 1:half + 2, :] = 0.
elif object_type == "ex":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if abs(i - j) <= 1 or abs(UPSAMPLE_SIZE - 1 - j - i) <= 1:
template[i, j] = 1.
elif object_type == "inverse_ex":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if not (abs(i - j) <= 1 or abs(UPSAMPLE_SIZE - 1 - j - i) <= 1):
template[i, j] = 1.
elif object_type == "circle":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if (i - half)**2 + (j - half)**2 <= half**2:
template[i, j] = 1.
elif object_type == "empty_circle":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if abs((i - half)**2 + (j - half)**2 - half**2) < 6:
template[i, j] = 1.
elif object_type == "tee":
template[:, half - 1:half + 2] = 1.
template[:3, :] = 1.
elif object_type == "upside_down_tee":
template[:, half - 1:half + 2] = 1.
template[-3:, :] = 1.
elif object_type == "h":
template[:, :3] = 1.
template[:, -3:] = 1.
template[half - 1:half + 2, :] = 1.
elif object_type == "u":
template[:, :3] = 1.
template[:, -3:] = 1.
template[-3:, :] = 1.
elif object_type == "upside_down_u":
template[:, :3] = 1.
template[:, -3:] = 1.
template[:3, :] = 1.
elif object_type == "vertical_stripes":
for j in range(half + UPSAMPLE_SIZE % 2):
template[:, 2*j] = 1.
elif object_type == "horizontal_stripes":
for i in range(half + UPSAMPLE_SIZE % 2):
template[2*i, :] = 1.
else:
raise ValueError("Unknown object: {}".format(object_type))
if object_color not in COLORS:
raise ValueError("Unknown color: {}".format(object_color))
template = np.tensordot(template, COLORS[object_color], axes=0)
return template
# Agent and wall templates
_CHAR_TO_TEMPLATE_BASE = {
ballet_core.AGENT_CHAR:
np.tensordot(
np.ones([UPSAMPLE_SIZE, UPSAMPLE_SIZE]),
np.array([255, 255, 255]),
axes=0),
ballet_core.WALL_CHAR:
np.tensordot(
np.ones([UPSAMPLE_SIZE, UPSAMPLE_SIZE]),
np.array([40, 40, 40]),
axes=0),
}
def get_scrolling_cropper(rows=9, cols=9, crop_pad_char=" "):
return cropping.ScrollingCropper(rows=rows, cols=cols,
to_track=[ballet_core.AGENT_CHAR],
pad_char=crop_pad_char,
scroll_margins=(None, None))
class BalletEnvironment(dm_env.Environment):
"""A Python environment API for pycolab ballet tasks."""
def __init__(self, num_dancers, dance_delay, max_steps, rng=None):
"""Construct a BalletEnvironment that wraps pycolab games for agent use.
This class inherits from dm_env and has all the expected methods and specs.
Args:
num_dancers: The number of dancers to use, between 1 and 8 (inclusive).
dance_delay: How long to delay between the dances.
max_steps: The maximum number of steps to allow in an episode, after which
it will terminate.
rng: An optional numpy Random Generator, to set a fixed seed use e.g.
`rng=np.random.default_rng(seed=...)`
"""
self._num_dancers = num_dancers
self._dance_delay = dance_delay
self._max_steps = max_steps
# internal state
if rng is None:
rng = np.random.default_rng()
self._rng = rng
self._current_game = None # Current pycolab game instance.
self._state = None # Current game step state.
self._game_over = None # Whether the game has ended.
self._char_to_template = None # Mapping of chars to sprite images.
# rendering tools
self._cropper = get_scrolling_cropper(SCROLL_CROP_SIZE, SCROLL_CROP_SIZE,
" ")
def _game_factory(self):
"""Samples dancers and positions, returns a pycolab core game engine."""
target_dancer_index = self._rng.integers(self._num_dancers)
motions = list(ballet_core.DANCE_SEQUENCES.keys())
positions = ballet_core.DANCER_POSITIONS.copy()
colors = list(COLORS.keys())
shapes = DANCER_SHAPES.copy()
self._rng.shuffle(positions)
self._rng.shuffle(motions)
self._rng.shuffle(colors)
self._rng.shuffle(shapes)
dancers_and_properties = []
for dancer_i in range(self._num_dancers):
if dancer_i == target_dancer_index:
value = 1.
else:
value = 0.
dancers_and_properties.append(
(ballet_core.POSSIBLE_DANCER_CHARS[dancer_i],
positions[dancer_i],
motions[dancer_i],
shapes[dancer_i],
colors[dancer_i],
value))
logging.info("Making level with dancers_and_properties: %s",
dancers_and_properties)
return ballet_core.make_game(
dancers_and_properties=dancers_and_properties,
dance_delay=self._dance_delay)
def _render_observation(self, observation):
"""Renders from raw pycolab image observation to agent-usable ones."""
observation = self._cropper.crop(observation)
obs_rows, obs_cols = observation.board.shape
image = np.zeros([obs_rows * UPSAMPLE_SIZE, obs_cols * UPSAMPLE_SIZE, 3],
dtype=np.float32)
for i in range(obs_rows):
for j in range(obs_cols):
this_char = chr(observation.board[i, j])
if this_char != ballet_core.FLOOR_CHAR:
image[
i * UPSAMPLE_SIZE:(i + 1) * UPSAMPLE_SIZE, j *
UPSAMPLE_SIZE:(j + 1) * UPSAMPLE_SIZE] = self._char_to_template[
this_char]
image /= 255.
language = np.array(self._current_game.the_plot["instruction_string"])
full_observation = (image, language)
return full_observation
def reset(self):
"""Start a new episode."""
# Build a new game and retrieve its first set of state/reward/discount.
self._current_game = self._game_factory()
# set up rendering, cropping, and state for current game
self._char_to_template = {
k: _generate_template(v) for k, v in self._current_game.the_plot[
"char_to_color_shape"]}
self._char_to_template.update(_CHAR_TO_TEMPLATE_BASE)
self._cropper.set_engine(self._current_game)
self._state = dm_env.StepType.FIRST
# let's go!
observation, _, _ = self._current_game.its_showtime()
observation = self._render_observation(observation)
return dm_env.TimeStep(
step_type=self._state,
reward=None,
discount=None,
observation=observation)
def step(self, action):
"""Apply action, step the world forward, and return observations."""
# If needed, reset and start new episode.
if self._state == dm_env.StepType.LAST:
self._clear_state()
if self._current_game is None:
return self.reset()
# Execute the action in pycolab.
observation, reward, discount = self._current_game.play(action)
self._game_over = self._is_game_over()
reward = reward if reward is not None else 0.
observation = self._render_observation(observation)
# Check the current status of the game.
if self._game_over:
self._state = dm_env.StepType.LAST
else:
self._state = dm_env.StepType.MID
return dm_env.TimeStep(
step_type=self._state,
reward=reward,
discount=discount,
observation=observation)
@property
def observation_spec(self):
image_shape = (SCROLL_CROP_SIZE * UPSAMPLE_SIZE,
SCROLL_CROP_SIZE * UPSAMPLE_SIZE,
3)
return (
# vision
dm_env.specs.Array(
shape=image_shape, dtype=np.float32, name="image"),
# language
dm_env.specs.Array(
shape=[], dtype=str, name="language"),
)
@property
def action_spec(self):
return dm_env.specs.BoundedArray(
shape=[], dtype="int32",
minimum=0, maximum=7,
name="grid_actions")
def _is_game_over(self):
"""Returns whether it is game over, either from the engine or timeout."""
return (self._current_game.game_over or
(self._current_game.the_plot.frame >= self._max_steps))
def _clear_state(self):
"""Clear all the internal information about the game."""
self._state = None
self._current_game = None
self._char_to_template = None
self._game_over = None
def simple_builder(level_name):
"""Simplifies building from fixed defs.
Args:
level_name: '{num_dancers}_delay{delay_length}', where each variable is an
integer. The levels used in the paper were:
['2_delay16', '4_delay16', '8_delay16',
'2_delay48', '4_delay48', '8_delay48']
Returns:
A BalletEnvironment with the requested settings.
"""
num_dancers, dance_delay = level_name.split("_")
num_dancers = int(num_dancers)
dance_delay = int(dance_delay[5:])
max_steps = 320 if dance_delay == 16 else 1024
level_args = dict(
num_dancers=num_dancers,
dance_delay=dance_delay,
max_steps=max_steps)
return BalletEnvironment(**level_args)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
env = simple_builder("4_delay16")
for _ in range(3):
obs = env.reset().observation
for _ in range(300):
obs = env.step(0).observation
print(obs)
if __name__ == "__main__":
app.run(main)
| deepmind-research-master | hierarchical_transformer_memory/pycolab_ballet/ballet_environment.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pycolab_ballet.ballet_environment_wrapper."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from hierarchical_transformer_memory.pycolab_ballet import ballet_environment
from hierarchical_transformer_memory.pycolab_ballet import ballet_environment_core
class BalletEnvironmentTest(parameterized.TestCase):
def test_full_wrapper(self):
env = ballet_environment.BalletEnvironment(
num_dancers=1, dance_delay=16, max_steps=200,
rng=np.random.default_rng(seed=0))
result = env.reset()
self.assertIsNone(result.reward)
level_size = ballet_environment_core.ROOM_SIZE
upsample_size = ballet_environment.UPSAMPLE_SIZE
# wait for dance to complete
for i in range(30):
result = env.step(0).observation
self.assertEqual(result[0].shape,
(level_size[0] * upsample_size,
level_size[1] * upsample_size,
3))
self.assertEqual(str(result[1])[:5],
np.array("watch"))
for i in [1, 1, 1, 1]: # first gets eaten before agent can move
result = env.step(i)
self.assertEqual(result.observation[0].shape,
(level_size[0] * upsample_size,
level_size[1] * upsample_size,
3))
self.assertEqual(str(result.observation[1])[:11],
np.array("up_and_down"))
self.assertEqual(result.reward, 1.)
# check egocentric scrolling is working, by checking object is in center
np.testing.assert_array_almost_equal(
result.observation[0][45:54, 45:54],
ballet_environment._generate_template("orange plus") / 255.)
@parameterized.parameters(
"2_delay16",
"4_delay16",
"8_delay48",
)
def test_simple_builder(self, level_name):
dance_delay = int(level_name[-2:])
np.random.seed(0)
env = ballet_environment.simple_builder(level_name)
# check max steps are set to match paper settings
self.assertEqual(env._max_steps,
320 if dance_delay == 16 else 1024)
# test running a few steps of each
env.reset()
level_size = ballet_environment_core.ROOM_SIZE
upsample_size = ballet_environment.UPSAMPLE_SIZE
for i in range(8):
result = env.step(i) # check all 8 movements work
self.assertEqual(result.observation[0].shape,
(level_size[0] * upsample_size,
level_size[1] * upsample_size,
3))
self.assertEqual(str(result.observation[1])[:5],
np.array("watch"))
self.assertEqual(result.reward, 0.)
if __name__ == "__main__":
absltest.main()
| deepmind-research-master | hierarchical_transformer_memory/pycolab_ballet/ballet_environment_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The pycolab core of the environment for going to the ballet.
This builds the text-based (non-graphical) engine of the environment, and offers
a UI which a human can play (for a fixed level). However, the logic of level
creation, the graphics, and anything that is external to the pycolab engine
itself is contained in ballet_environment.py.
"""
import curses
import enum
from absl import app
from absl import flags
from pycolab import ascii_art
from pycolab import human_ui
from pycolab.prefab_parts import sprites as prefab_sprites
FLAGS = flags.FLAGS
ROOM_SIZE = (11, 11) # one square around edge will be wall.
DANCER_POSITIONS = [(2, 2), (2, 5), (2, 8),
(5, 2), (5, 8), # space in center for agent
(8, 2), (8, 5), (8, 8)]
AGENT_START = (5, 5)
AGENT_CHAR = "A"
WALL_CHAR = "#"
FLOOR_CHAR = " "
RESERVED_CHARS = [AGENT_CHAR, WALL_CHAR, FLOOR_CHAR]
POSSIBLE_DANCER_CHARS = [
chr(i) for i in range(65, 91) if chr(i) not in RESERVED_CHARS
]
DANCE_SEQUENCE_LENGTHS = 16
# movement directions for dancers / actions for agent
class DIRECTIONS(enum.IntEnum):
N = 0
NE = 1
E = 2
SE = 3
S = 4
SW = 5
W = 6
NW = 7
DANCE_SEQUENCES = {
"circle_cw": [
DIRECTIONS.N, DIRECTIONS.E, DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.W,
DIRECTIONS.W, DIRECTIONS.N, DIRECTIONS.N, DIRECTIONS.E, DIRECTIONS.E,
DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.N,
DIRECTIONS.E
],
"circle_ccw": [
DIRECTIONS.N, DIRECTIONS.W, DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.E,
DIRECTIONS.E, DIRECTIONS.N, DIRECTIONS.N, DIRECTIONS.W, DIRECTIONS.W,
DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.E, DIRECTIONS.E, DIRECTIONS.N,
DIRECTIONS.W
],
"up_and_down": [
DIRECTIONS.N, DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.N,
DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.N, DIRECTIONS.S,
DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.N, DIRECTIONS.S, DIRECTIONS.S,
DIRECTIONS.N
],
"left_and_right": [
DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.E,
DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.E, DIRECTIONS.W,
DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.W,
DIRECTIONS.E
],
"diagonal_uldr": [
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SE, DIRECTIONS.NW
],
"diagonal_urdl": [
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW, DIRECTIONS.NE
],
"plus_cw": [
DIRECTIONS.N, DIRECTIONS.S, DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.S,
DIRECTIONS.N, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.N, DIRECTIONS.S,
DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.W,
DIRECTIONS.E
],
"plus_ccw": [
DIRECTIONS.N, DIRECTIONS.S, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.S,
DIRECTIONS.N, DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.N, DIRECTIONS.S,
DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.E,
DIRECTIONS.W
],
"times_cw": [
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.NW, DIRECTIONS.SE,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.NW, DIRECTIONS.SE
],
"times_ccw": [
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.SE, DIRECTIONS.NW, DIRECTIONS.NE, DIRECTIONS.SW,
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.SE, DIRECTIONS.NW, DIRECTIONS.NE, DIRECTIONS.SW
],
"zee": [
DIRECTIONS.NE, DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.E,
DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW,
DIRECTIONS.E, DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.NE,
DIRECTIONS.SW, DIRECTIONS.NE
],
"chevron_down": [
DIRECTIONS.NW, DIRECTIONS.S, DIRECTIONS.SE, DIRECTIONS.NE, DIRECTIONS.N,
DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.S, DIRECTIONS.SW, DIRECTIONS.NW, DIRECTIONS.N, DIRECTIONS.SE,
DIRECTIONS.NW, DIRECTIONS.SE
],
"chevron_up": [
DIRECTIONS.SE, DIRECTIONS.N, DIRECTIONS.NW, DIRECTIONS.SW, DIRECTIONS.S,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.SW,
DIRECTIONS.N, DIRECTIONS.NE, DIRECTIONS.SE, DIRECTIONS.S, DIRECTIONS.NW,
DIRECTIONS.SE, DIRECTIONS.NW
],
}
class DancerSprite(prefab_sprites.MazeWalker):
"""A `Sprite` for dancers."""
def __init__(self, corner, position, character, motion, color, shape,
value=0.):
super(DancerSprite, self).__init__(
corner, position, character, impassable="#")
self.motion = motion
self.dance_sequence = DANCE_SEQUENCES[motion].copy()
self.color = color
self.shape = shape
self.value = value
self.is_dancing = False
def update(self, actions, board, layers, backdrop, things, the_plot):
if the_plot["task_phase"] == "dance" and self.is_dancing:
if not self.dance_sequence:
raise ValueError(
"Dance sequence is empty! Was this dancer repeated in the order?")
dance_move = self.dance_sequence.pop(0)
if dance_move == DIRECTIONS.N:
self._north(board, the_plot)
elif dance_move == DIRECTIONS.NE:
self._northeast(board, the_plot)
elif dance_move == DIRECTIONS.E:
self._east(board, the_plot)
elif dance_move == DIRECTIONS.SE:
self._southeast(board, the_plot)
elif dance_move == DIRECTIONS.S:
self._south(board, the_plot)
elif dance_move == DIRECTIONS.SW:
self._southwest(board, the_plot)
elif dance_move == DIRECTIONS.W:
self._west(board, the_plot)
elif dance_move == DIRECTIONS.NW:
self._northwest(board, the_plot)
if not self.dance_sequence: # done!
self.is_dancing = False
the_plot["time_until_next_dance"] = the_plot["dance_delay"]
else:
if self.position == things[AGENT_CHAR].position:
# Award the player the appropriate amount of reward, and end episode.
the_plot.add_reward(self.value)
the_plot.terminate_episode()
class PlayerSprite(prefab_sprites.MazeWalker):
"""The player / agent character.
MazeWalker class methods handle basic movement and collision detection.
"""
def __init__(self, corner, position, character):
super(PlayerSprite, self).__init__(
corner, position, character, impassable="#")
def update(self, actions, board, layers, backdrop, things, the_plot):
if the_plot["task_phase"] == "dance":
# agent's actions are ignored, this logic updates the dance phases.
if the_plot["time_until_next_dance"] > 0:
the_plot["time_until_next_dance"] -= 1
if the_plot["time_until_next_dance"] == 0: # next phase time!
if the_plot["dance_order"]: # start the next dance!
next_dancer = the_plot["dance_order"].pop(0)
things[next_dancer].is_dancing = True
else: # choice time!
the_plot["task_phase"] = "choice"
the_plot["instruction_string"] = the_plot[
"choice_instruction_string"]
elif the_plot["task_phase"] == "choice":
# agent can now move and make its choice
if actions == DIRECTIONS.N:
self._north(board, the_plot)
elif actions == DIRECTIONS.NE:
self._northeast(board, the_plot)
elif actions == DIRECTIONS.E:
self._east(board, the_plot)
elif actions == DIRECTIONS.SE:
self._southeast(board, the_plot)
elif actions == DIRECTIONS.S:
self._south(board, the_plot)
elif actions == DIRECTIONS.SW:
self._southwest(board, the_plot)
elif actions == DIRECTIONS.W:
self._west(board, the_plot)
elif actions == DIRECTIONS.NW:
self._northwest(board, the_plot)
def make_game(dancers_and_properties, dance_delay=16):
"""Constructs an ascii map, then uses pycolab to make it a game.
Args:
dancers_and_properties: list of (character, (row, column), motion, shape,
color, value), for placing objects in the world.
dance_delay: how long to wait between dances.
Returns:
this_game: Pycolab engine running the specified game.
"""
num_rows, num_cols = ROOM_SIZE
level_layout = []
# upper wall
level_layout.append("".join([WALL_CHAR] * num_cols))
# room
middle_string = "".join([WALL_CHAR] + [" "] * (num_cols - 2) + [WALL_CHAR])
level_layout.extend([middle_string for _ in range(num_rows - 2)])
# lower wall
level_layout.append("".join([WALL_CHAR] * num_cols))
def _add_to_map(obj, loc):
"""Adds an ascii character to the level at the requested position."""
obj_row = level_layout[loc[0]]
pre_string = obj_row[:loc[1]]
post_string = obj_row[loc[1] + 1:]
level_layout[loc[0]] = pre_string + obj + post_string
_add_to_map(AGENT_CHAR, AGENT_START)
sprites = {AGENT_CHAR: PlayerSprite}
dance_order = []
char_to_color_shape = []
# add dancers to level
for obj, loc, motion, shape, color, value in dancers_and_properties:
_add_to_map(obj, loc)
sprites[obj] = ascii_art.Partial(
DancerSprite, motion=motion, color=color, shape=shape, value=value)
char_to_color_shape.append((obj, color + " " + shape))
dance_order += obj
if value > 0.:
choice_instruction_string = motion
this_game = ascii_art.ascii_art_to_game(
art=level_layout,
what_lies_beneath=" ",
sprites=sprites,
update_schedule=[[AGENT_CHAR],
dance_order])
this_game.the_plot["task_phase"] = "dance"
this_game.the_plot["instruction_string"] = "watch"
this_game.the_plot["choice_instruction_string"] = choice_instruction_string
this_game.the_plot["dance_order"] = dance_order
this_game.the_plot["dance_delay"] = dance_delay
this_game.the_plot["time_until_next_dance"] = 1
this_game.the_plot["char_to_color_shape"] = char_to_color_shape
return this_game
def main(argv):
del argv # unused
these_dancers_and_properties = [
(POSSIBLE_DANCER_CHARS[1], (2, 2), "chevron_up", "triangle", "red", 1),
(POSSIBLE_DANCER_CHARS[2], (2, 5), "circle_ccw", "triangle", "red", 0),
(POSSIBLE_DANCER_CHARS[3], (2, 8), "plus_cw", "triangle", "red", 0),
(POSSIBLE_DANCER_CHARS[4], (5, 2), "plus_ccw", "triangle", "red", 0),
(POSSIBLE_DANCER_CHARS[5], (5, 8), "times_cw", "triangle", "red", 0),
(POSSIBLE_DANCER_CHARS[6], (8, 2), "up_and_down", "plus", "blue", 0),
(POSSIBLE_DANCER_CHARS[7], (8, 5), "left_and_right", "plus", "blue", 0),
(POSSIBLE_DANCER_CHARS[8], (8, 8), "zee", "plus", "blue", 0),
]
game = make_game(dancers_and_properties=these_dancers_and_properties)
# Note that these colors are only for human UI
fg_colours = {
AGENT_CHAR: (999, 999, 999), # Agent is white
WALL_CHAR: (300, 300, 300), # Wall, dark grey
FLOOR_CHAR: (0, 0, 0), # Floor
}
for (c, _, _, _, col, _) in these_dancers_and_properties:
fg_colours[c] = (999, 0, 0) if col == "red" else (0, 0, 999)
bg_colours = {
      c: (0, 0, 0) for c in RESERVED_CHARS + POSSIBLE_DANCER_CHARS[1:9]
}
ui = human_ui.CursesUi(
keys_to_actions={
# Basic movement.
curses.KEY_UP: DIRECTIONS.N,
curses.KEY_DOWN: DIRECTIONS.S,
curses.KEY_LEFT: DIRECTIONS.W,
curses.KEY_RIGHT: DIRECTIONS.E,
-1: 8, # Do nothing.
},
delay=500,
colour_fg=fg_colours,
colour_bg=bg_colours)
ui.play(game)
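# --- Editor's illustrative sketch, not part of the original module ----------
# The direction-to-offset mapping below is an assumption made only for this
# example: pycolab's MazeWalker moves north by decreasing the row index and
# west by decreasing the column index. Under that assumption, every 16-step
# sequence in DANCE_SEQUENCES sums to a (0, 0) net offset, i.e. each dance
# returns the dancer to its starting square, so dances can repeat in place.
_ILLUSTRATIVE_DIRECTION_DELTAS = {
    DIRECTIONS.N: (-1, 0), DIRECTIONS.NE: (-1, 1), DIRECTIONS.E: (0, 1),
    DIRECTIONS.SE: (1, 1), DIRECTIONS.S: (1, 0), DIRECTIONS.SW: (1, -1),
    DIRECTIONS.W: (0, -1), DIRECTIONS.NW: (-1, -1),
}
def _net_displacement(dance_sequence):
  """Returns the total (row, col) offset after performing a full sequence."""
  total_row, total_col = 0, 0
  for move in dance_sequence:
    delta_row, delta_col = _ILLUSTRATIVE_DIRECTION_DELTAS[move]
    total_row += delta_row
    total_col += delta_col
  return total_row, total_col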
if __name__ == "__main__":
app.run(main)
| deepmind-research-master | hierarchical_transformer_memory/pycolab_ballet/ballet_environment_core.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Visualize physical planning games in Domain Explorer.
"""
import functools
from absl import app
from absl import flags
from dm_control import composer
from dm_control import viewer
from dm_control.locomotion import walkers
from physics_planning_games import board_games
from physics_planning_games.mujoban.boxoban import boxoban_level_generator
from physics_planning_games.mujoban.mujoban import Mujoban
from physics_planning_games.mujoban.mujoban_level import MujobanLevel
flags.DEFINE_enum('environment_name', 'mujoban', [
'mujoban', 'go_7x7', 'tic_tac_toe_markers_features',
'tic_tac_toe_mixture_opponent_markers_features',
'tic_tac_toe_optimal_opponent_markers_features'],
'Name of an environment to load.')
FLAGS = flags.FLAGS
TIME_LIMIT = 1000
CONTROL_TIMESTEP = .1
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
environment_name = FLAGS.environment_name
if environment_name == 'mujoban':
walker = walkers.JumpingBallWithHead(add_ears=True, camera_height=0.25)
arena = MujobanLevel(boxoban_level_generator)
task = Mujoban(
walker=walker,
maze=arena,
control_timestep=CONTROL_TIMESTEP,
top_camera_height=64,
top_camera_width=48)
env = composer.Environment(
time_limit=TIME_LIMIT, task=task, strip_singleton_obs_buffer_dim=True)
else:
env = functools.partial(
board_games.load, environment_name=environment_name)
viewer.launch(env)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | physics_planning_games/explore.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Mujoban."""
from absl.testing import absltest
from dm_control import composer
from dm_control.locomotion import walkers
import dm_env as environment
import numpy as np
from physics_planning_games.mujoban.mujoban import Mujoban
from physics_planning_games.mujoban.mujoban_level import MujobanLevel
TIME_LIMIT = 5
CONTROL_TIMESTEP = .1
class MujobanTest(absltest.TestCase):
def test(self):
walker = walkers.JumpingBallWithHead(add_ears=True, camera_height=0.25)
arena = MujobanLevel()
task = Mujoban(
walker=walker,
maze=arena,
control_timestep=CONTROL_TIMESTEP,
top_camera_height=64,
top_camera_width=48)
env = composer.Environment(
time_limit=TIME_LIMIT,
task=task,
strip_singleton_obs_buffer_dim=True)
time_step = env.reset()
self.assertEqual(
set([
'pixel_layer', 'full_entity_layer', 'top_camera',
'walker/body_height', 'walker/end_effectors_pos',
'walker/joints_pos', 'walker/joints_vel',
'walker/sensors_accelerometer', 'walker/sensors_gyro',
'walker/sensors_touch', 'walker/sensors_velocimeter',
'walker/world_zaxis', 'walker/orientation',
]), set(time_step.observation.keys()))
top_camera = time_step.observation['top_camera']
self.assertEqual(np.uint8, top_camera.dtype)
self.assertEqual((64, 48, 3), top_camera.shape)
all_step_types = []
# Run enough actions that we are guaranteed to have restarted the
# episode at least once.
for _ in range(int(2*TIME_LIMIT/CONTROL_TIMESTEP)):
action = 2*np.random.random(env.action_spec().shape) - 1
time_step = env.step(action)
all_step_types.append(time_step.step_type)
self.assertEqual(set([environment.StepType.FIRST,
environment.StepType.MID,
environment.StepType.LAST]),
set(all_step_types))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | physics_planning_games/mujoban/mujoban_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A floor pad that is activated through touch."""
import weakref
from dm_control import composer
from dm_control import mjcf
import numpy as np
def _get_activator_box(pad_xpos, pad_size, boxes, tolerance=0.0):
"""Returns the activator box, if any. Otherwise returns None."""
# Ignore the height
pad_min = pad_xpos[0:2] - pad_size[0:2]
pad_max = pad_xpos[0:2] + pad_size[0:2]
for box in boxes:
box_xpos = np.array(box.xpos[0:2])
box_size = np.array(box.size[0:2])
min_ = pad_min + box_size - tolerance
max_ = pad_max - box_size + tolerance
in_range = np.logical_and(box_xpos >= min_, box_xpos <= max_).all()
if in_range:
return box
# No activator box was found
return None
class MujobanPad(composer.Entity):
"""A less sensitive floor pad for Mujoban."""
def _build(self, rgba=None, pressed_rgba=None,
size=1, height=0.02, detection_tolerance=0.0, name='mujoban_pad'):
rgba = tuple(rgba or (1, 0, 0, 1))
pressed_rgba = tuple(pressed_rgba or (0.2, 0, 0, 1))
self._mjcf_root = mjcf.RootElement(model=name)
self._site = self._mjcf_root.worldbody.add(
'site', type='box', name='site',
pos=[0, 0, (height / 2 or -0.001)],
size=[size / 2, size / 2, (height / 2 or 0.001)], rgba=rgba)
self._activated = False
    self._rgba = np.array(rgba, dtype=float)
    self._pressed_rgba = np.array(pressed_rgba, dtype=float)
self._activator = None
self._detection_tolerance = detection_tolerance
self._boxes = []
@property
def rgba(self):
return self._rgba
@property
def pressed_rgba(self):
return self._pressed_rgba
def register_box(self, box_entity):
self._boxes.append(weakref.proxy(box_entity))
@property
def site(self):
return self._site
@property
def boxes(self):
return self._boxes
@property
def activator(self):
return self._activator if self._activated else None
@property
def mjcf_model(self):
return self._mjcf_root
def initialize_episode_mjcf(self, unused_random_state):
self._activated = False
def initialize_episode(self, physics, unused_random_state):
self._update_activation(physics)
def _update_activation(self, physics):
# Note: we get the physically bound box, not an object from self._boxes.
# That's because the generator expression below generates bound objects.
box = _get_activator_box(
pad_xpos=np.array(physics.bind(self._site).xpos),
pad_size=np.array(physics.bind(self._site).size),
boxes=(physics.bind(box.geom) for box in self._boxes),
tolerance=self._detection_tolerance,)
if box:
self._activated = True
self._activator = box
else:
self._activated = False
self._activator = None
if self._activated:
physics.bind(self._site).rgba = self._pressed_rgba
else:
physics.bind(self._site).rgba = self._rgba
def before_step(self, physics, unused_random_state):
self._update_activation(physics)
def after_substep(self, physics, unused_random_state):
self._update_activation(physics)
@property
def activated(self):
"""Whether this floor pad is pressed at the moment."""
return self._activated
def reset(self, physics):
self._activated = False
physics.bind(self._site).rgba = self._rgba
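# --- Editor's illustrative sketch, not part of the original module ----------
# A self-contained demonstration of how _get_activator_box decides whether a
# box activates the pad. Real callers pass physics-bound geoms; _FakeGeom is a
# stand-in that only provides the two attributes the function reads (`xpos`
# and `size`) and exists purely for illustration.
class _FakeGeom(object):
  """Fake geom exposing only `xpos` and `size`, as read by the pad logic."""
  def __init__(self, xpos, size):
    self.xpos = xpos
    self.size = size
def _activator_box_demo():
  """Shows that only a box fully inside the pad footprint activates it."""
  pad_xpos = np.array([0., 0., 0.])
  pad_size = np.array([0.5, 0.5, 0.01])  # A 1x1 pad centred at the origin.
  inside = _FakeGeom(xpos=[0.1, 0.0, 0.2], size=[0.2, 0.2, 0.2])
  outside = _FakeGeom(xpos=[0.9, 0.0, 0.2], size=[0.2, 0.2, 0.2])
  assert _get_activator_box(pad_xpos, pad_size, [inside]) is inside
  assert _get_activator_box(pad_xpos, pad_size, [outside]) is None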
| deepmind-research-master | physics_planning_games/mujoban/mujoban_pad.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for generating Mujoban level.
"""
import labmaze
BOX_CHAR = 'B'
TARGET_CHAR = labmaze.defaults.OBJECT_TOKEN
_DEFAULT_LEVEL = """
#####
# #
#### # #
# . .# #
# . #
# .## $##
## #$$ #
## $@#
## ###
####"""
# The meaning of symbols here are the same as defined in
# http://sneezingtiger.com/sokoban/levels/sasquatch5Text.html. These are the
# same symbols as used by the Sokoban community.
EMPTY_CELL = ' '
GOAL = '.'
PLAYER = '@'
PLAYER_ON_GOAL = '+'
BOX = '$'
BOX_ON_GOAL = '*'
WALL = '#'
_SOKOBAN_SYMBOLS = [
EMPTY_CELL, GOAL, PLAYER, PLAYER_ON_GOAL, BOX, BOX_ON_GOAL, WALL
]
def single_level_generator(level=_DEFAULT_LEVEL):
while True:
yield level
def _ascii_to_text_grid_level(ascii_level):
"""Goes from official Sokoban ASCII art to string understood by Mujoban.
Args:
ascii_level: a multiline string; each character is a location in a
gridworld.
Returns:
A string.
"""
level = ascii_level
if level.startswith('\n'):
level = level[1:]
level = level.replace('$', BOX_CHAR)
level = level.replace('.', TARGET_CHAR)
level = level.replace(' ', '.')
level = level.replace('#', '*')
level = level.replace('@', 'P')
if level[-1] == '\n':
level = level[:-1]
# Pad
all_rows = level.split('\n')
width = max(len(row) for row in all_rows)
padded_rows = []
for row in all_rows:
row += '*' * (width - len(row))
padded_rows.append(row)
level = '\n'.join(padded_rows)
return level + '\n'
class MujobanLevel(labmaze.BaseMaze):
"""A maze that represents a level in Mujoban."""
def __init__(self, ascii_level_generator=single_level_generator):
"""Constructor.
Args:
ascii_level_generator: a Python generator. At each iteration, this should
return a string representing a level. The symbols in the string should be
those of http://sneezingtiger.com/sokoban/levels/sasquatch5Text.html.
These are the same symbols as used by the Sokoban community.
"""
self._level_iterator = ascii_level_generator()
self.regenerate()
def regenerate(self):
"""Regenerates the maze if required."""
level = next(self._level_iterator)
self._entity_layer = labmaze.TextGrid(_ascii_to_text_grid_level(level))
self._variation_layer = self._entity_layer.copy()
self._variation_layer[:] = '.'
self._num_boxes = (self._entity_layer == BOX_CHAR).sum()
num_targets = (self._entity_layer == TARGET_CHAR).sum()
if num_targets != self._num_boxes:
raise ValueError('Number of targets {} should equal number of boxes {}.'
.format(num_targets, self._num_boxes))
@property
def num_boxes(self):
return self._num_boxes
@property
def num_targets(self):
return self._num_boxes
@property
def entity_layer(self):
return self._entity_layer
@property
def variations_layer(self):
return self._variation_layer
@property
def height(self):
return self._entity_layer.shape[0]
@property
def width(self):
return self._entity_layer.shape[1]
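# --- Editor's illustrative sketch, not part of the original module ----------
# Minimal usage example: build the maze from the default ASCII level above and
# inspect the grid that the Mujoban task consumes. The character conventions
# ('*' wall, '.' floor, 'B' box, 'G' target, 'P' player) follow
# _ascii_to_text_grid_level; values are printed rather than asserted because
# they depend on whichever level the generator yields.
def _mujoban_level_demo():
  maze = MujobanLevel()  # Uses single_level_generator(_DEFAULT_LEVEL).
  print(maze.entity_layer)  # labmaze.TextGrid view of the level.
  print('boxes:', maze.num_boxes, 'targets:', maze.num_targets)
  print('height x width:', maze.height, maze.width)
  maze.regenerate()  # Draws the next level from the generator.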
| deepmind-research-master | physics_planning_games/mujoban/mujoban_level.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Top-level module definitions for mujoban."""
from physics_planning_games.mujoban.mujoban import Mujoban
from physics_planning_games.mujoban.mujoban_level import MujobanLevel
| deepmind-research-master | physics_planning_games/mujoban/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for mujoban_level."""
from absl.testing import absltest
from physics_planning_games.mujoban import mujoban_level
_LEVEL = """
#####
# @####
# $. #
###$.# #
# $.# #
# #$. #
# ###
######"""
_GRID_LEVEL = """********
*..P****
*..BG..*
***BG*.*
*..BG*.*
*.*BG..*
*....***
********
"""
class MujobanLevelTest(absltest.TestCase):
def test_ascii_to_text_grid_level(self):
grid_level = mujoban_level._ascii_to_text_grid_level(_LEVEL)
self.assertEqual(_GRID_LEVEL, grid_level)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | physics_planning_games/mujoban/mujoban_level_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Box props used in Mujoban that the agent pushes.
"""
import itertools
from dm_control import composer
from dm_control.entities import props
class Box(props.Primitive):
"""A class representing a box prop."""
def _build(self, half_lengths=None, mass=None, name='box'):
half_lengths = half_lengths or [0.05, 0.1, 0.15]
super(Box, self)._build(geom_type='box',
size=half_lengths,
mass=mass,
name=name)
class BoxWithSites(Box):
"""A class representing a box prop with sites on the corners."""
def _build(self, half_lengths=None, mass=None, name='box'):
half_lengths = half_lengths or [0.05, 0.1, 0.15]
super(BoxWithSites, self)._build(half_lengths=half_lengths, mass=mass,
name=name)
corner_positions = itertools.product([half_lengths[0], -half_lengths[0]],
[half_lengths[1], -half_lengths[1]],
[half_lengths[2], -half_lengths[2]])
corner_sites = []
for i, corner_pos in enumerate(corner_positions):
corner_sites.append(
self.mjcf_model.worldbody.add(
'site',
type='sphere',
name='corner_{}'.format(i),
size=[0.1],
pos=corner_pos,
rgba=[1, 0, 0, 1.0],
group=composer.SENSOR_SITES_GROUP))
self._corner_sites = tuple(corner_sites)
@property
def corner_sites(self):
return self._corner_sites
| deepmind-research-master | physics_planning_games/mujoban/props.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MuJoban task.
Mujoban is a single player puzzle-solving game embedded in the MuJoCo
simulation environment. The puzzle is based on the 2D game of Sokoban,
where an agent situated on a grid has to push boxes onto target locations.
"""
import collections
from dm_control import composer
from dm_control.composer.observation import observable as observable_lib
from dm_control.locomotion.arenas import labmaze_textures
from dm_control.locomotion.arenas.mazes import MazeWithTargets
from dm_env import specs
import numpy as np
from six.moves import range
from six.moves import zip
from physics_planning_games.mujoban import mujoban_level
from physics_planning_games.mujoban.mujoban_pad import MujobanPad
from physics_planning_games.mujoban.props import BoxWithSites
_FLOOR_GAP_CHAR = '#'
_AMBIENT_HEADLIGHT = 0.8
_BOX_SIZE = 0.4
_BOX_HEIGHT = 0.15
_BOX_MASS = 2.5
_BOX_FRICTION = [0.5, 0.005, 0.0001]
_BOX_RGBA = [173. / 255., 179. / 255., 60. / 255., 1.]
_BOX_PRESSED_RGBA = [0, 0, 1, 1]
_TARGET_RGBA = [1.0, 0., 0., 1.]
_PRESSED_TARGET_RGBA = [0., 1., 0., 1.]
_PEG_SIZE = 0.05
_PEG_HEIGHT = 0.25
_PEG_RGBA = [0.5, 0.5, 0.5, 1]
_PEG_ANGLE = np.pi / 4
# Aliveness in [-1., 0.].
_ALIVE_THRESHOLD = -0.5
# Constants used by the full entity layer
_WALL_LAYER = 0
_TARGET_LAYER = 1
_SOKOBAN_LAYER = 2
_BOX_LAYER = 3
def _round_positions(boxes, walker, last_round_walker):
"""Round float positions to snap objects to grid."""
round_walker = np.round(walker).astype('int32')
round_boxes = [np.round(box).astype('int32') for box in boxes]
for box in round_boxes:
if np.array_equal(box, round_walker):
round_walker = last_round_walker
return round_boxes, round_walker
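# --- Editor's illustrative sketch, not part of the original module ----------
# _round_positions snaps continuous world positions onto integer grid cells;
# if the rounded walker cell coincides with a rounded box cell, the walker is
# reported at its previous rounded cell instead. The numbers below are made up
# purely for illustration.
def _round_positions_demo():
  boxes = [np.array([2.1, 3.0])]
  walker = np.array([1.9, 3.04])
  last_round_walker = np.array([1, 3])
  round_boxes, round_walker = _round_positions(boxes, walker, last_round_walker)
  # round(1.9, 3.04) -> (2, 3) collides with the rounded box at (2, 3), so the
  # walker stays reported at its previous cell (1, 3).
  assert (round_boxes[0] == np.array([2, 3])).all()
  assert (round_walker == np.array([1, 3])).all()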
class Mujoban(composer.Task):
"""Requires objects to be moved onto matching-colored floor pads.
Agent only receives instantaneous rewards of +1 for the
timestep in which a box first enters a target, and -1 for the
timestep in which a box leaves the target. There is an additional reward of
+10 when all the boxes are put on targets, at which point the episode
terminates.
"""
def __init__(self,
walker,
maze,
target_height=0,
box_prop=None,
box_size=None,
box_mass=None,
with_grid_pegs=False,
detection_tolerance=0.0,
physics_timestep=0.001,
control_timestep=0.025,
top_camera_height=128,
top_camera_width=128,
box_on_target_reward=1.0,
level_solved_reward=10.0):
"""Initializes this task.
Args:
walker: A `Walker` object.
maze: A `BaseMaze` object.
target_height: The height of the target pads above the ground, in meters.
box_prop: An optional `Primitive` prop to use as the box.
box_size: An optional three element sequence defining the half lengths of
the sides of the box.
box_mass: Box mass. If this is a list or tuple, a random value is sampled
from the truncated exponential distribution in [a, b) where a =
box_mass[0] and b = box_mass[1], with scale factor box_mass[2] * (b -
a).
with_grid_pegs: Whether to add solid pegs at the corners of the maze
grid cells. This helps to enforce the usual Sokoban rules where
diagonal movements are forbidden.
detection_tolerance: A maximum length scale (in metres) within which a
box is allowed to stick outside a target pad while still activating it.
For example, if this is set to 0.1 then a box will activate a pad if it
sticks out of the pad by no more than 10 centimetres.
physics_timestep: The time step of the physics simulation.
control_timestep: Should be an integer multiple of the physics time step.
top_camera_height: An int; the height of the top camera in the
observation. Setting this to 0 will disable the top camera.
top_camera_width: An int; the width of the top camera in the observation.
Setting this to 0 will disable the top camera.
box_on_target_reward: A float; reward for putting a box on a target.
level_solved_reward: A float: reward for solving the level.
"""
skybox_texture = labmaze_textures.SkyBox(style='sky_03')
wall_textures = labmaze_textures.WallTextures(style='style_01')
floor_textures = labmaze_textures.FloorTextures(style='style_01')
self._detection_tolerance = detection_tolerance
self._box_prop = box_prop
self._box_on_target_reward = box_on_target_reward
self._level_solved_reward = level_solved_reward
self._maze = maze
self._arena = MazeWithTargets(
maze=maze,
xy_scale=1,
z_height=1,
skybox_texture=skybox_texture,
wall_textures=wall_textures,
floor_textures=floor_textures)
self._walker = walker
self._arena.mjcf_model.visual.headlight.ambient = [_AMBIENT_HEADLIGHT] * 3
self._arena.text_maze_regenerated_hook = self._regenerate_positions
self._first_step = True
# Targets.
self._targets = []
self._target_positions = []
# Boxes.
self._box_size = box_size or [_BOX_SIZE] * 2 + [_BOX_HEIGHT]
self._box_mass = box_mass or _BOX_MASS
self._boxes = []
self._box_positions = []
self._with_grid_pegs = with_grid_pegs
self._peg_body = None
self._last_walker_position = None
# Create walkers and corresponding observables.
self._walker.create_root_joints(self._arena.attach(self._walker))
enabled_observables = [self._walker.observables.sensors_touch,
self._walker.observables.orientation]
enabled_observables += self._walker.observables.proprioception
enabled_observables += self._walker.observables.kinematic_sensors
for observable in enabled_observables:
observable.enabled = True
if top_camera_width and top_camera_height:
self._arena.observables.top_camera.enabled = True
self._arena.observables.top_camera.width = top_camera_width
self._arena.observables.top_camera.height = top_camera_height
    # Symbolic entity representation in labyrinth (labmaze) format.
    self._entity_layer = self._maze.entity_layer
    # The pixel layer is a pixel rendering of the symbolic Sokoban board.
    self._pixel_layer = np.zeros(self._entity_layer.shape + (3,), dtype='uint8')
self._full_entity_layer = np.zeros(self._entity_layer.shape + (4,),
dtype='bool')
pixel_layer_obs = observable_lib.Generic(lambda _: self._pixel_layer)
pixel_layer_obs.enabled = True
full_entity_layer_obs = observable_lib.Generic(
lambda _: self._full_entity_layer)
full_entity_layer_obs.enabled = True
self._task_observables = collections.OrderedDict({
'pixel_layer': pixel_layer_obs,
'full_entity_layer': full_entity_layer_obs,
})
# Set time steps.
self.set_timesteps(
physics_timestep=physics_timestep, control_timestep=control_timestep)
self._discount = 1.
@property
def name(self):
return 'Mujoban'
@property
def root_entity(self):
return self._arena
def _regenerate_positions(self):
self._object_positions = self._arena.find_token_grid_positions(
[mujoban_level.TARGET_CHAR, mujoban_level.BOX_CHAR])
self._box_positions = self._arena.grid_to_world_positions(
self._object_positions[mujoban_level.BOX_CHAR])
target_grid_positions = self._object_positions[mujoban_level.TARGET_CHAR]
self._target_positions = self._arena.grid_to_world_positions(
target_grid_positions)
for idx in range(len(self._target_positions)):
target_grid_position = target_grid_positions[idx]
grid_y, grid_x = target_grid_position
self._arena.maze.variations_layer[grid_y, grid_x] = _FLOOR_GAP_CHAR
def initialize_episode_mjcf(self, random_state):
self._arena.regenerate()
# Clear existing targets and boxes
for target in self._targets:
target.detach()
self._targets = []
for box in self._boxes:
box.detach()
self._boxes = []
self._arena.mjcf_model.contact.remove('pair')
for _ in range(self._maze.num_targets):
target = MujobanPad(
size=self._arena.xy_scale,
height=0,
detection_tolerance=self._detection_tolerance)
self._arena.attach(target)
self._targets.append(target)
for _ in range(self._maze.num_boxes):
box = self._box_prop
if not box:
box = BoxWithSites(half_lengths=self._box_size)
box.geom.mass = _BOX_MASS
box.geom.rgba = [0, 0, 0, 1] # Will be randomized for each episode.
frame = self._arena.attach(box)
frame.add('joint', type='slide', axis=[1, 0, 0], name='x_slider')
frame.add('joint', type='slide', axis=[0, 1, 0], name='y_slider')
frame.add('joint', type='slide', axis=[0, 0, 1], name='z_slider')
self._boxes.append(box)
for target in self._targets:
target.register_box(box)
# Reduce the friction between box and ground.
ground_geom = self._arena.mjcf_model.find('geom', 'ground')
self._arena.mjcf_model.contact.add(
'pair',
geom1=box.geom,
geom2=ground_geom,
condim=6,
friction=[
_BOX_FRICTION[0], _BOX_FRICTION[0], _BOX_FRICTION[1],
_BOX_FRICTION[2], _BOX_FRICTION[2]
])
# Set box masses.
for box in self._boxes:
box.geom.mass = _BOX_MASS
box.geom.rgba[:] = _BOX_RGBA
for target in self._targets:
target.rgba[:] = _TARGET_RGBA
target.pressed_rgba[:] = _PRESSED_TARGET_RGBA
if self._with_grid_pegs:
if self._peg_body is not None:
self._peg_body.remove()
self._peg_body = self._arena.mjcf_model.worldbody.add('body')
for y in range(self._arena.maze.height - 1):
for x in range(self._arena.maze.width - 1):
peg_x, peg_y, _ = self._arena.grid_to_world_positions(
[[x + 0.5, y + 0.5]])[0]
self._peg_body.add(
'geom', type='box',
size=[_PEG_SIZE / np.sqrt(2),
_PEG_SIZE / np.sqrt(2),
_PEG_HEIGHT / 2],
pos=[peg_x, peg_y, _PEG_HEIGHT / 2],
quat=[np.cos(_PEG_ANGLE / 2), 0, 0, np.sin(_PEG_ANGLE / 2)],
rgba=_PEG_RGBA)
def initialize_episode(self, physics, random_state):
self._first_step = True
self._was_activated = [False] * len(self._targets)
self._is_solved = False
self._discount = 1.
self._walker.reinitialize_pose(physics, random_state)
spawn_position = self._arena.spawn_positions[0]
spawn_rotation = random_state.uniform(-np.pi, np.pi)
spawn_quat = np.array(
[np.cos(spawn_rotation / 2), 0, 0,
np.sin(spawn_rotation / 2)])
self._walker.shift_pose(
physics, [spawn_position[0], spawn_position[1], 0.0], spawn_quat)
for box, box_xy_position in zip(self._boxes, self._box_positions):
# Position at the middle of a maze cell.
box_position = np.array(
[box_xy_position[0], box_xy_position[1], self._box_size[2]])
# Commit the box's final pose.
box.set_pose(physics, position=box_position, quaternion=[1., 0., 0., 0.])
for target, target_position in zip(self._targets, self._target_positions):
target.set_pose(physics, position=target_position)
target.reset(physics)
self._update_entity_pixel_layers(physics)
def before_step(self, physics, actions, random_state):
if isinstance(actions, list):
actions = np.concatenate(actions)
super(Mujoban, self).before_step(physics, actions, random_state)
if self._first_step:
self._first_step = False
else:
self._was_activated = [target.activated for target in self._targets]
def _get_object_positions_in_grid(self, physics):
box_positions = self._arena.world_to_grid_positions(
[physics.bind(box.geom).xpos for box in self._boxes])
walker_position = self._arena.world_to_grid_positions(
[physics.bind(self._walker.root_body).xpos])[0]
return box_positions, walker_position
def _update_entity_pixel_layers(self, physics):
"""Updates the pixel observation and both layered representations.
Mujoban offers 3 grid representations of the world:
    * the pixel layer: this is a grid representation with an RGB value at
      each grid point;
    * the entity layer: this is a grid representation with a character at
      each grid point. This representation hides information, since if Sokoban
      or a box is over a target, then the target is occluded. This is the
      official entity layer used by arenas, which is based on dm_control labmaze;
    * the full entity layer: this is a grid representation with a boolean vector
of length 4 at each grid point. The first value is `True` iff there is a
wall at this location. The second value is `True` iff there is a target at
this location. The third value is for Sokoban, and fourth value is for
boxes. Note that this is not a one-hot encoding since Sokoban or a box
can share the same location as a target.
Args:
physics: a Mujoco physics object.
Raises:
RuntimeError: if a box or walker are overlapping with a wall.
"""
# The entity layer from the maze is a string that shows the maze at the
# *beginning* of the level. This is fixed throughout an episode.
entity_layer = self._maze.entity_layer.copy()
box_positions, walker_position = self._get_object_positions_in_grid(physics)
# round positions to snap to grid.
box_positions, walker_position = _round_positions(
box_positions, walker_position, self._last_walker_position)
# setup pixel layer
map_size = entity_layer.shape
pixel_layer = np.ndarray(map_size + (3,), dtype='uint8')
pixel_layer.fill(128)
# setup full entity layer
full_entity_layer = np.zeros(map_size + (4,), dtype='bool')
# remove boxes and agent
entity_layer[entity_layer == mujoban_level.BOX_CHAR] = '.'
entity_layer[entity_layer == 'P'] = '.'
# draw empty space and goals
pixel_layer[entity_layer == '.'] = [0, 0, 0]
pixel_layer[entity_layer == 'G'] = [255, 0, 0]
full_entity_layer[:, :, _WALL_LAYER] = True
full_entity_layer[:, :, _WALL_LAYER][entity_layer == '.'] = False
full_entity_layer[:, :, _WALL_LAYER][entity_layer == 'G'] = False
full_entity_layer[:, :, _TARGET_LAYER][entity_layer == 'G'] = True
# update boxes
for pos in box_positions:
# to ensure we are not changing the walls.
if entity_layer[pos[0], pos[1]] == '*':
        raise RuntimeError('Box and wall positions are overlapping and this '
                           'should not happen. It requires investigation '
                           'and fixing.')
# the entity layer has no representation of box on goal.
entity_layer[pos[0], pos[1]] = mujoban_level.BOX_CHAR
if np.array_equal(pixel_layer[pos[0], pos[1]], [255, 0, 0]):
pixel_layer[pos[0], pos[1]] = [0, 255, 0] # box on goal
else:
pixel_layer[pos[0], pos[1]] = [255, 255, 0]
full_entity_layer[pos[0], pos[1], _BOX_LAYER] = True
# update player
if entity_layer[walker_position[0], walker_position[1]] == '*':
      raise RuntimeError('Walker and wall positions are overlapping and this '
                         'should not have happened. It requires investigation '
                         'and fixing.')
entity_layer[walker_position[0], walker_position[1]] = 'P'
pixel_layer[walker_position[0], walker_position[1]] = 0, 0, 255
full_entity_layer[
walker_position[0], walker_position[1], _SOKOBAN_LAYER] = True
self._last_walker_position = walker_position
self._entity_layer = entity_layer
self._pixel_layer = pixel_layer
self._full_entity_layer = full_entity_layer
def after_step(self, physics, random_state):
super(Mujoban, self).after_step(physics, random_state)
for box in self._boxes:
physics.bind(box.geom).rgba = _BOX_RGBA
for target in self._targets:
if target.activated:
target.activator.rgba = _BOX_PRESSED_RGBA
self._update_entity_pixel_layers(physics)
self._is_solved = all([target.activated for target in self._targets])
if self._is_solved:
self._discount = 0.
def get_reward(self, physics):
reward = 0.0
for target, was_activated in zip(self._targets, self._was_activated):
if target.activated and not was_activated:
reward += self._box_on_target_reward
elif was_activated and not target.activated:
reward -= self._box_on_target_reward
if self._is_solved:
reward += self._level_solved_reward
return reward
def get_discount(self, physics):
return self._discount
def should_terminate_episode(self, physics):
is_dead = self._walker.aliveness(physics) < _ALIVE_THRESHOLD
return self._is_solved or is_dead
def get_reward_spec(self):
return specs.ArraySpec(shape=[], dtype=np.float32)
@property
def task_observables(self):
return self._task_observables
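# --- Editor's illustrative sketch, not part of the original module ----------
# A small helper showing how a consumer of the 'full_entity_layer' observation
# (documented in _update_entity_pixel_layers) might decode it using the layer
# constants defined at the top of this file. This is an assumption about how
# downstream code typically reads the observation, not part of the task API.
def _decode_full_entity_layer(full_entity_layer):
  """Returns (row, col) grid coordinates of walls, targets, Sokoban, boxes."""
  walls = np.argwhere(full_entity_layer[..., _WALL_LAYER])
  targets = np.argwhere(full_entity_layer[..., _TARGET_LAYER])
  sokoban = np.argwhere(full_entity_layer[..., _SOKOBAN_LAYER])
  boxes = np.argwhere(full_entity_layer[..., _BOX_LAYER])
  return walls, targets, sokoban, boxes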
| deepmind-research-master | physics_planning_games/mujoban/mujoban.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Level generator for Mujoban based on levels from follwing dataset.
https://github.com/deepmind/boxoban-levels/
"""
import glob
import os
import zipfile
import numpy as np
import requests
BOXOBAN_URL = "https://github.com/deepmind/boxoban-levels/archive/master.zip"
def boxoban_level_generator(levels_set="unfiltered", data_split="valid"):
env = Boxoban(levels_set=levels_set, data_split=data_split)
while True:
index = np.random.randint(0, env.num_levels-1)
yield env.levels[index]
class Boxoban(object):
"""Class for loading and generatting Boxoban levels."""
def __init__(self,
levels_set="unfiltered",
data_split="valid"):
self._levels_set = levels_set
self._data_split = data_split
self._levels = []
data_file_path_local = os.path.join(os.path.dirname(__file__),
"boxoban_cache",
"{}_{}.npz".format(self._levels_set,
self._data_split))
data_file_path_global = os.path.join("/tmp/boxoban_cache",
"{}_{}.npz".format(self._levels_set,
self._data_split))
if os.path.exists(data_file_path_local):
self.levels = np.load(data_file_path_local)["levels"]
elif os.path.exists(data_file_path_global):
self.levels = np.load(data_file_path_global)["levels"]
else:
self.levels = self.get_data()
self.num_levels = len(self.levels)
def get_data(self):
"""Downloads and cache the data."""
try:
cache_path = os.path.join(
os.path.dirname(__file__), "boxoban_cache")
os.makedirs(cache_path, exist_ok=True)
except PermissionError:
cache_path = os.path.join("/tmp/boxoban_cache")
if not os.path.exists(cache_path):
os.makedirs(cache_path, exist_ok=True)
# Get the zip file
zip_file_path = os.path.join(cache_path, "master.zip")
if not os.path.exists(zip_file_path):
response = requests.get(BOXOBAN_URL, stream=True)
handle = open(zip_file_path, "wb")
for chunk in response.iter_content(chunk_size=512):
if chunk:
handle.write(chunk)
handle.close()
with zipfile.ZipFile(zip_file_path, "r") as zipref:
zipref.extractall(cache_path)
# convert to npz
path = os.path.join(cache_path, "boxoban-levels-master",
self._levels_set,
self._data_split)
files = glob.glob(path + "/*.txt")
levels = "".join([open(f, "r").read() for f in files])
levels = levels.split("\n;")
levels = ["\n".join(item.split("\n")[1:]) for item in levels]
levels = np.asarray(levels)
data_file_path = os.path.join(
cache_path, "{}_{}.npz".format(self._levels_set, self._data_split))
np.savez(data_file_path, levels=levels)
return levels
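# --- Editor's illustrative sketch, not part of the original module ----------
# Minimal usage example mirroring how explore.py wires this generator into a
# MujobanLevel. Note that constructing Boxoban for the first time may download
# and cache the dataset, so network access is assumed here.
def _boxoban_demo():
  from physics_planning_games.mujoban.mujoban_level import MujobanLevel
  maze = MujobanLevel(boxoban_level_generator)
  print(maze.entity_layer)  # One sampled level in Mujoban's grid format.
  maze.regenerate()  # Parses a fresh level drawn from the dataset.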
| deepmind-research-master | physics_planning_games/mujoban/boxoban.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from physics_planning_games.board_games import tic_tac_toe_logic
class TicTacToeGameLogicTest(parameterized.TestCase):
def setUp(self):
super(TicTacToeGameLogicTest, self).setUp()
self.logic = tic_tac_toe_logic.TicTacToeGameLogic()
self.expected_board_state = np.zeros((3, 3, 3), dtype=bool)
self.expected_board_state[..., 0] = True # All positions initially empty.
def test_valid_move_sequence(self):
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = tic_tac_toe_logic.SingleMarkerAction(col=1, row=2)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[action.row, action.col, 0] = False
self.expected_board_state[action.row, action.col, 1] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = tic_tac_toe_logic.SingleMarkerAction(col=0, row=1)
self.assertTrue(self.logic.apply(player=1, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[action.row, action.col, 0] = False
self.expected_board_state[action.row, action.col, 2] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
def test_invalid_move_sequence(self):
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = tic_tac_toe_logic.SingleMarkerAction(col=1, row=2)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[action.row, action.col, 0] = False
self.expected_board_state[action.row, action.col, 1] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
# Player 0 tries to move again in the same location.
action = tic_tac_toe_logic.SingleMarkerAction(col=1, row=2)
self.assertFalse(self.logic.apply(player=0, action=action),
msg='Invalid action was accepted: {}'.format(action))
# Player 1 tries to move in the same location as player 0.
self.assertFalse(self.logic.apply(player=1, action=action),
msg='Invalid action was accepted: {}'.format(action))
# The board state should not have changed as a result of invalid actions.
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
@parameterized.named_parameters([
dict(testcase_name='player_0_win',
move_sequence=((0, 0, 0),
(1, 0, 1),
(0, 1, 0),
(1, 2, 1),
(0, 2, 0)),
winner_id=0),
dict(testcase_name='player_1_win',
move_sequence=((0, 0, 0),
(1, 0, 2),
(0, 1, 0),
(1, 1, 1),
(0, 0, 1),
(1, 2, 0)),
winner_id=1),
dict(testcase_name='draw',
move_sequence=((0, 0, 0),
(1, 1, 1),
(0, 1, 0),
(1, 2, 0),
(0, 0, 2),
(1, 0, 1),
(0, 2, 1),
(1, 2, 2),
(0, 1, 2)),
winner_id=None)])
def test_reward_and_termination(self, move_sequence, winner_id):
for (player_id, row, col) in move_sequence:
self.assertFalse(self.logic.is_game_over)
self.assertDictEqual(self.logic.get_reward, {0: 0.0, 1: 0.0})
action = tic_tac_toe_logic.SingleMarkerAction(col=col, row=row)
self.assertTrue(self.logic.apply(player=player_id, action=action),
msg='Invalid action: {}'.format(action))
self.assertTrue(self.logic.is_game_over)
rewards = self.logic.get_reward
if winner_id is not None:
loser_id = 1 - winner_id
self.assertDictEqual(rewards, {winner_id: 1.0, loser_id: 0.0})
else: # Draw
self.assertDictEqual(rewards, {0: 0.5, 1: 0.5})
def test_random_opponent_vs_optimal(self):
"""Play random v optimal opponents and check that optimal largely wins.
"""
rand_state = np.random.RandomState(42)
optimal_opponent = tic_tac_toe_logic.TicTacToeOptimalOpponent()
random_opponent = tic_tac_toe_logic.TicTacToeRandomOpponent()
players = [optimal_opponent, random_opponent]
optimal_returns = []
random_returns = []
for _ in range(20):
logic = tic_tac_toe_logic.TicTacToeGameLogic()
optimal_opponent.reset()
random_opponent.reset()
rand_state.shuffle(players)
current_player_idx = 0
while not logic.is_game_over:
current_player = players[current_player_idx]
action = current_player.policy(logic, rand_state)
self.assertTrue(logic.apply(current_player_idx, action),
msg='Opponent {} selected invalid action {}'.format(
current_player, action))
current_player_idx = (current_player_idx + 1) % 2
# Record the winner.
reward = logic.get_reward
if players[0] == optimal_opponent:
optimal_return = reward[0]
random_return = reward[1]
else:
optimal_return = reward[1]
random_return = reward[0]
optimal_returns.append(optimal_return)
random_returns.append(random_return)
mean_optimal_returns = np.mean(optimal_returns)
mean_random_returns = np.mean(random_returns)
self.assertGreater(mean_optimal_returns, 0.9)
self.assertLess(mean_random_returns, 0.1)
@parameterized.named_parameters([
dict(testcase_name='pos0',
move_sequence=((0, 0, 1),
(1, 1, 1),
(0, 0, 2),
(1, 1, 2)),
optimal_move=(0, 0)),
dict(testcase_name='pos1',
move_sequence=((0, 0, 1),
(1, 1, 2),
(0, 0, 2),
(1, 1, 1)),
optimal_move=(0, 0)),
dict(testcase_name='pos2',
move_sequence=((0, 2, 1),
(1, 1, 2),
(0, 2, 2),
(1, 1, 1)),
optimal_move=(2, 0)),
])
def test_minimax_policy(self, move_sequence, optimal_move):
rand_state = np.random.RandomState(42)
for (player_id, row, col) in move_sequence:
action = tic_tac_toe_logic.SingleMarkerAction(col=col, row=row)
self.assertTrue(self.logic.apply(player=player_id, action=action),
msg='Invalid action: {}'.format(action))
state = self.logic.open_spiel_state
planner_action = tic_tac_toe_logic.tic_tac_toe_minimax(state,
rand_state)
self.assertEqual(planner_action, optimal_move)
# Do the same but with np array as input
self.logic = tic_tac_toe_logic.TicTacToeGameLogic()
for (player_id, row, col) in move_sequence:
action = tic_tac_toe_logic.SingleMarkerAction(col=col, row=row)
self.assertTrue(self.logic.apply(player=player_id, action=action),
msg='Invalid action: {}'.format(action))
board = self.logic.get_board_state()
planner_action = tic_tac_toe_logic.tic_tac_toe_minimax(board,
rand_state)
self.assertEqual(planner_action, optimal_move)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | physics_planning_games/board_games/tic_tac_toe_logic_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TicTacToe logic wrapper for use in manipulation tasks."""
import collections
import itertools
import numpy as np
from physics_planning_games.board_games import logic_base
from open_spiel.python.algorithms import minimax
import pyspiel
SingleMarkerAction = collections.namedtuple('SingleMarkerAction',
['row', 'col'])
force_random_start_position = False
class TicTacToeGameLogic(logic_base.OpenSpielBasedLogic):
"""Logic for TicTacToe game."""
def __init__(self):
self.reset()
def reset(self):
"""Resets the game state."""
# For now we always assume we are the starting player.
game = pyspiel.load_game('tic_tac_toe')
self._open_spiel_state = game.new_initial_state()
if force_random_start_position:
# For debugging purposes only, force some random moves
rand_state = np.random.RandomState(46)
rand_player = TicTacToeRandomOpponent()
num_moves = 4
for _ in range(num_moves):
action = rand_player.policy(self, rand_state)
action_1d = np.ravel_multi_index(action, (3, 3))
self._open_spiel_state.apply_action(action_1d)
def get_board_state(self):
"""Returns the logical board state as a numpy array.
Returns:
A boolean array of shape (H, W, C), where H=3, W=3 (height and width
of the board) and C=3 for the 3 planes. The 3 planes are, in order,
unmarked squares, x's (player 0) and y's (player 1).
"""
board_state = np.reshape(
        np.array(self._open_spiel_state.observation_tensor(0), dtype=bool),
[3, 3, 3])
board_state = np.transpose(board_state, [1, 2, 0])
board_state = board_state[:, :, [0, 2, 1]]
return board_state
def apply(self, player, action):
"""Checks whether action is valid, and if so applies it to the game state.
Args:
player: Integer specifying the player ID; either 0 or 1.
action: A `SingleMarkerAction` instance.
Returns:
True if the action was valid, else False.
"""
action_value = np.ravel_multi_index((action.row, action.col), (3, 3))
if self._open_spiel_state.current_player() != player:
return False
try:
self._open_spiel_state.apply_action(action_value)
was_valid_move = True
except RuntimeError:
was_valid_move = False
return was_valid_move
class TicTacToeRandomOpponent(logic_base.Opponent):
"""An easy opponent for TicTacToe."""
def __init__(self):
pass
def reset(self):
"""Resets the opponent's internal state (not implemented)."""
pass
def policy(self, game_logic, random_state):
"""Return a random, valid move.
Args:
game_logic: TicTacToeGameLogic state of the game.
random_state: An instance of `np.random.RandomState`
Returns:
SingleMarkerAction of opponent.
"""
if game_logic.is_game_over:
return None
valid_moves = game_logic.open_spiel_state.legal_actions()
assert valid_moves
move = random_state.choice(valid_moves)
row, col = np.unravel_index(move, shape=(3, 3))
return SingleMarkerAction(row=row, col=col)
class TicTacToeMixtureOpponent(logic_base.Opponent):
"""A TicTacToe opponent which makes a mixture of optimal and random moves.
The optimal mixture component uses minimax search.
"""
def __init__(self, mixture_p):
"""Initialize the mixture opponent.
Args:
mixture_p: The mixture probability. We choose moves from the random
opponent with probability mixture_p and moves from the optimal
opponent with probability 1 - mixture_p.
"""
self._random_opponent = TicTacToeRandomOpponent()
self._optimal_opponent = TicTacToeOptimalOpponent()
self._mixture_p = mixture_p
def reset(self):
pass
def policy(self, game_logic, random_state):
if random_state.rand() < self._mixture_p:
return self._random_opponent.policy(game_logic, random_state)
else:
return self._optimal_opponent.policy(game_logic, random_state)
class TicTacToeOptimalOpponent(logic_base.Opponent):
"""A TicTacToe opponent which makes perfect moves.
Uses minimax search.
"""
def __init__(self):
pass
def reset(self):
pass
def policy(self, game_logic, random_state):
action = tic_tac_toe_minimax(game_logic.open_spiel_state, random_state)
return action
def numpy_array_to_open_spiel_state(board_state):
"""Take a numpy observation [3x3x3] bool area and create an OpenSpiel state.
Args:
board_state: 3x3x3 bool array with [col, row, c] with c indexing, in order,
empty squares, x moves, y moves.
Returns:
open_spiel_state: OpenSpiel state of this position.
"""
game = pyspiel.load_game('tic_tac_toe')
open_spiel_state = game.new_initial_state()
x_moves = np.flatnonzero(board_state[:, :, 1])
y_moves = np.flatnonzero(board_state[:, :, 2])
for x_m, y_m in itertools.zip_longest(x_moves, y_moves):
if open_spiel_state.is_terminal():
break
open_spiel_state.apply_action(x_m)
if open_spiel_state.is_terminal():
break
if y_m is not None:
open_spiel_state.apply_action(y_m)
return open_spiel_state
def open_spiel_move_to_single_marker_action(action):
row, col = np.unravel_index(action, shape=(3, 3))
return SingleMarkerAction(row=row, col=col)
def tic_tac_toe_random_move(state, random_state):
"""Returns a legal move at random from current state.
Args:
state: World state of the game. Either an OpenSpiel state
or a numpy encoding of the board.
    random_state: numpy random state used for choosing a legal move uniformly
      at random.
Returns:
action: SingleMarkerAction of a random move.
"""
if isinstance(state, np.ndarray):
spiel_state = numpy_array_to_open_spiel_state(state)
else:
spiel_state = state
if spiel_state.is_terminal():
return False
legal_actions = spiel_state.legal_actions()
action = random_state.choice(legal_actions)
return open_spiel_move_to_single_marker_action(action)
def tic_tac_toe_minimax(state, random_state):
"""Tree search from the world_state in order to find the optimal action.
Args:
state: World state of the game. Either an OpenSpiel state
or a numpy encoding of the board.
random_state: numpy random state used for choosing randomly if there is more
than one optimal action.
Returns:
action: SingleMarkerAction of an optimal move.
"""
if isinstance(state, np.ndarray):
spiel_state = numpy_array_to_open_spiel_state(state)
else:
spiel_state = state
if spiel_state.is_terminal():
return False
current_player = spiel_state.current_player()
legal_actions = spiel_state.legal_actions()
best_actions = []
best_value = -100
for action in legal_actions:
state_after_action = spiel_state.clone()
state_after_action.apply_action(action)
value, _ = minimax.expectiminimax(state_after_action, 100, None,
current_player)
if value > best_value:
best_value = value
best_actions = [action]
elif value == best_value:
best_actions.append(action)
assert best_actions
action = random_state.choice(best_actions)
return open_spiel_move_to_single_marker_action(action)
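# --- Illustrative usage sketch (editor's addition, not part of the original
# module). Drives the game logic and opponents defined above in a short
# self-play loop; only names from this file plus numpy are assumed.
def _demo_self_play(num_games=1):
  """Plays the random vs optimal opponents; returns per-game reward dicts."""
  rand_state = np.random.RandomState(0)
  rewards = []
  for _ in range(num_games):
    logic = TicTacToeGameLogic()
    players = [TicTacToeRandomOpponent(), TicTacToeOptimalOpponent()]
    current_player = 0  # Player 0 always moves first.
    while not logic.is_game_over:
      action = players[current_player].policy(logic, rand_state)
      valid = logic.apply(current_player, action)
      assert valid, 'Opponent produced an invalid move.'
      current_player = (current_player + 1) % 2
    rewards.append(logic.get_reward)
  return rewards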
| deepmind-research-master | physics_planning_games/board_games/tic_tac_toe_logic.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Abstract base classes and utility functions for logical aspects of the games.
"""
import abc
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
class GameLogic(ABC):
"""Define the abstrat game logic class.
"""
@abc.abstractmethod
def __init__(self):
pass
@abc.abstractmethod
def reset(self):
pass
@abc.abstractproperty
def is_game_over(self):
"""Boolean specifying whether the current game has ended."""
@abc.abstractproperty
def get_reward(self):
pass
@abc.abstractmethod
def get_board_state(self):
"""Returns the logical board state as a numpy array."""
@abc.abstractmethod
def apply(self, player, action):
"""Checks whether action is valid, and if so applies it to the game state.
Args:
player: Integer specifying the player ID; either 0 or 1.
action: A `GoMarkerAction` instance.
Returns:
True if the action was valid, else False.
"""
class OpenSpielBasedLogic(GameLogic):
"""GameLogic using OpenSpiel for tracking game state.
"""
@property
def is_game_over(self):
"""Boolean specifying whether the current game has ended."""
return self._open_spiel_state.is_terminal()
@property
def get_reward(self):
"""Returns a dictionary that maps from `{player_id: player_reward}`."""
if self.is_game_over:
player0_return = self._open_spiel_state.player_return(0)
# Translate from OpenSpiel returns to 0.5 for draw, -1 for loss,
# +1 for win.
if player0_return == 0.:
reward = {0: 0.5, 1: 0.5}
elif player0_return == 1.:
reward = {0: 1., 1: 0.}
else:
assert player0_return == -1.
reward = {0: 0., 1: 1.}
else:
reward = {0: 0.,
1: 0.}
return reward
@property
def open_spiel_state(self):
"""OpenSpiel object representing the underlying game state."""
return self._open_spiel_state
class Opponent(ABC):
"""Abstract Opponent class."""
@abc.abstractmethod
def __init__(self):
pass
@abc.abstractmethod
def reset(self):
pass
@abc.abstractmethod
def policy(self, game_logic, random_state):
"""Return policy action.
Args:
game_logic: Go game logic state.
random_state: Numpy random state object.
Returns:
NamedTuple indicating opponent move.
"""
| deepmind-research-master | physics_planning_games/board_games/logic_base.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Shared base class for two-player Jaco arm board games.
"""
import functools
from dm_control import composer
from dm_control.composer import initializers
from dm_control.composer.observation import observable
from dm_control.composer.variation import distributions
from dm_control.composer.variation import rotations
from dm_control.entities.manipulators import base
from dm_control.entities.manipulators import kinova
import numpy as np
from physics_planning_games.board_games._internal import arenas
from physics_planning_games.board_games._internal import observations
_ARM_Y_OFFSET = 0.4
_TCP_LOWER_BOUNDS = (-0.1, -0.1, 0.2)
_TCP_UPPER_BOUNDS = (0.1, 0.1, 0.4)
# Player IDs
SELF = 0
OPPONENT = 1
def _uniform_downward_rotation():
angle = distributions.Uniform(-np.pi, np.pi, single_sample=True)
quaternion = rotations.QuaternionFromAxisAngle(axis=(0., 0., 1.), angle=angle)
return functools.partial(rotations.QuaternionPreMultiply(quaternion),
initial_value=base.DOWN_QUATERNION)
class JacoArmBoardGame(composer.Task):
"""Base class for two-player checker-like board games."""
def __init__(self, observation_settings, opponent, game_logic, board,
markers):
"""Initializes the task.
Args:
observation_settings: An `observations.ObservationSettings` namedtuple
specifying configuration options for each category of observation.
opponent: Opponent used for generating opponent moves.
game_logic: Logic for keeping track of the logical state of the board.
board: Board to use.
markers: Markers to use.
"""
self._game_logic = game_logic
self._game_opponent = opponent
arena = arenas.Standard(observable_options=observations.make_options(
observation_settings, observations.ARENA_OBSERVABLES))
arena.attach(board)
arm = kinova.JacoArm(observable_options=observations.make_options(
observation_settings, observations.JACO_ARM_OBSERVABLES))
hand = kinova.JacoHand(observable_options=observations.make_options(
observation_settings, observations.JACO_HAND_OBSERVABLES))
arm.attach(hand)
arena.attach_offset(arm, offset=(0, _ARM_Y_OFFSET, 0))
arena.attach(markers)
# Geoms belonging to the arm and hand are placed in a custom group in order
# to disable their visibility to the top-down camera. NB: we assume that
# there are no other geoms in ROBOT_GEOM_GROUP that don't belong to the
# robot (this is usually the case since the default geom group is 0). If
# there are then these will also be invisible to the top-down camera.
for robot_geom in arm.mjcf_model.find_all('geom'):
robot_geom.group = arenas.ROBOT_GEOM_GROUP
self._arena = arena
self._board = board
self._arm = arm
self._hand = hand
self._markers = markers
self._tcp_initializer = initializers.ToolCenterPointInitializer(
hand=hand, arm=arm,
position=distributions.Uniform(_TCP_LOWER_BOUNDS, _TCP_UPPER_BOUNDS),
quaternion=_uniform_downward_rotation())
# Add an observable exposing the logical state of the board.
board_state_observable = observable.Generic(
lambda physics: self._game_logic.get_board_state())
board_state_observable.configure(
**observation_settings.board_state._asdict())
self._task_observables = {'board_state': board_state_observable}
@property
def root_entity(self):
return self._arena
@property
def arm(self):
return self._arm
@property
def hand(self):
return self._hand
@property
def task_observables(self):
return self._task_observables
def get_reward(self, physics):
del physics # Unused.
return self._game_logic.get_reward[SELF]
def should_terminate_episode(self, physics):
return self._game_logic.is_game_over
def initialize_episode(self, physics, random_state):
self._tcp_initializer(physics, random_state)
self._game_logic.reset()
self._game_opponent.reset()
def before_step(self, physics, action, random_state):
super(JacoArmBoardGame, self).before_step(physics, action, random_state)
self._made_move_this_step = False
def after_substep(self, physics, random_state):
raise NotImplementedError('Subclass must implement after_substep.')
| deepmind-research-master | physics_planning_games/board_games/jaco_arm_board_game.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic smoke test for board_games environments."""
from absl.testing import absltest
from dm_env import test_utils
from physics_planning_games import board_games
class GoTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return board_games.load(environment_name='go_7x7', seed=0)
class TicTacToeTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return board_games.load(
environment_name='tic_tac_toe_mixture_opponent_markers_features',
seed=0)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | physics_planning_games/board_games/board_games_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Physically-grounded board game environments."""
from dm_control import composer as _composer
from physics_planning_games.board_games import go as _go
from physics_planning_games.board_games import tic_tac_toe as _tic_tac_toe
from physics_planning_games.board_games._internal import registry as _registry
_registry.done_importing_tasks()
ALL = tuple(_registry.get_all_names())
TAGS = tuple(_registry.get_tags())
def get_environments_by_tag(tag):
"""Returns the names of all environments matching a given tag.
Args:
tag: A string from `TAGS`.
Returns:
A tuple of environment names.
"""
return tuple(_registry.get_names_by_tag(tag))
def load(environment_name,
env_kwargs=None,
seed=None,
time_limit=float('inf'),
strip_singleton_obs_buffer_dim=False):
"""Loads an environment from board_games.
Args:
environment_name: String, the name of the environment to load. Must be in
`ALL`.
    env_kwargs: Optional dict of extra keyword arguments passed to the task
      constructor.
seed: Optional, either an int seed or an `np.random.RandomState`
object. If None (default), the random number generator will self-seed
from a platform-dependent source of entropy.
time_limit: (optional) A float, the time limit in seconds beyond which an
episode is forced to terminate.
strip_singleton_obs_buffer_dim: (optional) A boolean, if `True`,
the array shape of observations with `buffer_size == 1` will not have a
leading buffer dimension.
Returns:
An instance of `composer.Environment`.
"""
if env_kwargs is not None:
task = _registry.get_constructor(environment_name)(**env_kwargs)
else:
task = _registry.get_constructor(environment_name)()
return _composer.Environment(
task=task,
time_limit=time_limit,
strip_singleton_obs_buffer_dim=strip_singleton_obs_buffer_dim,
random_state=seed)
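# --- Illustrative usage (editor's addition, not part of the original API).
# Minimal sketch: load an environment and step it with uniform random actions.
# The environment name and step count below are arbitrary examples.
def _example_random_episode(environment_name='go_7x7', max_steps=5):
  """Loads `environment_name` and applies a few uniform random actions."""
  import numpy as np  # Local import keeps this example self-contained.
  env = load(environment_name, seed=0)
  spec = env.action_spec()
  random_state = np.random.RandomState(0)
  timestep = env.reset()
  for _ in range(max_steps):
    if timestep.last():
      break
    action = random_state.uniform(
        spec.minimum, spec.maximum, size=spec.shape).astype(spec.dtype)
    timestep = env.step(action)
  return timestep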
| deepmind-research-master | physics_planning_games/board_games/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Logic for the Go Game."""
import abc
import collections
import enum
import shutil
import subprocess
from absl import logging
import numpy as np
from dm_control.utils import io as resources
from physics_planning_games.board_games import logic_base
import pyspiel
GNUGO_PATH = '/usr/games/gnugo'
GoMarkerAction = collections.namedtuple('GoMarkerAction',
['row', 'col', 'pass_action'])
# Note that there is no 'i' in these Go board coordinates
# (cf https://senseis.xmp.net/?Coordinates)
_X_CHARS = 'abcdefghjklmnopqrstuvwxyz'
_X_MAP = {c: x for c, x in zip(_X_CHARS, range(len(_X_CHARS)))}
def _go_marker_to_int(go_marker, board_size):
"""Convert GoMarkerAction into GoPoint integer encoding of move.
Args:
go_marker: GoMarkerAction.
board_size: Board size of the go board (e.g. 9 or 19).
Returns:
GoPoint int value.
"""
if go_marker.pass_action:
return board_size * board_size
else:
return int((go_marker.row) * board_size + go_marker.col)
def _int_to_go_marker(move_int, board_size):
"""Decode the integer move encoding to a GoMarkerAction.
Args:
move_int: Integer encoding the go move.
board_size: Board size of the go board (e.g. 9 or 19).
Returns:
GoMarkerAction encoding of move.
"""
if move_int == board_size * board_size:
go_marker_action = GoMarkerAction(row=-1, col=-1, pass_action=True)
else:
row = move_int // board_size
col = move_int % board_size
go_marker_action = GoMarkerAction(row=row, col=col, pass_action=False)
return go_marker_action
def _go_marker_to_str(go_marker):
if go_marker.pass_action:
return 'PASS'
else:
move_str = _X_CHARS[go_marker.col] + str(go_marker.row + 1)
return move_str
def _str_to_go_marker(move_str):
"""Convert from a 2-letter Go move str (e.g.
a3) to a GoMarker.
Args:
move_str: String describing the move (e.g. a3).
Returns:
GoMarkerAction encoding of move.
"""
move_str = move_str.lower()
if move_str == 'pass':
action = GoMarkerAction(row=-1, col=-1, pass_action=True)
elif move_str == 'resign':
raise NotImplementedError('Not dealing with resign')
else:
assert len(move_str) == 2
col, row = move_str[0], move_str[1]
col = _X_MAP[col]
row = int(row) - 1
action = GoMarkerAction(row=row, col=col, pass_action=False)
return action
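# Worked example (editor's addition): the three encodings above round-trip.
# On a 9x9 board, the marker at row=2, col=0 corresponds to integer action 18
# and to the GTP-style string 'a3':
#   marker = GoMarkerAction(row=2, col=0, pass_action=False)
#   assert _go_marker_to_int(marker, board_size=9) == 18
#   assert _go_marker_to_str(marker) == 'a3'
#   assert _str_to_go_marker('a3') == marker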
def _get_gnugo_ref_config(level=1, binary_path=None):
"""Reference config for GnuGo.
Args:
level: GnuGo level
binary_path: string pointing to GnuGo binary
Returns:
Config dict that can be passed to gtp engine
"""
try:
gnugo_binary_path = resources.GetResourceFilename(binary_path)
except FileNotFoundError:
gnugo_binary_path = shutil.which('gnugo')
if not gnugo_binary_path:
    raise FileNotFoundError('Not able to locate the gnugo binary. '
                            'Try installing it with: apt install gnugo')
gnugo_extra_flags = ['--mode', 'gtp']
gnugo_extra_flags += ['--chinese-rules', '--capture-all-dead']
gtp_player_cfg = {
'name': 'gnugo',
'binary_path': gnugo_binary_path,
'level': level,
'extra_flags': gnugo_extra_flags,
}
return gtp_player_cfg
class Stone(enum.Enum):
EMPTY = 1
WHITE = 2
BLACK = 3
def __lt__(self, other):
value = int(self.value)
return value < other.value
def gtp_to_sgf_point(gtp_point, board_size):
"""Format a GTP point according to the SGF format."""
if gtp_point.lower() == 'pass' or gtp_point.lower() == 'resign':
return 'tt'
column, row = gtp_point[0], gtp_point[1:]
# GTP doesn't use i, but SGF does, so we need to convert.
gtp_columns = 'abcdefghjklmnopqrstuvwxyz'
sgf_columns = 'abcdefghijklmnopqrstuvwxyz'
x = gtp_columns.find(column.lower())
y = board_size - int(row)
return '%s%s' % (sgf_columns[x], sgf_columns[y])
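# Worked example (editor's addition): on a 9x9 board, the GTP point 'j5' maps
# to the SGF point 'ie' ('j' is column index 8 because GTP skips 'i', and row 5
# counted from the bottom becomes SGF row 9 - 5 = 4, i.e. 'e'):
#   assert gtp_to_sgf_point('j5', board_size=9) == 'ie'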
class Gtp(object):
"""Wrapper around Go playing program that communicates using GTP."""
__metaclass__ = abc.ABCMeta
def __init__(self, checkpoint_file=None):
self.stones = {
'.': Stone.EMPTY,
'+': Stone.EMPTY,
'O': Stone.WHITE,
'X': Stone.BLACK
}
self.moves = []
self.comments = []
self.handicap = 0
self.board_size = 19
self.komi = 0
self.free_handicap = None
self.byo_yomi_time = None
self.checkpoint_file = checkpoint_file
self.stderr = None
def set_board_size(self, size):
self.board_size = size
self.gtp_command('boardsize %d' % size)
self.gtp_command('clear_board')
def set_komi(self, komi):
self.komi = komi
self.gtp_command('komi %s' % komi)
def set_free_handicap(self, vertices):
self.free_handicap = vertices
self.gtp_command('set_free_handicap %s' % vertices)
def place_free_handicap(self, n):
self.free_handicap = self.gtp_command('place_free_handicap %d' % n)
return self.free_handicap
def make_move(self, move, record=True):
self.gtp_command('play %s' % move)
if record:
self._record_move(move)
def set_byo_yomi_time(self, t):
self.byo_yomi_time = t
def num_moves(self):
return len(self.moves)
def clear_board(self):
self.moves = []
self.comments = []
self.gtp_command('clear_board')
def generate_move(self, color):
if self.byo_yomi_time is not None:
self.gtp_command('time_left %s %d 1' % (color, self.byo_yomi_time))
move = '%s %s' % (color, self.gtp_command(
'genmove %s' % color).split(' ')[-1].lower())
self._record_move(move, stderr=self.stderr)
return move
def board(self):
raw_board = self.gtp_command('showboard', log=False)[1:].strip()
rows = [line.strip().split(' ')[0] for line in raw_board.split('\n')][1:-1]
rows = [''.join(row.split(' ')[1:-1]) for row in rows]
return [[self.stones[cell] for cell in row] for row in rows]
def quit(self):
self.gtp_command('quit')
def final_status(self, status):
return self.gtp_command('final_status_list %s' % status)[2:].replace(
'\n', ' ').split(' ')
def fixed_handicap(self, handicap):
self.handicap = handicap
self.gtp_command('fixed_handicap %d' % handicap)
def undo(self, num_moves):
self.gtp_command('gg-undo %d' % num_moves)
for _ in range(num_moves):
self.moves.pop()
self.comments.pop()
def _record_move(self, move, stderr=None):
self.moves.append(move)
self.comments.append(stderr)
if self.checkpoint_file:
with open(self.checkpoint_file, 'w') as f:
f.write(self.to_sgf())
def to_sgf(self):
    sgf = '(;PB[Black]PW[White]KM[%.1f]HA[%d]SZ[%d]' % (
        self.komi, self.handicap, self.board_size)
for i, move in enumerate(self.moves):
sgf += '\n;' + self._format_sgf_move(move)
if self.comments[i]:
sgf += 'C[' + self._sgf_escape(self.comments[i]) + ']'
return sgf + ')'
def _format_sgf_move(self, move):
"""Format a move according to the SGF format."""
color, vertex = str(move).split(' ')
return '%s[%s]' % (color[0].upper(),
gtp_to_sgf_point(vertex, self.board_size))
def _sgf_escape(self, text):
return ''.join(['\\' + t if t == ']' or t == '\\' else t for t in text])
@abc.abstractmethod
def gtp_command(self, command, log=True):
"""Executes a GTP command and returns its response.
Args:
command: The GTP command to run, no trailing newline.
log: Whether to log command and response to INFO.
Returns:
The GTP response.
Raises:
GtpError: if the response is not ok (doesn't start with '=').
"""
pass
class GtpError(Exception):
def __init__(self, response):
super(GtpError, self).__init__()
self.response = response
def __str__(self):
return self.response
class GoEngine(Gtp):
"""GTP-based Go engine.
Supports at least GnuGo and Pachi.
For GnuGo, at least specify ['--mode', 'gtp'] in extra_flags.
"""
def __init__(self, command='', checkpoint_file=None, extra_flags=None):
super(GoEngine, self).__init__(checkpoint_file)
if extra_flags:
command = [command] + extra_flags
self.p = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
def gtp_command(self, command, log=True):
if log:
logging.info('GTP: %s', command)
self.p.stdin.write(command)
self.p.stdin.write('\n')
self.p.stdin.flush()
response = [self.p.stdout.readline()]
while response[-1] != '\n':
response.append(self.p.stdout.readline())
response = ''.join(response).strip()
if log:
logging.info('GTP: %s', response)
if response[0][0] != '=':
raise GtpError(response)
return response
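# --- Illustrative usage (editor's addition, not part of the original module).
# Driving GnuGo through the GTP wrapper above; assumes a locally installed
# gnugo binary (see _get_gnugo_ref_config for the reference flags):
#   engine = GoEngine(command='/usr/games/gnugo',
#                     extra_flags=['--mode', 'gtp', '--chinese-rules'])
#   engine.set_board_size(9)
#   engine.make_move('B c3')          # Play a black stone at c3.
#   move = engine.generate_move('W')  # Ask the engine for a white reply.
#   engine.quit()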
class GoGameLogic(logic_base.OpenSpielBasedLogic):
"""Logic for Go game."""
def __init__(self, board_size, gnugo_level=1, komi=5.5):
self._board_size = board_size
self._komi = komi
gtp_player_cfg = _get_gnugo_ref_config(
level=gnugo_level,
binary_path=GNUGO_PATH)
self._gtp_player = GoEngine(
command=gtp_player_cfg['binary_path'],
extra_flags=gtp_player_cfg['extra_flags'])
self._gtp_player.set_board_size(board_size)
self.reset()
def board_size(self):
return self._board_size
def get_gtp_player(self):
return self._gtp_player
def reset(self):
"""Resets the game state."""
# For now we always assume we are the starting player and use a random
# opponent.
self._gtp_player.gtp_command('clear_board', log=False)
self._gtp_player.set_board_size(self._board_size)
self._gtp_player.set_komi(self._komi)
game = pyspiel.load_game('go', {'board_size': self._board_size})
self._open_spiel_state = game.new_initial_state()
self._moves = np.ones(
(self._board_size * self._board_size * 2,), dtype=np.int32) * -1
self._move_id = 0
def show_board(self):
self._gtp_player.gtp_command('showboard')
def get_gtp_reward(self):
    return self._gtp_player.gtp_command('final_score')
def get_board_state(self):
"""Returns the logical board state as a numpy array.
    Returns: A boolean array of shape (H, W, C), where H=W=board_size (the
      height and width of the board) and C=4 for the 4 planes. The 4 planes
      are, in order, unmarked, black (player 0), white (player 1) and komi
      (this layer is always all the same value indicating whether white is to
      play).
"""
board_state = np.reshape(
        np.array(self._open_spiel_state.observation_tensor(0), dtype=bool),
[4, self._board_size, self._board_size])
board_state = np.transpose(board_state, [1, 2, 0])
board_state = board_state[:, :, [2, 0, 1, 3]]
return board_state
def set_state_from_history(self, move_history):
self.reset()
move_history = np.squeeze(move_history.numpy())
for t in range(move_history.size):
if move_history[t] < 0:
break
else:
self.apply(t % 2, move_history[t])
# self.show_board()
def get_move_history(self):
"""Returns the move history as padded numpy array."""
return self._moves
def apply(self, player, action):
"""Checks whether action is valid, and if so applies it to the game state.
Args:
player: Integer specifying the player ID; either 0 or 1.
action: A `GoMarkerAction` instance (or numpy.int32) which represent the
action in the board of size `board_size`.
Returns:
True if the action was valid, else False.
"""
if isinstance(action, GoMarkerAction):
action = _go_marker_to_int(action, self._board_size)
if self._open_spiel_state.current_player() != player:
return False
legal_actions = self._open_spiel_state.legal_actions()
if np.isin(action, legal_actions):
self._open_spiel_state.apply_action(action)
was_valid_move = True
else:
was_valid_move = False
if not was_valid_move:
return False
self._moves[self._move_id] = action
self._move_id += 1
# Apply to the Go program
player_color = 'B' if player == 0 else 'W'
action_str = _go_marker_to_str(_int_to_go_marker(action, self._board_size))
self._gtp_player.gtp_command('play {} {}'.format(player_color, action_str))
return was_valid_move
def gen_move(game_logic, player):
"""Generate move from GTP player and game state defined in game_logic."""
player_color = 'B' if player == 0 else 'W'
gtp_player = game_logic.get_gtp_player()
move_str = gtp_player.gtp_command(
'reg_genmove {}'.format(player_color), log=True)
move_str = move_str[2:].lower()
action = _str_to_go_marker(move_str)
return action
def gen_random_move(game_logic, random_state):
"""Generate random move for current state in game logic."""
if game_logic.is_game_over:
return None
valid_moves = game_logic.open_spiel_state.legal_actions()
assert valid_moves
move = random_state.choice(valid_moves)
go_action = _int_to_go_marker(move, board_size=game_logic.board_size())
return go_action
class GoGTPOpponent(logic_base.Opponent):
"""Use external binary Pachi to generate opponent moves."""
def __init__(self, board_size, mixture_p=0.0):
"""Initialize Go opponent.
Args:
board_size: Go board size (int)
mixture_p: Probability of playing a random move (amongst legal moves).
"""
self._board_size = board_size
self._mixture_p = mixture_p
def reset(self):
pass
def policy(self, game_logic, player, random_state):
"""Return policy action.
Args:
game_logic: Go game logic state.
player: Integer specifying the player ID; either 0 or 1.
random_state: Numpy random state object.
Returns:
GoMarkerAction indicating opponent move.
"""
if random_state.rand() < self._mixture_p:
return gen_random_move(game_logic, random_state)
else:
return gen_move(game_logic, player)
class GoRandomOpponent(logic_base.Opponent):
"""An easy opponent for Go."""
def __init__(self, board_size):
self._board_size = board_size
def reset(self):
"""Resets the opponent's internal state (not implemented)."""
pass
def policy(self, game_logic, player, random_state):
"""Return a random, valid move.
Args:
      game_logic: GoGameLogic state of the game.
player: Integer specifying the player ID; either 0 or 1.
random_state: An instance of `np.random.RandomState`
Returns:
GoMarkerAction of opponent.
"""
return gen_random_move(game_logic, random_state)
| deepmind-research-master | physics_planning_games/board_games/go_logic.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from physics_planning_games.board_games import go_logic
class GoGameLogicTest(parameterized.TestCase):
def setUp(self):
super(GoGameLogicTest, self).setUp()
self.logic = go_logic.GoGameLogic(board_size=5)
self.expected_board_state = np.zeros((5, 5, 4), dtype=bool)
self.expected_board_state[:, :, 0] = True
def test_valid_move_sequence(self):
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = go_logic.GoMarkerAction(col=1, row=2, pass_action=False)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
def test_pass(self):
action = go_logic.GoMarkerAction(col=0, row=0, pass_action=True)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[:, :, 3] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
def test_invalid_move_sequence(self):
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = go_logic.GoMarkerAction(col=1, row=2, pass_action=False)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[action.row, action.col, 0] = False
self.expected_board_state[action.row, action.col, 1] = True
self.expected_board_state[:, :, 3] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = go_logic.GoMarkerAction(col=1, row=2, pass_action=False)
self.assertFalse(self.logic.apply(player=0, action=action),
msg='Invalid action was accepted: {}'.format(action))
# Player 1 tries to move in the same location as player 0.
self.assertFalse(self.logic.apply(player=1, action=action),
msg='Invalid action was accepted: {}'.format(action))
# The board state should not have changed as a result of invalid actions.
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
def test_random_opponent_vs_gnugo(self):
"""Play random v gnugo opponents and check that optimal largely wins.
"""
board_size = 9
rand_state = np.random.RandomState(42)
pachi_opponent = go_logic.GoGTPOpponent(board_size)
random_opponent = go_logic.GoRandomOpponent(board_size)
players = [pachi_opponent, random_opponent]
pachi_returns = []
random_returns = []
for _ in range(3):
logic = go_logic.GoGameLogic(board_size)
pachi_opponent.reset()
random_opponent.reset()
rand_state.shuffle(players)
current_player_idx = 0
while not logic.is_game_over:
current_player = players[current_player_idx]
action = current_player.policy(logic, current_player_idx, rand_state)
valid_action = logic.apply(current_player_idx, action)
self.assertTrue(valid_action,
msg='Opponent {} selected invalid action {}'.format(
current_player, action))
current_player_idx = (current_player_idx + 1) % 2
# Record the winner.
reward = logic.get_reward
if players[0] == pachi_opponent:
pachi_return = reward[0]
random_return = reward[1]
else:
pachi_return = reward[1]
random_return = reward[0]
pachi_returns.append(pachi_return)
random_returns.append(random_return)
mean_pachi_returns = np.mean(pachi_returns)
mean_random_returns = np.mean(random_returns)
self.assertGreater(mean_pachi_returns, 0.95)
self.assertLess(mean_random_returns, 0.05)
@parameterized.named_parameters([
dict(testcase_name='00',
row=0, col=0),
dict(testcase_name='01',
row=1, col=0)])
def test_go_marker_to_int(self, row, col):
go_marker = go_logic.GoMarkerAction(row=row, col=col, pass_action=False)
int_action = go_logic._go_marker_to_int(go_marker, board_size=19)
recovered_go_marker = go_logic._int_to_go_marker(int_action, board_size=19)
self.assertEqual(go_marker, recovered_go_marker,
msg='Initial go marker {}, recovered {}'.format(
go_marker, recovered_go_marker))
@parameterized.named_parameters([
dict(testcase_name='00',
row=0, col=0),
dict(testcase_name='01',
row=1, col=0)])
def test_go_marker_to_str(self, row, col):
go_marker = go_logic.GoMarkerAction(row=row, col=col, pass_action=False)
str_action = go_logic._go_marker_to_str(go_marker)
recovered_go_marker = go_logic._str_to_go_marker(str_action)
self.assertEqual(go_marker,
recovered_go_marker,
msg='Initial go marker {}, recovered {}, '
'str_action {}'.format(go_marker, recovered_go_marker,
str_action))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | physics_planning_games/board_games/go_logic_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A Go board game."""
from dm_control.composer.observation import observable
import numpy as np
from physics_planning_games.board_games import go_logic
from physics_planning_games.board_games import jaco_arm_board_game
from physics_planning_games.board_games._internal import boards
from physics_planning_games.board_games._internal import observations
from physics_planning_games.board_games._internal import pieces
from physics_planning_games.board_games._internal import registry
from physics_planning_games.board_games._internal import tags
_BLACK = (0., 0., 0., 0.75)
_WHITE = (1., 1., 1., 0.75)
_GO_PIECE_SIZE = 0.04
_DEFAULT_OPPONENT_MIXTURE = 0.2
class Go(jaco_arm_board_game.JacoArmBoardGame):
"""Single-player Go of configurable size."""
def __init__(self, board_size, observation_settings, opponent=None,
reset_arm_after_move=True):
"""Initializes a `Go` task.
Args:
      board_size: Integer size of the (square) Go board.
observation_settings: An `observations.ObservationSettings` namedtuple
specifying configuration options for each category of observation.
opponent: Go opponent to use for the opponent player actions.
reset_arm_after_move: Whether to reset arm to random position after every
piece being placed on the board.
"""
game_logic = go_logic.GoGameLogic(board_size=board_size)
if opponent is None:
opponent = go_logic.GoGTPOpponent(board_size=board_size,
mixture_p=_DEFAULT_OPPONENT_MIXTURE)
self._last_valid_move_is_pass = False
super(Go, self).__init__(observation_settings=observation_settings,
opponent=opponent,
game_logic=game_logic,
board=boards.GoBoard(boardsize=board_size),
markers=pieces.Markers(
player_colors=(_BLACK, _WHITE),
halfwidth=_GO_PIECE_SIZE,
num_per_player=board_size*board_size*2,
observable_options=observations.make_options(
observation_settings,
observations.MARKER_OBSERVABLES),
board_size=board_size))
self._reset_arm_after_move = reset_arm_after_move
# Add an observable exposing the move history (to reconstruct game states)
move_history_observable = observable.Generic(
lambda physics: self._game_logic.get_move_history())
move_history_observable.configure(
**observation_settings.board_state._asdict())
self._task_observables['move_history'] = move_history_observable
@property
def name(self):
return 'Go'
@property
def control_timestep(self):
return 0.05
def after_substep(self, physics, random_state):
if not self._made_move_this_step:
      # Find which board square received the most contact pressure.
indices = self._board.get_contact_indices(physics)
if not indices:
return
row, col = indices
# Makes sure that contact with that board square involved a finger
finger_touch = self._board.validate_finger_touch(physics,
row, col, self._hand)
if not finger_touch:
return
      pass_action = (row == -1 and col == -1)
if pass_action and self._last_valid_move_is_pass:
# Don't allow two passes in a row (otherwise hard to only pass once)
valid_move = False
else:
valid_move = self._game_logic.apply(
player=jaco_arm_board_game.SELF,
action=go_logic.GoMarkerAction(row=int(row), col=int(col),
pass_action=pass_action))
if valid_move:
self._made_move_this_step = True
if not pass_action:
self._last_valid_move_is_pass = False
marker_pos = self._board.get_contact_pos(
physics=physics, row=row, col=col)
self._markers.mark(physics=physics,
player_id=jaco_arm_board_game.SELF,
pos=marker_pos,
bpos=(row, col))
else:
self._last_valid_move_is_pass = True
if not self._game_logic.is_game_over:
opponent_move = self._game_opponent.policy(
game_logic=self._game_logic, player=jaco_arm_board_game.OPPONENT,
random_state=random_state)
assert opponent_move
assert self._game_logic.apply(player=jaco_arm_board_game.OPPONENT,
action=opponent_move)
marker_pos = self._board.sample_pos_inside_touch_sensor(
physics=physics,
random_state=random_state,
row=opponent_move.row,
col=opponent_move.col)
self._markers.mark(physics=physics,
player_id=jaco_arm_board_game.OPPONENT,
pos=marker_pos,
bpos=(opponent_move.row,
opponent_move.col))
if self._reset_arm_after_move:
self._tcp_initializer(physics, random_state)
# Redraw all markers that are on the board (after captures)
self._markers.make_all_invisible(physics)
board = self._game_logic.get_board_state()
black_stones = np.transpose(np.nonzero(board[:, :, 1]))
white_stones = np.transpose(np.nonzero(board[:, :, 2]))
if black_stones.size > 0:
self._markers.make_visible_by_bpos(physics, 0, black_stones)
if white_stones.size > 0:
self._markers.make_visible_by_bpos(physics, 1, white_stones)
@registry.add(tags.EASY, tags.FEATURES)
def go_7x7():
return Go(board_size=7,
observation_settings=observations.PERFECT_FEATURES)
| deepmind-research-master | physics_planning_games/board_games/go.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A Tic Tac Toe task."""
from physics_planning_games.board_games import jaco_arm_board_game
from physics_planning_games.board_games import tic_tac_toe_logic
from physics_planning_games.board_games._internal import boards
from physics_planning_games.board_games._internal import observations
from physics_planning_games.board_games._internal import pieces
from physics_planning_games.board_games._internal import registry
from physics_planning_games.board_games._internal import tags
class TicTacToe(jaco_arm_board_game.JacoArmBoardGame):
"""Single-player Tic Tac Toe."""
def __init__(self, observation_settings, opponent=None,
reset_arm_after_move=True):
"""Initializes a `TicTacToe` task.
Args:
observation_settings: An `observations.ObservationSettings` namedtuple
specifying configuration options for each category of observation.
opponent: TicTacToeOpponent used for generating opponent moves.
reset_arm_after_move: Whether to reset arm to random position after every
piece being placed on the board.
"""
game_logic = tic_tac_toe_logic.TicTacToeGameLogic()
if opponent is None:
opponent = tic_tac_toe_logic.TicTacToeRandomOpponent()
markers = pieces.Markers(num_per_player=5,
observable_options=observations.make_options(
observation_settings,
observations.MARKER_OBSERVABLES))
self._reset_arm_after_move = reset_arm_after_move
super(TicTacToe, self).__init__(observation_settings=observation_settings,
opponent=opponent,
game_logic=game_logic,
board=boards.CheckerBoard(),
markers=markers)
@property
def control_timestep(self):
return 0.05
def after_substep(self, physics, random_state):
if not self._made_move_this_step:
indices = self._board.get_contact_indices(physics)
if not indices:
return
row, col = indices
valid_move = self._game_logic.apply(
player=jaco_arm_board_game.SELF,
action=tic_tac_toe_logic.SingleMarkerAction(row=row, col=col))
if valid_move:
self._made_move_this_step = True
marker_pos = self._board.get_contact_pos(
physics=physics, row=row, col=col)
self._markers.mark(physics=physics, player_id=jaco_arm_board_game.SELF,
pos=marker_pos)
if not self._game_logic.is_game_over:
opponent_move = self._game_opponent.policy(
game_logic=self._game_logic, random_state=random_state)
assert opponent_move
assert self._game_logic.apply(player=jaco_arm_board_game.OPPONENT,
action=opponent_move)
marker_pos = self._board.sample_pos_inside_touch_sensor(
physics=physics,
random_state=random_state,
row=opponent_move.row,
col=opponent_move.col)
self._markers.mark(physics=physics,
player_id=jaco_arm_board_game.OPPONENT,
pos=marker_pos)
if self._reset_arm_after_move:
self._tcp_initializer(physics, random_state)
@registry.add(tags.EASY, tags.FEATURES)
def tic_tac_toe_markers_features(**unused_kwargs):
return TicTacToe(observation_settings=observations.PERFECT_FEATURES)
@registry.add(tags.MED, tags.FEATURES)
def tic_tac_toe_mixture_opponent_markers_features(mixture_p=0.25):
print('Creating tictactoe task with random/optimal opponent mixture, p={}'
.format(mixture_p))
return TicTacToe(
observation_settings=observations.PERFECT_FEATURES,
opponent=tic_tac_toe_logic.TicTacToeMixtureOpponent(mixture_p))
@registry.add(tags.HARD, tags.FEATURES)
def tic_tac_toe_optimal_opponent_markers_features(**unused_kwargs):
return TicTacToe(observation_settings=observations.PERFECT_FEATURES,
opponent=tic_tac_toe_logic.TicTacToeOptimalOpponent())
| deepmind-research-master | physics_planning_games/board_games/tic_tac_toe.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""String constants used to annotate task constructors."""
FEATURES = 'features'
VISION = 'vision'
EASY = 'easy'
MED = 'medium'
HARD = 'hard'
| deepmind-research-master | physics_planning_games/board_games/_internal/tags.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A global registry of constructors for board game environments."""
from dm_control.utils import containers
_ALL_CONSTRUCTORS = containers.TaggedTasks(allow_overriding_keys=False)
add = _ALL_CONSTRUCTORS.add
get_constructor = _ALL_CONSTRUCTORS.__getitem__
get_all_names = _ALL_CONSTRUCTORS.keys
get_tags = _ALL_CONSTRUCTORS.tags
get_names_by_tag = _ALL_CONSTRUCTORS.tagged
# This disables the check that prevents the same task constructor name from
# being added to the container more than once. This is done in order to allow
# individual task modules to be reloaded without also reloading `registry.py`
# first (e.g. when "hot-reloading" environments in domain explorer).
def done_importing_tasks():
_ALL_CONSTRUCTORS.allow_overriding_keys = True
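# --- Illustrative usage (editor's addition): how task modules register and
# retrieve constructors. `my_board_game` and `SomeBoardGameTask` are
# hypothetical names used only for this example:
#   @add('easy', 'features')
#   def my_board_game():
#     return SomeBoardGameTask()
#
#   task = get_constructor('my_board_game')()
#   easy_names = get_names_by_tag('easy')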
| deepmind-research-master | physics_planning_games/board_games/_internal/registry.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Configuration for observations."""
import collections
import numpy as np
class ObservableSpec(collections.namedtuple(
'ObservableSpec',
['enabled', 'update_interval', 'buffer_size', 'delay', 'aggregator',
'corruptor'])):
"""Configuration options for generic observables."""
__slots__ = ()
class CameraObservableSpec(collections.namedtuple(
'CameraObservableSpec', ('height', 'width') + ObservableSpec._fields)):
"""Configuration options for camera observables."""
__slots__ = ()
class ObservationSettings(collections.namedtuple(
'ObservationSettings',
['proprio', 'ftt', 'prop_pose', 'board_state', 'camera'])):
"""Container of `ObservableSpecs` grouped by category."""
__slots__ = ()
class ObservableNames(collections.namedtuple(
'ObservableNames',
['proprio', 'ftt', 'prop_pose', 'board_state', 'camera'])):
"""Container that groups the names of observables by category."""
__slots__ = ()
def __new__(cls, proprio=(), ftt=(), prop_pose=(), board_state=(), camera=()):
return super(ObservableNames, cls).__new__(
cls,
proprio=proprio,
ftt=ftt,
prop_pose=prop_pose,
board_state=board_state,
camera=camera)
# Global defaults for "feature" observables (i.e. anything that isn't a camera).
_DISABLED_FEATURE = ObservableSpec(
enabled=False,
update_interval=1,
buffer_size=1,
delay=0,
aggregator=None,
corruptor=None)
_ENABLED_FEATURE = _DISABLED_FEATURE._replace(enabled=True)
# Force, torque and touch-sensor readings are scaled using a symmetric
# logarithmic transformation that handles 0 and negative values.
_symlog1p = lambda x, random_state: np.sign(x) * np.log1p(abs(x))
_DISABLED_FTT = _DISABLED_FEATURE._replace(corruptor=_symlog1p)
_ENABLED_FTT = _ENABLED_FEATURE._replace(corruptor=_symlog1p)
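# Worked example (editor's addition): _symlog1p(0., None) == 0.,
# _symlog1p(10., None) ~= 2.398 and _symlog1p(-10., None) ~= -2.398, so large
# force/torque readings are compressed symmetrically while the sign and
# ordering of the raw values are preserved (the unused second argument is the
# corruptor's random state).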
# Global defaults for camera observables.
_DISABLED_CAMERA = CameraObservableSpec(
height=84,
width=84,
enabled=False,
update_interval=1,
buffer_size=1,
delay=0,
aggregator=None,
corruptor=None)
_ENABLED_CAMERA = _DISABLED_CAMERA._replace(enabled=True)
# Predefined sets of configurations options to apply to each category of
# observable.
PERFECT_FEATURES = ObservationSettings(
proprio=_ENABLED_FEATURE,
ftt=_ENABLED_FTT,
prop_pose=_ENABLED_FEATURE,
board_state=_ENABLED_FEATURE,
camera=_ENABLED_CAMERA)
ARENA_OBSERVABLES = ObservableNames(camera=['front_camera', 'front_camera_2'])
JACO_ARM_OBSERVABLES = ObservableNames(
proprio=['joints_pos', 'joints_vel'], ftt=['joints_torque'])
JACO_HAND_OBSERVABLES = ObservableNames(
proprio=['joints_pos', 'joints_vel', 'pinch_site_pos', 'pinch_site_rmat'])
MARKER_OBSERVABLES = ObservableNames(prop_pose=['position'])
def make_options(obs_settings, obs_names):
"""Constructs a dict of configuration options for a set of named observables.
Args:
obs_settings: An `ObservationSettings` instance.
obs_names: An `ObservableNames` instance.
Returns:
A nested dict containing `{observable_name: {option_name: value}}`.
"""
observable_options = {}
for category, spec in obs_settings._asdict().items():
for observable_name in getattr(obs_names, category):
observable_options[observable_name] = spec._asdict()
return observable_options
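# Worked example (editor's addition): with the settings defined above,
# make_options(PERFECT_FEATURES, MARKER_OBSERVABLES) returns
#   {'position': {'enabled': True, 'update_interval': 1, 'buffer_size': 1,
#                 'delay': 0, 'aggregator': None, 'corruptor': None}}
# i.e. one options dict per named observable, keyed by observable name.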
| deepmind-research-master | physics_planning_games/board_games/_internal/observations.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for physics_planning_games.board_games._internal.pieces.py."""
from absl.testing import absltest
from dm_control import mjcf
import numpy as np
from physics_planning_games.board_games._internal import pieces
class MarkersTest(absltest.TestCase):
def test_position_observable(self):
num_per_player = 3
markers = pieces.Markers(num_per_player=num_per_player)
physics = mjcf.Physics.from_mjcf_model(markers.mjcf_model)
all_positions = [
[(0, 1, 2), (3, 4, 5), (6, 7, 8)], # Player 0
[(-1, 2, -3), (4, -5, 6)], # Player 1
]
for player_id, positions in enumerate(all_positions):
for marker_pos in positions:
markers.mark(physics=physics, player_id=player_id, pos=marker_pos)
expected_positions = np.zeros((2, num_per_player, 3), dtype=np.double)
expected_positions[0, :len(all_positions[0])] = all_positions[0]
expected_positions[1, :len(all_positions[1])] = all_positions[1]
observed_positions = markers.observables.position(physics)
np.testing.assert_array_equal(
expected_positions.reshape(-1, 3), observed_positions)
def test_invalid_player_id(self):
markers = pieces.Markers(num_per_player=5)
physics = mjcf.Physics.from_mjcf_model(markers.mjcf_model)
invalid_player_id = 99
with self.assertRaisesWithLiteralMatch(
ValueError, pieces._INVALID_PLAYER_ID.format(1, 99)):
markers.mark(physics=physics, player_id=invalid_player_id, pos=(1, 2, 3))
def test_too_many_moves(self):
num_per_player = 5
player_id = 0
markers = pieces.Markers(num_per_player=num_per_player)
physics = mjcf.Physics.from_mjcf_model(markers.mjcf_model)
for _ in range(num_per_player):
markers.mark(physics=physics, player_id=player_id, pos=(1, 2, 3))
with self.assertRaisesWithLiteralMatch(
RuntimeError,
pieces._NO_MORE_MARKERS_AVAILABLE.format(num_per_player, player_id)):
markers.mark(physics=physics, player_id=player_id, pos=(1, 2, 3))
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | physics_planning_games/board_games/_internal/pieces_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entities representing board game pieces."""
import itertools
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.observation import observable
import numpy as np
_VISIBLE_SITE_GROUP = 0
_INVISIBLE_SITE_GROUP = 3
_RED = (1., 0., 0., 0.5)
_BLUE = (0., 0, 1., 0.5)
_INVALID_PLAYER_ID = '`player_id` must be between 0 and {}, got {}.'
_NO_MORE_MARKERS_AVAILABLE = (
'All {} markers for player {} have already been placed.')
class Markers(composer.Entity):
"""A collection of non-physical entities for marking board positions."""
def _build(self,
num_per_player,
player_colors=(_RED, _BLUE),
halfwidth=0.025,
height=0.01,
board_size=7):
"""Builds a `Markers` entity.
Args:
num_per_player: Integer, the total number of markers to create per player.
player_colors: Sequence of (R, G, B, A) values specifying the marker
colors for each player.
halfwidth: Scalar, the halfwidth of each marker.
height: Scalar, height of each marker.
      board_size: Integer board size; only used when markers are addressed by
        their board position (the `bpos` argument to `mark`).
"""
root = mjcf.RootElement(model='markers')
root.default.site.set_attributes(type='cylinder', size=(halfwidth, height))
all_markers = []
for i, color in enumerate(player_colors):
player_name = 'player_{}'.format(i)
# TODO(alimuldal): Would look cool if these were textured.
material = root.asset.add('material', name=player_name, rgba=color)
player_markers = []
for j in range(num_per_player):
player_markers.append(
root.worldbody.add(
'site',
name='player_{}_move_{}'.format(i, j),
material=material))
all_markers.append(player_markers)
self._num_players = len(player_colors)
self._mjcf_model = root
self._all_markers = all_markers
self._move_counts = [0] * self._num_players
# To go from integer position to marker index in the all_markers array
    self._marker_ids = np.zeros((2, board_size, board_size), dtype=np.int32)
self._board_size = board_size
def _build_observables(self):
return MarkersObservables(self)
@property
def mjcf_model(self):
"""`mjcf.RootElement` for this entity."""
return self._mjcf_model
@property
def markers(self):
"""Marker sites belonging to all players.
Returns:
A nested list, where `markers[i][j]` contains the `mjcf.Element`
corresponding to player i's jth marker.
"""
return self._all_markers
def initialize_episode(self, physics, random_state):
"""Resets the markers at the start of an episode."""
del random_state # Unused.
self._reset(physics)
def _reset(self, physics):
for player_markers in self._all_markers:
for marker in player_markers:
bound_marker = physics.bind(marker)
bound_marker.pos = 0. # Markers are initially placed at the origin.
bound_marker.group = _INVISIBLE_SITE_GROUP
self._move_counts = [0] * self._num_players
self._marker_ids = np.zeros((2, self._board_size, self._board_size),
dtype=np.int32)
def make_all_invisible(self, physics):
for player_markers in self._all_markers:
for marker in player_markers:
bound_marker = physics.bind(marker)
bound_marker.group = _INVISIBLE_SITE_GROUP
def make_visible_by_bpos(self, physics, player_id, all_bpos):
for bpos in all_bpos:
marker_id = self._marker_ids[player_id][bpos[0]][bpos[1]]
marker = self._all_markers[player_id][marker_id]
bound_marker = physics.bind(marker)
bound_marker.group = _VISIBLE_SITE_GROUP
def mark(self, physics, player_id, pos, bpos=None):
"""Enables the visibility of a marker, moves it to the specified position.
Args:
physics: `mjcf.Physics` instance.
player_id: Integer specifying the ID of the player whose marker to use.
pos: Array-like object specifying the cartesian position of the marker.
bpos: Board position, optional integer coordinates to index the markers.
Raises:
ValueError: If `player_id` is invalid.
RuntimeError: If `player_id` has no more available markers.
"""
if not 0 <= player_id < self._num_players:
raise ValueError(
_INVALID_PLAYER_ID.format(self._num_players - 1, player_id))
markers = self._all_markers[player_id]
move_count = self._move_counts[player_id]
if move_count >= len(markers):
raise RuntimeError(
_NO_MORE_MARKERS_AVAILABLE.format(move_count, player_id))
bound_marker = physics.bind(markers[move_count])
bound_marker.pos = pos
# TODO(alimuldal): Set orientation as well (random? same as contact frame?)
bound_marker.group = _VISIBLE_SITE_GROUP
self._move_counts[player_id] += 1
if bpos:
self._marker_ids[player_id][bpos[0]][bpos[1]] = move_count
class MarkersObservables(composer.Observables):
"""Observables for a `Markers` entity."""
@composer.observable
def position(self):
"""Cartesian positions of all marker sites.
Returns:
An `observable.MJCFFeature` instance. When called with an instance of
`physics` as the argument, this will return a numpy float64 array of shape
(num_players * num_markers, 3) where each row contains the cartesian
position of a marker. Unplaced markers will have position (0, 0, 0).
"""
return observable.MJCFFeature(
'xpos', list(itertools.chain.from_iterable(self._entity.markers)))
| deepmind-research-master | physics_planning_games/board_games/_internal/pieces.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Composer entities corresponding to game boards."""
import copy
import os
from dm_control import composer
from dm_control import mjcf
import numpy as np
from dm_control.utils import io as resources
_TOUCH_THRESHOLD = 1e-3 # Activation threshold for touch sensors (N).
# Whether to display the underlying sensors for the Go board (useful to align the texture).
_SHOW_DEBUG_GRID = False
_TEXTURE_PATH = os.path.join(os.path.dirname(__file__), 'goboard_7x7.png')
def _make_checkerboard(rows,
columns,
square_halfwidth,
height=0.01,
sensor_size=0.7,
name='checkerboard'):
"""Builds a checkerboard with touch sensors centered on each square."""
root = mjcf.RootElement(model=name)
black_mat = root.asset.add('material', name='black', rgba=(0.2, 0.2, 0.2, 1))
white_mat = root.asset.add('material', name='white', rgba=(0.8, 0.8, 0.8, 1))
sensor_mat = root.asset.add('material', name='sensor', rgba=(0, 1, 0, 0.3))
root.default.geom.set_attributes(
type='box', size=(square_halfwidth, square_halfwidth, height))
root.default.site.set_attributes(
type='box',
size=(sensor_size * square_halfwidth,) * 2 + (0.5 * height,),
material=sensor_mat, group=composer.SENSOR_SITES_GROUP)
xpos = (np.arange(columns) - 0.5*(columns - 1)) * 2 * square_halfwidth
ypos = (np.arange(rows) - 0.5*(rows - 1)) * 2 * square_halfwidth
geoms = []
touch_sensors = []
for i in range(rows):
for j in range(columns):
geom_mat = black_mat if ((i % 2) == (j % 2)) else white_mat
name = '{}_{}'.format(i, j)
geoms.append(
root.worldbody.add(
'geom',
pos=(xpos[j], ypos[i], height),
name=name,
material=geom_mat))
site = root.worldbody.add('site', pos=(xpos[j], ypos[i], 2*height),
name=name)
touch_sensors.append(root.sensor.add('touch', site=site, name=name))
return root, geoms, touch_sensors
def _make_goboard(boardsize,
square_halfwidth,
height=0.01,
sensor_size=0.7,
name='goboard'):
"""Builds a Go with touch sensors centered on each intersection."""
y_offset = -0.08
rows = boardsize
columns = boardsize
root = mjcf.RootElement(model=name)
if _SHOW_DEBUG_GRID:
black_mat = root.asset.add('material', name='black',
rgba=(0.2, 0.2, 0.2, 0.5))
white_mat = root.asset.add('material', name='white',
rgba=(0.8, 0.8, 0.8, 0.5))
else:
transparent_mat = root.asset.add('material', name='intersection',
rgba=(0, 1, 0, 0.0))
sensor_mat = root.asset.add('material', name='sensor', rgba=(0, 1, 0, 0.3))
contents = resources.GetResource(_TEXTURE_PATH)
root.asset.add('texture', name='goboard', type='2d',
file=mjcf.Asset(contents, '.png'))
board_mat = root.asset.add(
'material', name='goboard', texture='goboard',
texrepeat=[0.97, 0.97])
root.default.geom.set_attributes(
type='box', size=(square_halfwidth, square_halfwidth, height))
root.default.site.set_attributes(
type='box',
size=(sensor_size * square_halfwidth,) * 2 + (0.5 * height,),
material=sensor_mat, group=composer.SENSOR_SITES_GROUP)
board_height = height
if _SHOW_DEBUG_GRID:
board_height = 0.5*height
root.worldbody.add(
'geom',
pos=(0, 0+y_offset, height),
type='box',
size=(square_halfwidth * boardsize,) * 2 + (board_height,),
name=name,
material=board_mat)
xpos = (np.arange(columns) - 0.5*(columns - 1)) * 2 * square_halfwidth
ypos = (np.arange(rows) - 0.5*(rows - 1)) * 2 * square_halfwidth + y_offset
geoms = []
touch_sensors = []
for i in range(rows):
for j in range(columns):
name = '{}_{}'.format(i, j)
if _SHOW_DEBUG_GRID:
transparent_mat = black_mat if ((i % 2) == (j % 2)) else white_mat
geoms.append(
root.worldbody.add(
'geom',
pos=(xpos[j], ypos[i], height),
name=name,
material=transparent_mat))
site = root.worldbody.add('site', pos=(xpos[j], ypos[i], 2*height),
name=name)
touch_sensors.append(root.sensor.add('touch', site=site, name=name))
pass_geom = root.worldbody.add(
'geom',
pos=(0, y_offset, 0.0),
size=(square_halfwidth*boardsize*2,
square_halfwidth*boardsize) + (0.5 * height,),
name='pass',
material=transparent_mat)
site = root.worldbody.add('site', pos=(0, y_offset, 0.0),
size=(square_halfwidth*boardsize*2,
square_halfwidth*boardsize) + (0.5 * height,),
name='pass')
pass_sensor = root.sensor.add('touch', site=site, name='pass')
return root, geoms, touch_sensors, pass_geom, pass_sensor
class CheckerBoard(composer.Entity):
"""An entity representing a checkerboard."""
def __init__(self, *args, **kwargs):
super(CheckerBoard, self).__init__(*args, **kwargs)
self._contact_from_before_substep = None
def _build(self, rows=3, columns=3, square_halfwidth=0.05):
"""Builds a `CheckerBoard` entity.
Args:
rows: Integer, the number of rows.
columns: Integer, the number of columns.
square_halfwidth: Float, the halfwidth of the squares on the board.
"""
root, geoms, touch_sensors = _make_checkerboard(
rows=rows, columns=columns, square_halfwidth=square_halfwidth)
self._mjcf_model = root
self._geoms = np.array(geoms).reshape(rows, columns)
self._touch_sensors = np.array(touch_sensors).reshape(rows, columns)
@property
def mjcf_model(self):
return self._mjcf_model
def before_substep(self, physics, random_state):
del random_state # Unused.
# Cache a copy of the array of active contacts before each substep.
self._contact_from_before_substep = [
copy.copy(c) for c in physics.data.contact
]
def validate_finger_touch(self, physics, row, col, hand):
# Geom for the board square
geom_id = physics.bind(self._geoms[row, col]).element_id
# finger geoms
finger_geoms_ids = set(physics.bind(hand.finger_geoms).element_id)
contacts = self._contact_from_before_substep
set1, set2 = set([geom_id]), finger_geoms_ids
for contact in contacts:
finger_tile_contact = ((contact.geom1 in set1 and
contact.geom2 in set2) or
(contact.geom1 in set2 and contact.geom2 in set1))
if finger_tile_contact:
return True
return False
def get_contact_pos(self, physics, row, col):
geom_id = physics.bind(self._geoms[row, col]).element_id
# Here we use the array of active contacts from the previous substep, rather
# than the current values in `physics.data.contact`. This is because we use
# touch sensors to detect when a square on the board is being pressed, and
# the pressure readings are based on forces that were calculated at the end
# of the previous substep. It's possible that `physics.data.contact` no
# longer contains any active contacts involving the board geoms, even though
# the touch sensors are telling us that one of the squares on the board is
# being pressed.
contacts = self._contact_from_before_substep
relevant_contacts = [
c for c in contacts if c.geom1 == geom_id or c.geom2 == geom_id
]
if relevant_contacts:
# If there are multiple contacts involving this square of the board, just
# pick the first one.
return relevant_contacts[0].pos.copy()
else:
print("Touch sensor at ({},{}) doesn't have any active contacts!".format(
row, col))
return False
def get_contact_indices(self, physics):
pressures = physics.bind(self._touch_sensors.ravel()).sensordata
# If any of the touch sensors exceed the threshold, return the (row, col)
# indices of the most strongly activated sensor.
if np.any(pressures > _TOUCH_THRESHOLD):
return np.unravel_index(np.argmax(pressures), self._touch_sensors.shape)
else:
return None
def sample_pos_inside_touch_sensor(self, physics, random_state, row, col):
bound_site = physics.bind(self._touch_sensors[row, col].site)
jitter = bound_site.size * np.array([1., 1., 0.])
return bound_site.xpos + random_state.uniform(-jitter, jitter)
class GoBoard(CheckerBoard):
"""An entity representing a Goboard."""
def _build(self, boardsize=7, square_halfwidth=0.05):
"""Builds a `GoBoard` entity.
Args:
boardsize: Integer, the size of the board (boardsize x boardsize).
square_halfwidth: Float, the halfwidth of the squares on the board.
"""
if boardsize != 7:
raise ValueError('Only boardsize of 7x7 is implemented at the moment')
root, geoms, touch_sensors, pass_geom, pass_sensor = _make_goboard(
boardsize=boardsize, square_halfwidth=square_halfwidth)
self._mjcf_model = root
self._geoms = np.array(geoms).reshape(boardsize, boardsize)
self._touch_sensors = np.array(touch_sensors).reshape(boardsize, boardsize)
self._pass_geom = pass_geom
self._pass_sensor = pass_sensor
def get_contact_indices(self, physics):
pressures = physics.bind(self._touch_sensors.ravel()).sensordata
# Deal with pass first
pass_pressure = physics.bind(self._pass_sensor).sensordata
if pass_pressure > np.max(pressures) and pass_pressure > _TOUCH_THRESHOLD:
return -1, -1
# If any of the other touch sensors exceed the threshold, return the
# (row, col) indices of the most strongly activated sensor.
if np.any(pressures > _TOUCH_THRESHOLD):
return np.unravel_index(np.argmax(pressures), self._touch_sensors.shape)
else:
return None
def validate_finger_touch(self, physics, row, col, hand):
# Geom for the board square
if row == -1 and col == -1:
geom_id = physics.bind(self._pass_geom).element_id
else:
geom_id = physics.bind(self._geoms[row, col]).element_id
# finger geoms
finger_geoms_ids = set(physics.bind(hand.finger_geoms).element_id)
contacts = self._contact_from_before_substep
set1, set2 = set([geom_id]), finger_geoms_ids
for contact in contacts:
finger_tile_contact = ((contact.geom1 in set1 and
contact.geom2 in set2) or
(contact.geom1 in set2 and contact.geom2 in set1))
if finger_tile_contact:
return True
return False
def sample_pos_inside_touch_sensor(self, physics, random_state, row, col):
bound_site = physics.bind(self._touch_sensors[row, col].site)
jitter = bound_site.size * np.array([0.25, 0.25, 0.])
return bound_site.xpos + random_state.uniform(-jitter, jitter)
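# Usage sketch (added comment, not part of the original file). A task might
# poll the board for presses and validate them against a hand's finger geoms;
# the flow below is illustrative only:
#   board = GoBoard(boardsize=7)
#   indices = board.get_contact_indices(physics)
#   if indices is not None:
#     row, col = indices
#     if board.validate_finger_touch(physics, row, col, hand):
#       contact_pos = board.get_contact_pos(physics, row, col)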
| deepmind-research-master | physics_planning_games/board_games/_internal/boards.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Board game-specific arena classes."""
from dm_control import composer
from dm_control.composer.observation import observable
from dm_control.mujoco import wrapper
# Robot geoms will be assigned to this group in order to disable their
# visibility to the top-down camera.
ROBOT_GEOM_GROUP = 1
class Standard(composer.Arena):
""""Board game-specific arena class."""
def _build(self, name=None):
"""Initializes this arena.
Args:
name: (optional) A string, the name of this arena. If `None`, use the
model name defined in the MJCF file.
"""
super(Standard, self)._build(name=name)
# Add visual assets.
self.mjcf_model.asset.add(
'texture',
type='skybox',
builtin='gradient',
rgb1=(0.4, 0.6, 0.8),
rgb2=(0., 0., 0.),
width=100,
height=100)
groundplane_texture = self.mjcf_model.asset.add(
'texture',
name='groundplane',
type='2d',
builtin='checker',
rgb1=(0.2, 0.3, 0.4),
rgb2=(0.1, 0.2, 0.3),
width=300,
height=300,
mark='edge',
markrgb=(.8, .8, .8))
groundplane_material = self.mjcf_model.asset.add(
'material',
name='groundplane',
texture=groundplane_texture,
texrepeat=(5, 5),
texuniform='true',
reflectance=0.2)
# Add ground plane.
self.mjcf_model.worldbody.add(
'geom',
name='ground',
type='plane',
material=groundplane_material,
size=(1, 1, 0.1),
friction=(0.4,),
solimp=(0.95, 0.99, 0.001),
solref=(0.002, 1))
# Add lighting
self.mjcf_model.worldbody.add(
'light',
pos=(0, 0, 1.5),
dir=(0, 0, -1),
diffuse=(0.7, 0.7, 0.7),
specular=(.3, .3, .3),
directional='false',
castshadow='true')
# Add some fixed cameras to the arena.
self._front_camera = self.mjcf_model.worldbody.add(
'camera',
name='front',
pos=(0., -0.6, 0.75),
xyaxes=(1., 0., 0., 0., 0.7, 0.75))
    # Ensures a 7x7 Go board fits into the view from the camera.
self._front_camera_2 = self.mjcf_model.worldbody.add(
'camera',
name='front_2',
pos=(0., -0.65, 0.85),
xyaxes=(1., 0., 0., 0., 0.85, 0.6))
self._top_down_camera = self.mjcf_model.worldbody.add(
'camera',
name='top_down',
pos=(0., 0., 0.5),
xyaxes=(1., 0., 0., 0., 1., 0.))
# Always initialize the free camera so that it points at the origin.
self.mjcf_model.statistic.center = (0., 0., 0.)
def _build_observables(self):
return ArenaObservables(self)
@property
def front_camera(self):
return self._front_camera
@property
def front_camera_2(self):
return self._front_camera_2
@property
def top_down_camera(self):
return self._top_down_camera
def attach_offset(self, entity, offset, attach_site=None):
"""Attaches another entity at a position offset from the attachment site.
Args:
entity: The `Entity` to attach.
offset: A length 3 array-like object representing the XYZ offset.
attach_site: (optional) The site to which to attach the entity's model.
If not set, defaults to self.attachment_site.
Returns:
The frame of the attached model.
"""
frame = self.attach(entity, attach_site=attach_site)
frame.pos = offset
return frame
class ArenaObservables(composer.Observables):
"""Observables belonging to the arena."""
@composer.observable
def front_camera(self):
return observable.MJCFCamera(mjcf_element=self._entity.front_camera)
@composer.observable
def front_camera_2(self):
return observable.MJCFCamera(mjcf_element=self._entity.front_camera_2)
@composer.observable
def top_down_camera(self):
return observable.MJCFCamera(mjcf_element=self._entity.top_down_camera)
@composer.observable
def top_down_camera_invisible_robot(self):
# Custom scene options for making robot geoms invisible.
robot_geoms_invisible = wrapper.MjvOption()
robot_geoms_invisible.geomgroup[ROBOT_GEOM_GROUP] = 0
return observable.MJCFCamera(mjcf_element=self._entity.top_down_camera,
scene_option=robot_geoms_invisible)
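# Usage sketch (added comment, not part of the original file). A task would
# typically build the arena and attach a board entity above the ground plane;
# the entity and offset below are illustrative only:
#   arena = Standard()
#   board = boards.CheckerBoard(rows=3, columns=3)
#   arena.attach_offset(board, offset=(0., 0., 0.15))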
| deepmind-research-master | physics_planning_games/board_games/_internal/arenas.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model for CylinderFlow."""
import sonnet as snt
import tensorflow.compat.v1 as tf
from meshgraphnets import common
from meshgraphnets import core_model
from meshgraphnets import normalization
class Model(snt.AbstractModule):
"""Model for fluid simulation."""
def __init__(self, learned_model, name='Model'):
super(Model, self).__init__(name=name)
with self._enter_variable_scope():
self._learned_model = learned_model
self._output_normalizer = normalization.Normalizer(
size=2, name='output_normalizer')
self._node_normalizer = normalization.Normalizer(
size=2+common.NodeType.SIZE, name='node_normalizer')
self._edge_normalizer = normalization.Normalizer(
size=3, name='edge_normalizer') # 2D coord + length
def _build_graph(self, inputs, is_training):
"""Builds input graph."""
# construct graph nodes
node_type = tf.one_hot(inputs['node_type'][:, 0], common.NodeType.SIZE)
node_features = tf.concat([inputs['velocity'], node_type], axis=-1)
# construct graph edges
senders, receivers = common.triangles_to_edges(inputs['cells'])
relative_mesh_pos = (tf.gather(inputs['mesh_pos'], senders) -
tf.gather(inputs['mesh_pos'], receivers))
edge_features = tf.concat([
relative_mesh_pos,
tf.norm(relative_mesh_pos, axis=-1, keepdims=True)], axis=-1)
mesh_edges = core_model.EdgeSet(
name='mesh_edges',
features=self._edge_normalizer(edge_features, is_training),
receivers=receivers,
senders=senders)
return core_model.MultiGraph(
node_features=self._node_normalizer(node_features, is_training),
edge_sets=[mesh_edges])
def _build(self, inputs):
graph = self._build_graph(inputs, is_training=False)
per_node_network_output = self._learned_model(graph)
return self._update(inputs, per_node_network_output)
@snt.reuse_variables
def loss(self, inputs):
"""L2 loss on velocity."""
graph = self._build_graph(inputs, is_training=True)
network_output = self._learned_model(graph)
# build target velocity change
cur_velocity = inputs['velocity']
target_velocity = inputs['target|velocity']
target_velocity_change = target_velocity - cur_velocity
target_normalized = self._output_normalizer(target_velocity_change)
# build loss
node_type = inputs['node_type'][:, 0]
loss_mask = tf.logical_or(tf.equal(node_type, common.NodeType.NORMAL),
tf.equal(node_type, common.NodeType.OUTFLOW))
error = tf.reduce_sum((target_normalized - network_output)**2, axis=1)
loss = tf.reduce_mean(error[loss_mask])
return loss
def _update(self, inputs, per_node_network_output):
"""Integrate model outputs."""
velocity_update = self._output_normalizer.inverse(per_node_network_output)
# integrate forward
cur_velocity = inputs['velocity']
return cur_velocity + velocity_update
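# Input sketch (added comment, not part of the original file). `_build_graph`
# reads the following per-frame keys, with N nodes and M triangles; the shapes
# are inferred from the code above:
#   'velocity': [N, 2] float32, 'node_type': [N, 1] int32,
#   'mesh_pos': [N, 2] float32, 'cells': [M, 3] int32,
# plus 'target|velocity' ([N, 2] float32) when computing the training loss.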
| deepmind-research-master | meshgraphnets/cfd_model.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model for FlagSimple."""
import sonnet as snt
import tensorflow.compat.v1 as tf
from meshgraphnets import common
from meshgraphnets import core_model
from meshgraphnets import normalization
class Model(snt.AbstractModule):
"""Model for static cloth simulation."""
def __init__(self, learned_model, name='Model'):
super(Model, self).__init__(name=name)
with self._enter_variable_scope():
self._learned_model = learned_model
self._output_normalizer = normalization.Normalizer(
size=3, name='output_normalizer')
self._node_normalizer = normalization.Normalizer(
size=3+common.NodeType.SIZE, name='node_normalizer')
self._edge_normalizer = normalization.Normalizer(
size=7, name='edge_normalizer') # 2D coord + 3D coord + 2*length = 7
def _build_graph(self, inputs, is_training):
"""Builds input graph."""
# construct graph nodes
velocity = inputs['world_pos'] - inputs['prev|world_pos']
node_type = tf.one_hot(inputs['node_type'][:, 0], common.NodeType.SIZE)
node_features = tf.concat([velocity, node_type], axis=-1)
# construct graph edges
senders, receivers = common.triangles_to_edges(inputs['cells'])
relative_world_pos = (tf.gather(inputs['world_pos'], senders) -
tf.gather(inputs['world_pos'], receivers))
relative_mesh_pos = (tf.gather(inputs['mesh_pos'], senders) -
tf.gather(inputs['mesh_pos'], receivers))
edge_features = tf.concat([
relative_world_pos,
tf.norm(relative_world_pos, axis=-1, keepdims=True),
relative_mesh_pos,
tf.norm(relative_mesh_pos, axis=-1, keepdims=True)], axis=-1)
mesh_edges = core_model.EdgeSet(
name='mesh_edges',
features=self._edge_normalizer(edge_features, is_training),
receivers=receivers,
senders=senders)
return core_model.MultiGraph(
node_features=self._node_normalizer(node_features, is_training),
edge_sets=[mesh_edges])
def _build(self, inputs):
graph = self._build_graph(inputs, is_training=False)
per_node_network_output = self._learned_model(graph)
return self._update(inputs, per_node_network_output)
@snt.reuse_variables
def loss(self, inputs):
"""L2 loss on position."""
graph = self._build_graph(inputs, is_training=True)
network_output = self._learned_model(graph)
# build target acceleration
cur_position = inputs['world_pos']
prev_position = inputs['prev|world_pos']
target_position = inputs['target|world_pos']
target_acceleration = target_position - 2*cur_position + prev_position
target_normalized = self._output_normalizer(target_acceleration)
# build loss
loss_mask = tf.equal(inputs['node_type'][:, 0], common.NodeType.NORMAL)
error = tf.reduce_sum((target_normalized - network_output)**2, axis=1)
loss = tf.reduce_mean(error[loss_mask])
return loss
def _update(self, inputs, per_node_network_output):
"""Integrate model outputs."""
acceleration = self._output_normalizer.inverse(per_node_network_output)
# integrate forward
cur_position = inputs['world_pos']
prev_position = inputs['prev|world_pos']
position = 2*cur_position + acceleration - prev_position
return position
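# Input sketch (added comment, not part of the original file). `_build_graph`
# reads the following per-frame keys, with N nodes and M triangles; the shapes
# are inferred from the code above:
#   'world_pos': [N, 3] float32, 'prev|world_pos': [N, 3] float32,
#   'mesh_pos': [N, 2] float32, 'node_type': [N, 1] int32, 'cells': [M, 3] int32,
# plus 'target|world_pos' ([N, 3] float32) when computing the training loss.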
| deepmind-research-master | meshgraphnets/cloth_model.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Runs the learner/evaluator."""
import pickle
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from meshgraphnets import cfd_eval
from meshgraphnets import cfd_model
from meshgraphnets import cloth_eval
from meshgraphnets import cloth_model
from meshgraphnets import core_model
from meshgraphnets import dataset
FLAGS = flags.FLAGS
flags.DEFINE_enum('mode', 'train', ['train', 'eval'],
'Train model, or run evaluation.')
flags.DEFINE_enum('model', None, ['cfd', 'cloth'],
'Select model to run.')
flags.DEFINE_string('checkpoint_dir', None, 'Directory to save checkpoint')
flags.DEFINE_string('dataset_dir', None, 'Directory to load dataset from.')
flags.DEFINE_string('rollout_path', None,
'Pickle file to save eval trajectories')
flags.DEFINE_enum('rollout_split', 'valid', ['train', 'test', 'valid'],
'Dataset split to use for rollouts.')
flags.DEFINE_integer('num_rollouts', 10, 'No. of rollout trajectories')
flags.DEFINE_integer('num_training_steps', int(10e6), 'No. of training steps')
PARAMETERS = {
'cfd': dict(noise=0.02, gamma=1.0, field='velocity', history=False,
size=2, batch=2, model=cfd_model, evaluator=cfd_eval),
'cloth': dict(noise=0.003, gamma=0.1, field='world_pos', history=True,
size=3, batch=1, model=cloth_model, evaluator=cloth_eval)
}
def learner(model, params):
"""Run a learner job."""
ds = dataset.load_dataset(FLAGS.dataset_dir, 'train')
ds = dataset.add_targets(ds, [params['field']], add_history=params['history'])
ds = dataset.split_and_preprocess(ds, noise_field=params['field'],
noise_scale=params['noise'],
noise_gamma=params['gamma'])
inputs = tf.data.make_one_shot_iterator(ds).get_next()
loss_op = model.loss(inputs)
global_step = tf.train.create_global_step()
lr = tf.train.exponential_decay(learning_rate=1e-4,
global_step=global_step,
decay_steps=int(5e6),
decay_rate=0.1) + 1e-6
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_op = optimizer.minimize(loss_op, global_step=global_step)
# Don't train for the first few steps, just accumulate normalization stats
train_op = tf.cond(tf.less(global_step, 1000),
lambda: tf.group(tf.assign_add(global_step, 1)),
lambda: tf.group(train_op))
with tf.train.MonitoredTrainingSession(
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.num_training_steps)],
checkpoint_dir=FLAGS.checkpoint_dir,
save_checkpoint_secs=600) as sess:
while not sess.should_stop():
_, step, loss = sess.run([train_op, global_step, loss_op])
if step % 1000 == 0:
logging.info('Step %d: Loss %g', step, loss)
logging.info('Training complete.')
def evaluator(model, params):
"""Run a model rollout trajectory."""
ds = dataset.load_dataset(FLAGS.dataset_dir, FLAGS.rollout_split)
ds = dataset.add_targets(ds, [params['field']], add_history=params['history'])
inputs = tf.data.make_one_shot_iterator(ds).get_next()
scalar_op, traj_ops = params['evaluator'].evaluate(model, inputs)
tf.train.create_global_step()
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.checkpoint_dir,
save_checkpoint_secs=None,
save_checkpoint_steps=None) as sess:
trajectories = []
scalars = []
for traj_idx in range(FLAGS.num_rollouts):
logging.info('Rollout trajectory %d', traj_idx)
scalar_data, traj_data = sess.run([scalar_op, traj_ops])
trajectories.append(traj_data)
scalars.append(scalar_data)
for key in scalars[0]:
logging.info('%s: %g', key, np.mean([x[key] for x in scalars]))
with open(FLAGS.rollout_path, 'wb') as fp:
pickle.dump(trajectories, fp)
def main(argv):
del argv
tf.enable_resource_variables()
tf.disable_eager_execution()
params = PARAMETERS[FLAGS.model]
learned_model = core_model.EncodeProcessDecode(
output_size=params['size'],
latent_size=128,
num_layers=2,
message_passing_steps=15)
model = params['model'].Model(learned_model)
if FLAGS.mode == 'train':
learner(model, params)
elif FLAGS.mode == 'eval':
evaluator(model, params)
if __name__ == '__main__':
app.run(main)
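# Example invocations (added comment, not part of the original file; paths and
# the dataset directory name are illustrative only):
#   python -m meshgraphnets.run_model --mode=train --model=cloth \
#       --checkpoint_dir=/tmp/checkpoints --dataset_dir=/tmp/flag_simple
#   python -m meshgraphnets.run_model --mode=eval --model=cloth \
#       --checkpoint_dir=/tmp/checkpoints --dataset_dir=/tmp/flag_simple \
#       --rollout_path=/tmp/rollout_flag.pkl --num_rollouts=5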
| deepmind-research-master | meshgraphnets/run_model.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Core learned graph net model."""
import collections
import functools
import sonnet as snt
import tensorflow.compat.v1 as tf
EdgeSet = collections.namedtuple('EdgeSet', ['name', 'features', 'senders',
'receivers'])
MultiGraph = collections.namedtuple('Graph', ['node_features', 'edge_sets'])
class GraphNetBlock(snt.AbstractModule):
"""Multi-Edge Interaction Network with residual connections."""
def __init__(self, model_fn, name='GraphNetBlock'):
super(GraphNetBlock, self).__init__(name=name)
self._model_fn = model_fn
def _update_edge_features(self, node_features, edge_set):
"""Aggregrates node features, and applies edge function."""
sender_features = tf.gather(node_features, edge_set.senders)
receiver_features = tf.gather(node_features, edge_set.receivers)
features = [sender_features, receiver_features, edge_set.features]
with tf.variable_scope(edge_set.name+'_edge_fn'):
return self._model_fn()(tf.concat(features, axis=-1))
def _update_node_features(self, node_features, edge_sets):
"""Aggregrates edge features, and applies node function."""
num_nodes = tf.shape(node_features)[0]
features = [node_features]
for edge_set in edge_sets:
features.append(tf.math.unsorted_segment_sum(edge_set.features,
edge_set.receivers,
num_nodes))
with tf.variable_scope('node_fn'):
return self._model_fn()(tf.concat(features, axis=-1))
def _build(self, graph):
"""Applies GraphNetBlock and returns updated MultiGraph."""
# apply edge functions
new_edge_sets = []
for edge_set in graph.edge_sets:
updated_features = self._update_edge_features(graph.node_features,
edge_set)
new_edge_sets.append(edge_set._replace(features=updated_features))
# apply node function
new_node_features = self._update_node_features(graph.node_features,
new_edge_sets)
# add residual connections
new_node_features += graph.node_features
new_edge_sets = [es._replace(features=es.features + old_es.features)
for es, old_es in zip(new_edge_sets, graph.edge_sets)]
return MultiGraph(new_node_features, new_edge_sets)
class EncodeProcessDecode(snt.AbstractModule):
"""Encode-Process-Decode GraphNet model."""
def __init__(self,
output_size,
latent_size,
num_layers,
message_passing_steps,
name='EncodeProcessDecode'):
super(EncodeProcessDecode, self).__init__(name=name)
self._latent_size = latent_size
self._output_size = output_size
self._num_layers = num_layers
self._message_passing_steps = message_passing_steps
def _make_mlp(self, output_size, layer_norm=True):
"""Builds an MLP."""
widths = [self._latent_size] * self._num_layers + [output_size]
network = snt.nets.MLP(widths, activate_final=False)
if layer_norm:
network = snt.Sequential([network, snt.LayerNorm()])
return network
def _encoder(self, graph):
"""Encodes node and edge features into latent features."""
with tf.variable_scope('encoder'):
node_latents = self._make_mlp(self._latent_size)(graph.node_features)
new_edges_sets = []
for edge_set in graph.edge_sets:
latent = self._make_mlp(self._latent_size)(edge_set.features)
new_edges_sets.append(edge_set._replace(features=latent))
return MultiGraph(node_latents, new_edges_sets)
def _decoder(self, graph):
"""Decodes node features from graph."""
with tf.variable_scope('decoder'):
decoder = self._make_mlp(self._output_size, layer_norm=False)
return decoder(graph.node_features)
def _build(self, graph):
"""Encodes and processes a multigraph, and returns node features."""
model_fn = functools.partial(self._make_mlp, output_size=self._latent_size)
latent_graph = self._encoder(graph)
for _ in range(self._message_passing_steps):
latent_graph = GraphNetBlock(model_fn)(latent_graph)
return self._decoder(latent_graph)
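# Minimal usage sketch (added for illustration, not part of the original file).
# Builds a toy graph with 4 nodes and 3 undirected (6 directed) edges and runs
# the network in TF1 graph mode; all sizes below are illustrative only.
if __name__ == '__main__':
  tf.disable_eager_execution()
  toy_senders = tf.constant([0, 1, 2, 1, 2, 3], dtype=tf.int32)
  toy_receivers = tf.constant([1, 2, 3, 0, 1, 2], dtype=tf.int32)
  toy_graph = MultiGraph(
      node_features=tf.random.normal([4, 8]),
      edge_sets=[EdgeSet(name='mesh_edges',
                         features=tf.random.normal([6, 8]),
                         senders=toy_senders,
                         receivers=toy_receivers)])
  network = EncodeProcessDecode(output_size=2, latent_size=16, num_layers=2,
                                message_passing_steps=3)
  per_node_output = network(toy_graph)  # Shape [4, 2].
  with tf.train.MonitoredSession() as sess:
    print(sess.run(per_node_output))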
| deepmind-research-master | meshgraphnets/core_model.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for reading the datasets."""
import functools
import json
import os
import tensorflow.compat.v1 as tf
from meshgraphnets.common import NodeType
def _parse(proto, meta):
"""Parses a trajectory from tf.Example."""
feature_lists = {k: tf.io.VarLenFeature(tf.string)
for k in meta['field_names']}
features = tf.io.parse_single_example(proto, feature_lists)
out = {}
for key, field in meta['features'].items():
data = tf.io.decode_raw(features[key].values, getattr(tf, field['dtype']))
data = tf.reshape(data, field['shape'])
if field['type'] == 'static':
data = tf.tile(data, [meta['trajectory_length'], 1, 1])
elif field['type'] == 'dynamic_varlen':
length = tf.io.decode_raw(features['length_'+key].values, tf.int32)
length = tf.reshape(length, [-1])
data = tf.RaggedTensor.from_row_lengths(data, row_lengths=length)
elif field['type'] != 'dynamic':
raise ValueError('invalid data format')
out[key] = data
return out
def load_dataset(path, split):
"""Load dataset."""
with open(os.path.join(path, 'meta.json'), 'r') as fp:
meta = json.loads(fp.read())
ds = tf.data.TFRecordDataset(os.path.join(path, split+'.tfrecord'))
ds = ds.map(functools.partial(_parse, meta=meta), num_parallel_calls=8)
ds = ds.prefetch(1)
return ds
def add_targets(ds, fields, add_history):
"""Adds target and optionally history fields to dataframe."""
def fn(trajectory):
out = {}
for key, val in trajectory.items():
out[key] = val[1:-1]
if key in fields:
if add_history:
out['prev|'+key] = val[0:-2]
out['target|'+key] = val[2:]
return out
return ds.map(fn, num_parallel_calls=8)
def split_and_preprocess(ds, noise_field, noise_scale, noise_gamma):
"""Splits trajectories into frames, and adds training noise."""
def add_noise(frame):
noise = tf.random.normal(tf.shape(frame[noise_field]),
stddev=noise_scale, dtype=tf.float32)
# don't apply noise to boundary nodes
mask = tf.equal(frame['node_type'], NodeType.NORMAL)[:, 0]
noise = tf.where(mask, noise, tf.zeros_like(noise))
frame[noise_field] += noise
frame['target|'+noise_field] += (1.0 - noise_gamma) * noise
return frame
ds = ds.flat_map(tf.data.Dataset.from_tensor_slices)
ds = ds.map(add_noise, num_parallel_calls=8)
ds = ds.shuffle(10000)
ds = ds.repeat(None)
return ds.prefetch(10)
def batch_dataset(ds, batch_size):
"""Batches input datasets."""
shapes = ds.output_shapes
types = ds.output_types
def renumber(buffer, frame):
nodes, cells = buffer
new_nodes, new_cells = frame
return nodes + new_nodes, tf.concat([cells, new_cells+nodes], axis=0)
def batch_accumulate(ds_window):
out = {}
for key, ds_val in ds_window.items():
initial = tf.zeros((0, shapes[key][1]), dtype=types[key])
if key == 'cells':
# renumber node indices in cells
num_nodes = ds_window['node_type'].map(lambda x: tf.shape(x)[0])
cells = tf.data.Dataset.zip((num_nodes, ds_val))
initial = (tf.constant(0, tf.int32), initial)
_, out[key] = cells.reduce(initial, renumber)
else:
merge = lambda prev, cur: tf.concat([prev, cur], axis=0)
out[key] = ds_val.reduce(initial, merge)
return out
if batch_size > 1:
ds = ds.window(batch_size, drop_remainder=True)
ds = ds.map(batch_accumulate, num_parallel_calls=8)
return ds
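# Sketch of the metadata layout consumed by `_parse` above (added comment, not
# part of the original file). Only the keys referenced in this module are
# shown; the concrete field names, shapes and lengths are illustrative
# assumptions:
#   {
#     "trajectory_length": 400,
#     "field_names": ["cells", "node_type", "mesh_pos", "velocity"],
#     "features": {
#       "cells":    {"type": "static",  "dtype": "int32",   "shape": [1, -1, 3]},
#       "velocity": {"type": "dynamic", "dtype": "float32", "shape": [400, -1, 2]}
#     }
#   }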
| deepmind-research-master | meshgraphnets/dataset.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Plots a cloth trajectory rollout."""
import pickle
from absl import app
from absl import flags
from matplotlib import animation
import matplotlib.pyplot as plt
FLAGS = flags.FLAGS
flags.DEFINE_string('rollout_path', None, 'Path to rollout pickle file')
def main(unused_argv):
with open(FLAGS.rollout_path, 'rb') as fp:
rollout_data = pickle.load(fp)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection='3d')
skip = 10
num_steps = rollout_data[0]['gt_pos'].shape[0]
num_frames = len(rollout_data) * num_steps // skip
# compute bounds
bounds = []
for trajectory in rollout_data:
bb_min = trajectory['gt_pos'].min(axis=(0, 1))
bb_max = trajectory['gt_pos'].max(axis=(0, 1))
bounds.append((bb_min, bb_max))
def animate(num):
step = (num*skip) % num_steps
traj = (num*skip) // num_steps
ax.cla()
bound = bounds[traj]
ax.set_xlim([bound[0][0], bound[1][0]])
ax.set_ylim([bound[0][1], bound[1][1]])
ax.set_zlim([bound[0][2], bound[1][2]])
pos = rollout_data[traj]['pred_pos'][step]
faces = rollout_data[traj]['faces'][step]
ax.plot_trisurf(pos[:, 0], pos[:, 1], faces, pos[:, 2], shade=True)
ax.set_title('Trajectory %d Step %d' % (traj, step))
return fig,
_ = animation.FuncAnimation(fig, animate, frames=num_frames, interval=100)
plt.show(block=True)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | meshgraphnets/plot_cloth.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Commonly used data structures and functions."""
import enum
import tensorflow.compat.v1 as tf
class NodeType(enum.IntEnum):
NORMAL = 0
OBSTACLE = 1
AIRFOIL = 2
HANDLE = 3
INFLOW = 4
OUTFLOW = 5
WALL_BOUNDARY = 6
SIZE = 9
def triangles_to_edges(faces):
"""Computes mesh edges from triangles."""
# collect edges from triangles
edges = tf.concat([faces[:, 0:2],
faces[:, 1:3],
tf.stack([faces[:, 2], faces[:, 0]], axis=1)], axis=0)
# those edges are sometimes duplicated (within the mesh) and sometimes
# single (at the mesh boundary).
# sort & pack edges as single tf.int64
receivers = tf.reduce_min(edges, axis=1)
senders = tf.reduce_max(edges, axis=1)
packed_edges = tf.bitcast(tf.stack([senders, receivers], axis=1), tf.int64)
# remove duplicates and unpack
unique_edges = tf.bitcast(tf.unique(packed_edges)[0], tf.int32)
senders, receivers = tf.unstack(unique_edges, axis=1)
# create two-way connectivity
return (tf.concat([senders, receivers], axis=0),
tf.concat([receivers, senders], axis=0))
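# Minimal usage sketch (added for illustration, not part of the original file):
# two triangles [0, 1, 2] and [1, 3, 2] share the edge (1, 2). The duplicate is
# removed, leaving 5 undirected edges returned as 10 directed sender/receiver
# pairs.
if __name__ == '__main__':
  tf.disable_eager_execution()
  example_faces = tf.constant([[0, 1, 2], [1, 3, 2]], dtype=tf.int32)
  example_senders, example_receivers = triangles_to_edges(example_faces)
  with tf.Session() as sess:
    print(sess.run([example_senders, example_receivers]))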
| deepmind-research-master | meshgraphnets/common.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions to build evaluation metrics for cloth data."""
import tensorflow.compat.v1 as tf
from meshgraphnets.common import NodeType
def _rollout(model, initial_state, num_steps):
"""Rolls out a model trajectory."""
mask = tf.equal(initial_state['node_type'][:, 0], NodeType.NORMAL)
def step_fn(step, prev_pos, cur_pos, trajectory):
prediction = model({**initial_state,
'prev|world_pos': prev_pos,
'world_pos': cur_pos})
# don't update kinematic nodes
next_pos = tf.where(mask, prediction, cur_pos)
trajectory = trajectory.write(step, cur_pos)
return step+1, cur_pos, next_pos, trajectory
_, _, _, output = tf.while_loop(
cond=lambda step, last, cur, traj: tf.less(step, num_steps),
body=step_fn,
loop_vars=(0, initial_state['prev|world_pos'], initial_state['world_pos'],
tf.TensorArray(tf.float32, num_steps)),
parallel_iterations=1)
return output.stack()
def evaluate(model, inputs):
"""Performs model rollouts and create stats."""
initial_state = {k: v[0] for k, v in inputs.items()}
num_steps = inputs['cells'].shape[0]
prediction = _rollout(model, initial_state, num_steps)
error = tf.reduce_mean((prediction - inputs['world_pos'])**2, axis=-1)
scalars = {'mse_%d_steps' % horizon: tf.reduce_mean(error[1:horizon+1])
for horizon in [1, 10, 20, 50, 100, 200]}
traj_ops = {
'faces': inputs['cells'],
'mesh_pos': inputs['mesh_pos'],
'gt_pos': inputs['world_pos'],
'pred_pos': prediction
}
return scalars, traj_ops
| deepmind-research-master | meshgraphnets/cloth_eval.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Plots a CFD trajectory rollout."""
import pickle
from absl import app
from absl import flags
from matplotlib import animation
from matplotlib import tri as mtri
import matplotlib.pyplot as plt
FLAGS = flags.FLAGS
flags.DEFINE_string('rollout_path', None, 'Path to rollout pickle file')
def main(unused_argv):
with open(FLAGS.rollout_path, 'rb') as fp:
rollout_data = pickle.load(fp)
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
skip = 10
num_steps = rollout_data[0]['gt_velocity'].shape[0]
num_frames = len(rollout_data) * num_steps // skip
# compute bounds
bounds = []
for trajectory in rollout_data:
bb_min = trajectory['gt_velocity'].min(axis=(0, 1))
bb_max = trajectory['gt_velocity'].max(axis=(0, 1))
bounds.append((bb_min, bb_max))
def animate(num):
step = (num*skip) % num_steps
traj = (num*skip) // num_steps
ax.cla()
ax.set_aspect('equal')
ax.set_axis_off()
vmin, vmax = bounds[traj]
pos = rollout_data[traj]['mesh_pos'][step]
faces = rollout_data[traj]['faces'][step]
velocity = rollout_data[traj]['pred_velocity'][step]
triang = mtri.Triangulation(pos[:, 0], pos[:, 1], faces)
ax.tripcolor(triang, velocity[:, 0], vmin=vmin[0], vmax=vmax[0])
ax.triplot(triang, 'ko-', ms=0.5, lw=0.3)
ax.set_title('Trajectory %d Step %d' % (traj, step))
return fig,
_ = animation.FuncAnimation(fig, animate, frames=num_frames, interval=100)
plt.show(block=True)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | meshgraphnets/plot_cfd.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions to build evaluation metrics for CFD data."""
import tensorflow.compat.v1 as tf
from meshgraphnets.common import NodeType
def _rollout(model, initial_state, num_steps):
"""Rolls out a model trajectory."""
node_type = initial_state['node_type'][:, 0]
mask = tf.logical_or(tf.equal(node_type, NodeType.NORMAL),
tf.equal(node_type, NodeType.OUTFLOW))
def step_fn(step, velocity, trajectory):
prediction = model({**initial_state,
'velocity': velocity})
# don't update boundary nodes
next_velocity = tf.where(mask, prediction, velocity)
trajectory = trajectory.write(step, velocity)
return step+1, next_velocity, trajectory
_, _, output = tf.while_loop(
cond=lambda step, cur, traj: tf.less(step, num_steps),
body=step_fn,
loop_vars=(0, initial_state['velocity'],
tf.TensorArray(tf.float32, num_steps)),
parallel_iterations=1)
return output.stack()
def evaluate(model, inputs):
"""Performs model rollouts and create stats."""
initial_state = {k: v[0] for k, v in inputs.items()}
num_steps = inputs['cells'].shape[0]
prediction = _rollout(model, initial_state, num_steps)
error = tf.reduce_mean((prediction - inputs['velocity'])**2, axis=-1)
scalars = {'mse_%d_steps' % horizon: tf.reduce_mean(error[1:horizon+1])
for horizon in [1, 10, 20, 50, 100, 200]}
traj_ops = {
'faces': inputs['cells'],
'mesh_pos': inputs['mesh_pos'],
'gt_velocity': inputs['velocity'],
'pred_velocity': prediction
}
return scalars, traj_ops
| deepmind-research-master | meshgraphnets/cfd_eval.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Online data normalization."""
import sonnet as snt
import tensorflow.compat.v1 as tf
class Normalizer(snt.AbstractModule):
"""Feature normalizer that accumulates statistics online."""
def __init__(self, size, max_accumulations=10**6, std_epsilon=1e-8,
name='Normalizer'):
super(Normalizer, self).__init__(name=name)
self._max_accumulations = max_accumulations
self._std_epsilon = std_epsilon
with self._enter_variable_scope():
self._acc_count = tf.Variable(0, dtype=tf.float32, trainable=False)
self._num_accumulations = tf.Variable(0, dtype=tf.float32,
trainable=False)
self._acc_sum = tf.Variable(tf.zeros(size, tf.float32), trainable=False)
self._acc_sum_squared = tf.Variable(tf.zeros(size, tf.float32),
trainable=False)
def _build(self, batched_data, accumulate=True):
"""Normalizes input data and accumulates statistics."""
update_op = tf.no_op()
if accumulate:
# stop accumulating after a million updates, to prevent accuracy issues
update_op = tf.cond(self._num_accumulations < self._max_accumulations,
lambda: self._accumulate(batched_data),
tf.no_op)
with tf.control_dependencies([update_op]):
return (batched_data - self._mean()) / self._std_with_epsilon()
@snt.reuse_variables
def inverse(self, normalized_batch_data):
"""Inverse transformation of the normalizer."""
return normalized_batch_data * self._std_with_epsilon() + self._mean()
def _accumulate(self, batched_data):
"""Function to perform the accumulation of the batch_data statistics."""
count = tf.cast(tf.shape(batched_data)[0], tf.float32)
data_sum = tf.reduce_sum(batched_data, axis=0)
squared_data_sum = tf.reduce_sum(batched_data**2, axis=0)
return tf.group(
tf.assign_add(self._acc_sum, data_sum),
tf.assign_add(self._acc_sum_squared, squared_data_sum),
tf.assign_add(self._acc_count, count),
tf.assign_add(self._num_accumulations, 1.))
def _mean(self):
safe_count = tf.maximum(self._acc_count, 1.)
return self._acc_sum / safe_count
def _std_with_epsilon(self):
safe_count = tf.maximum(self._acc_count, 1.)
std = tf.sqrt(self._acc_sum_squared / safe_count - self._mean()**2)
return tf.math.maximum(std, self._std_epsilon)
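# Minimal usage sketch (added for illustration, not part of the original file).
# The normalizer accumulates running statistics as it is applied, and
# `inverse` maps normalized values back to the original scale; the batch below
# is illustrative only.
if __name__ == '__main__':
  tf.disable_eager_execution()
  normalizer = Normalizer(size=3)
  batch = tf.random.normal([8, 3], mean=2.0, stddev=5.0)
  normalized = normalizer(batch, accumulate=True)
  restored = normalizer.inverse(normalized)
  with tf.train.MonitoredSession() as sess:
    print(sess.run([normalized, restored]))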
| deepmind-research-master | meshgraphnets/normalization.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TransformerBlock and MultiheadAttention modules used in the paper.
"Effective gene expression prediction from sequence by integrating long-range
interactions"
Žiga Avsec1, Vikram Agarwal2,4, Daniel Visentin1,4, Joseph R. Ledsam1,3,
Agnieszka Grabska-Barwinska1, Kyle R. Taylor1, Yannis Assael1, John Jumper1,
Pushmeet Kohli1, David R. Kelley2*
1 DeepMind, London, UK
2 Calico Life Sciences, South San Francisco, CA, USA
3 Google, Tokyo, Japan
4 These authors contributed equally.
* correspondence: [email protected], [email protected], [email protected]
Example:
```
mha = MultiheadAttention(
value_size=96,
key_size=64,
num_heads=8,
relative_position_functions=['positional_features_sin_cos'])
mha(tf.ones((2, 1024, 96*8)), is_training=True)
# Transformer block as used in the paper
transformer_block = TransformerBlock(
channels=96 * 8,
dropout_rate=0.4,
attention_kwargs=dict(
value_size=96,
key_size=64,
num_heads=8,
relative_positions=True,
relative_position_symmetric=False,
num_relative_position_features=None,
relative_position_functions=['positional_features_exponential',
'positional_features_central_mask',
'positional_features_gamma'],
positional_dropout_rate=0.01,
attention_dropout_rate=0.05,
)
)
transformer_block(tf.ones((2, 1024, 96*8)), is_training=True)
```
"""
from typing import Any, Dict, List, Optional
import numpy as np
import sonnet as snt
import tensorflow as tf
class TransformerBlock(snt.Module):
"""Full transformer module block."""
def __init__(
self,
channels: int,
dropout_rate: float,
attention_kwargs: Dict[str, Any],
name: str = 'transformer_block',
):
super().__init__(name=name)
self.mha_ln = snt.LayerNorm(axis=-1, create_scale=True, create_offset=True)
self.mha = MultiheadAttention(**attention_kwargs)
self.mha_dropout = snt.Dropout(dropout_rate)
self.mlp_ln = snt.LayerNorm(axis=-1, create_scale=True, create_offset=True)
self.mlp_linear1 = snt.Linear(channels * 2)
self.mlp_dropout1 = snt.Dropout(dropout_rate)
self.mlp_linear2 = snt.Linear(channels)
self.mlp_dropout2 = snt.Dropout(dropout_rate)
def __call__(self, inputs: tf.Tensor, is_training: bool) -> tf.Tensor:
x = self.mha_ln(inputs)
x = self.mha(x, is_training=is_training)
x = self.mha_dropout(x, is_training=is_training)
x += inputs # Residual
mha_output = x
# MLP.
x = self.mlp_ln(mha_output)
x = self.mlp_linear1(x)
x = self.mlp_dropout1(x, is_training=is_training)
x = tf.nn.relu(x)
x = self.mlp_linear2(x)
x = self.mlp_dropout2(x, is_training=is_training)
return x + mha_output
class MultiheadAttention(snt.Module):
"""Multi-head attention."""
def __init__(self,
value_size: int,
key_size: int,
num_heads: int,
scaling: bool = True,
attention_dropout_rate: float = 0.1,
relative_positions: bool = False,
relative_position_symmetric: bool = False,
relative_position_functions: Optional[List[str]] = None,
num_relative_position_features: Optional[int] = None,
positional_dropout_rate: float = 0.1,
zero_initialize: bool = True,
initializer: Optional[snt.initializers.Initializer] = None,
               name: Optional[str] = None):
"""Creates a MultiheadAttention module.
Args:
value_size: The size of each value embedding per head.
key_size: The size of each key and query embedding per head.
num_heads: The number of independent queries per timestep.
scaling: Whether to scale the attention logits.
attention_dropout_rate: Dropout rate for attention logits.
relative_positions: Whether to use TransformerXL style relative attention.
      relative_position_symmetric: If True, the symmetric version of the basis
        functions will be used. If False, both the symmetric and asymmetric
        versions will be used.
relative_position_functions: List of function names used for relative
positional biases.
num_relative_position_features: Number of relative positional features
to compute. If None, `value_size * num_heads` is used.
positional_dropout_rate: Dropout rate for the positional encodings if
relative positions are used.
zero_initialize: if True, the final linear layer will be 0 initialized.
initializer: Initializer for the projection layers. If unspecified,
VarianceScaling is used with scale = 2.0.
name: Name of module.
"""
super().__init__(name=name)
self._value_size = value_size
self._key_size = key_size
self._num_heads = num_heads
self._attention_dropout_rate = attention_dropout_rate
self._scaling = scaling
self._relative_positions = relative_positions
self._relative_position_symmetric = relative_position_symmetric
self._relative_position_functions = relative_position_functions
if num_relative_position_features is None:
# num_relative_position_features needs to be divisible by the number of
# relative positional functions *2 (for symmetric & asymmetric version).
divisible_by = 2 * len(self._relative_position_functions)
self._num_relative_position_features = (
(self._value_size // divisible_by) * divisible_by)
else:
self._num_relative_position_features = num_relative_position_features
self._positional_dropout_rate = positional_dropout_rate
self._initializer = initializer
if self._initializer is None:
self._initializer = snt.initializers.VarianceScaling(scale=2.0)
key_proj_size = self._key_size * self._num_heads
embedding_size = self._value_size * self._num_heads
self._q_layer = snt.Linear(
key_proj_size,
name='q_layer',
with_bias=False,
w_init=self._initializer)
self._k_layer = snt.Linear(
key_proj_size,
name='k_layer',
with_bias=False,
w_init=self._initializer)
self._v_layer = snt.Linear(
embedding_size,
name='v_layer',
with_bias=False,
w_init=self._initializer)
w_init = snt.initializers.Zeros() if zero_initialize else self._initializer
self._embedding_layer = snt.Linear(
embedding_size,
name='embedding_layer',
w_init=w_init)
# Create additional layers if using relative positions.
if self._relative_positions:
self._r_k_layer = snt.Linear(
key_proj_size,
name='r_k_layer',
with_bias=False,
w_init=self._initializer)
self._r_w_bias = tf.Variable(
self._initializer([1, self._num_heads, 1, self._key_size],
dtype=tf.float32),
name='r_w_bias')
self._r_r_bias = tf.Variable(
self._initializer([1, self._num_heads, 1, self._key_size],
dtype=tf.float32),
name='r_r_bias')
def _multihead_output(self, linear, inputs):
"""Applies a standard linear to inputs and returns multihead output."""
output = snt.BatchApply(linear)(inputs) # [B, T, H * KV]
num_kv_channels = output.shape[-1] // self._num_heads
# Split H * Channels into separate axes.
output = snt.reshape(output,
output_shape=[-1, self._num_heads, num_kv_channels])
# [B, T, H, KV] -> [B, H, T, KV]
return tf.transpose(output, [0, 2, 1, 3])
def __call__(self,
inputs,
is_training=False):
# Initialise the projection layers.
embedding_size = self._value_size * self._num_heads
seq_len = inputs.shape[1]
# Compute q, k and v as multi-headed projections of the inputs.
q = self._multihead_output(self._q_layer, inputs) # [B, H, T, K]
k = self._multihead_output(self._k_layer, inputs) # [B, H, T, K]
v = self._multihead_output(self._v_layer, inputs) # [B, H, T, V]
# Scale the query by the square-root of key size.
if self._scaling:
q *= self._key_size**-0.5
if self._relative_positions:
# For relative positions, we project positions to form relative keys.
distances = tf.range(-seq_len + 1, seq_len, dtype=tf.float32)[tf.newaxis]
positional_encodings = positional_features_all(
positions=distances,
feature_size=self._num_relative_position_features,
seq_length=seq_len,
feature_functions=self._relative_position_functions,
symmetric=self._relative_position_symmetric)
# [1, 2T-1, Cr]
if is_training:
positional_encodings = tf.nn.dropout(
positional_encodings, rate=self._positional_dropout_rate)
# [1, H, 2T-1, K]
r_k = self._multihead_output(self._r_k_layer, positional_encodings)
# Add shifted relative logits to content logits.
# [B, H, T', T]
content_logits = tf.matmul(q + self._r_w_bias, k, transpose_b=True)
# [B, H, T', 2T-1]
relative_logits = tf.matmul(
q + self._r_r_bias, r_k, transpose_b=True)
# [B, H, T', T]
relative_logits = relative_shift(relative_logits)
logits = content_logits + relative_logits
else:
# [B, H, T', T]
logits = tf.matmul(q, k, transpose_b=True)
weights = tf.nn.softmax(logits)
# Dropout on the attention weights.
if is_training:
weights = tf.nn.dropout(weights, rate=self._attention_dropout_rate)
# Transpose and reshape the output.
output = tf.matmul(weights, v) # [B, H, T', V]
output_transpose = tf.transpose(output, [0, 2, 1, 3]) # [B, T', H, V]
# Final linear layer.
attended_inputs = snt.reshape(
output_transpose, output_shape=[embedding_size], preserve_dims=2)
output = self._embedding_layer(attended_inputs)
return output
def relative_shift(x):
"""Shift the relative logits like in TransformerXL."""
# We prepend zeros on the final timescale dimension.
to_pad = tf.zeros_like(x[..., :1])
x = tf.concat([to_pad, x], -1)
_, num_heads, t1, t2 = x.shape
x = tf.reshape(x, [-1, num_heads, t2, t1])
x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1])
x = tf.reshape(x, [-1, num_heads, t1, t2 - 1])
x = tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, (t2 + 1) // 2])
return x
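# Illustrative example (added note, not part of the original file), tracing
# `relative_shift` for a single head with T = 2. Each input row holds logits
# over relative distances [-1, 0, +1]; after the shift, entry [i, j] holds the
# logit for distance j - i:
#
#   x = [[a0, a1, a2],        relative_shift(x) = [[a1, a2],
#        [b0, b1, b2]]                             [b0, b1]]
#
# so [B, H, T, 2T-1] relative logits become the usual [B, H, T, T] layout.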
# Available feature functions:
def get_positional_feature_function(name):
"""Returns positional feature functions."""
available = {
'positional_features_exponential': positional_features_exponential,
'positional_features_central_mask': positional_features_central_mask,
'positional_features_gamma': positional_features_gamma,
'positional_features_cosine': positional_features_cosine,
'positional_features_linear_masks': positional_features_linear_masks,
'positional_features_sin_cos': positional_features_sin_cos,
}
if name not in available:
raise ValueError(f'Function {name} not available in {available.keys()}')
return available[name]
def positional_features_all(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None,
feature_functions: Optional[List[str]] = None,
symmetric=False):
"""Compute relative positional encodings/features.
Each positional feature function will compute/provide the same fraction of
features, making up the total of feature_size.
Args:
positions: Tensor of relative positions of arbitrary shape.
feature_size: Total number of basis functions.
    seq_length: Sequence length denoting the characteristic length that
      the individual positional features can use. This is required because the
      parametrization of the features should be independent of the actual
      `positions` tensor, while it may still depend on the total sequence
      length.
    bin_size: Bin size used to partition the sequence. This can be used to
compute features on the absolute scale relative to the genome.
feature_functions: List of different feature functions to use. Each function
will take as argument: positions, sequence length and number of features
to compute.
symmetric: If True, the resulting features will be symmetric across the
relative position of 0 (i.e. only absolute value of positions will
matter). If false, then both the symmetric and asymmetric version
(symmetric multiplied by sign(positions)) of the features will be used.
Returns:
Tensor of shape: `positions.shape + (feature_size,)`.
"""
if feature_functions is None:
feature_functions = ['positional_features_exponential',
'positional_features_central_mask',
'positional_features_gamma']
num_components = len(feature_functions) # 1 per each basis function
if not symmetric:
num_components = 2 * num_components
# For now, we do not allow odd sized embeddings.
if feature_size % num_components != 0:
raise ValueError(
f'feature_size has to be divisible by {num_components}')
feature_functions = [get_positional_feature_function(f)
for f in feature_functions]
num_basis_per_class = feature_size // num_components
embeddings = tf.concat([f(tf.abs(positions), num_basis_per_class,
seq_length, bin_size)
for f in feature_functions],
axis=-1)
if not symmetric:
embeddings = tf.concat([embeddings,
tf.sign(positions)[..., tf.newaxis] * embeddings],
axis=-1)
tf.TensorShape(embeddings.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return embeddings
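# Sizing note with a small example (added, not part of the original file):
# with the default three feature functions and symmetric=False there are
# 2 * 3 = 6 components, so `feature_size` must be divisible by 6. For
# instance, feature_size = 192 gives 192 // 6 = 32 basis functions per
# component, concatenated into a tensor of shape positions.shape + (192,).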
def _prepend_dims(x, num_dims):
return tf.reshape(x, shape=[1] * num_dims + x.shape)
def positional_features_exponential(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None,
min_half_life: Optional[float] = 3.0):
"""Create exponentially decaying positional weights.
Args:
positions: Position tensor (arbitrary shape).
feature_size: Number of basis functions to use.
seq_length: Sequence length.
bin_size: (unused). See `positional_features_all`.
min_half_life: Smallest exponential half life in the grid of half lives.
Returns:
A Tensor with shape [2 * seq_length - 1, feature_size].
"""
del bin_size # Unused.
if seq_length is None:
seq_length = tf.reduce_max(tf.abs(positions)) + 1
  # Grid of half-lives from [2**min_half_life, seq_length], with feature_size
  # values distributed on the log scale.
seq_length = tf.cast(seq_length, dtype=tf.float32)
max_range = tf.math.log(seq_length) / tf.math.log(2.0)
half_life = tf.pow(2.0, tf.linspace(min_half_life, max_range, feature_size))
half_life = _prepend_dims(half_life, positions.shape.rank)
positions = tf.abs(positions)
outputs = tf.exp(-tf.math.log(2.0) / half_life * positions[..., tf.newaxis])
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
def positional_features_central_mask(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None):
"""Positional features using a central mask (allow only central features)."""
del seq_length # Unused.
del bin_size # Unused.
center_widths = tf.pow(2.0, tf.range(1, feature_size + 1, dtype=tf.float32))
center_widths = center_widths - 1
center_widths = _prepend_dims(center_widths, positions.shape.rank)
outputs = tf.cast(center_widths > tf.abs(positions)[..., tf.newaxis],
tf.float32)
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
def gamma_pdf(x, concentration, rate):
"""Gamma probability distribution function: p(x|concentration, rate)."""
log_unnormalized_prob = tf.math.xlogy(concentration - 1., x) - rate * x
log_normalization = (tf.math.lgamma(concentration) -
concentration * tf.math.log(rate))
return tf.exp(log_unnormalized_prob - log_normalization)
def positional_features_gamma(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None,
stddev=None,
start_mean=None):
"""Positional features computed using the gamma distributions."""
del bin_size # Unused.
if seq_length is None:
seq_length = tf.reduce_max(tf.abs(positions)) + 1
if stddev is None:
stddev = seq_length / (2 * feature_size)
if start_mean is None:
start_mean = seq_length / feature_size
mean = tf.linspace(start_mean, seq_length, num=feature_size)
mean = _prepend_dims(mean, positions.shape.rank)
concentration = (mean / stddev)**2
rate = mean / stddev**2
probabilities = gamma_pdf(
tf.abs(tf.cast(positions, dtype=tf.float32))[..., tf.newaxis],
concentration, rate)
probabilities += 1e-8 # To ensure numerical stability.
outputs = probabilities / tf.reduce_max(probabilities,
axis=1, keepdims=True)
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
def positional_features_cosine(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None):
"""Cosine positional features."""
del bin_size # Unused.
del seq_length # Unused.
periodicity = 1.25 * tf.pow(2.0, tf.range(0, feature_size, dtype=tf.float32))
periodicity = _prepend_dims(periodicity, positions.shape.rank)
outputs = tf.math.cos(2 * np.pi * positions[..., tf.newaxis] / periodicity)
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
def positional_features_linear_masks(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None):
"""Exponentially increasing point focuses."""
del bin_size # Unused.
del seq_length # Unused.
distances = tf.range(0, feature_size, dtype=tf.float32)
distances = _prepend_dims(distances, positions.shape.rank)
outputs = tf.cast(distances == tf.abs(positions[..., tf.newaxis]),
dtype=tf.float32)
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
def positional_features_sin_cos(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None,
max_time=10000.0):
"""Sine/cosine positional encodings."""
del bin_size # Unused.
del seq_length # Unused.
if feature_size % 2 != 0:
raise ValueError('feature_size needs to be divisible by 2.')
i = tf.range(0, feature_size, 2, dtype=tf.float32)
i = _prepend_dims(i, positions.shape.rank)
# Concat sines and cosines and return.
outputs = tf.concat([
tf.sin(positions[..., tf.newaxis] / max_time**(i / feature_size)),
tf.cos(positions[..., tf.newaxis] / max_time**(i / feature_size))], -1)
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
| deepmind-research-master | enformer/attention_module.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow implementation of Enformer model.
"Effective gene expression prediction from sequence by integrating long-range
interactions"
Žiga Avsec1, Vikram Agarwal2,4, Daniel Visentin1,4, Joseph R. Ledsam1,3,
Agnieszka Grabska-Barwinska1, Kyle R. Taylor1, Yannis Assael1, John Jumper1,
Pushmeet Kohli1, David R. Kelley2*
1 DeepMind, London, UK
2 Calico Life Sciences, South San Francisco, CA, USA
3 Google, Tokyo, Japan
4 These authors contributed equally.
* correspondence: [email protected], [email protected], [email protected]
"""
import inspect
from typing import Any, Callable, Dict, Optional, Text, Union, Iterable
import attention_module
import numpy as np
import sonnet as snt
import tensorflow as tf
SEQUENCE_LENGTH = 196_608
BIN_SIZE = 128
TARGET_LENGTH = 896
class Enformer(snt.Module):
"""Main model."""
def __init__(self,
channels: int = 1536,
num_transformer_layers: int = 11,
num_heads: int = 8,
pooling_type: str = 'attention',
name: str = 'enformer'):
"""Enformer model.
Args:
channels: Number of convolutional filters and the overall 'width' of the
model.
num_transformer_layers: Number of transformer layers.
num_heads: Number of attention heads.
      pooling_type: Which pooling function to use. Options: 'attention' or 'max'.
name: Name of sonnet module.
"""
super().__init__(name=name)
# pylint: disable=g-complex-comprehension,g-long-lambda,cell-var-from-loop
heads_channels = {'human': 5313, 'mouse': 1643}
dropout_rate = 0.4
assert channels % num_heads == 0, ('channels needs to be divisible '
f'by {num_heads}')
whole_attention_kwargs = {
'attention_dropout_rate': 0.05,
'initializer': None,
'key_size': 64,
'num_heads': num_heads,
'num_relative_position_features': channels // num_heads,
'positional_dropout_rate': 0.01,
'relative_position_functions': [
'positional_features_exponential',
'positional_features_central_mask',
'positional_features_gamma'
],
'relative_positions': True,
'scaling': True,
'value_size': channels // num_heads,
'zero_initialize': True
}
trunk_name_scope = tf.name_scope('trunk')
trunk_name_scope.__enter__()
# lambda is used in Sequential to construct the module under tf.name_scope.
def conv_block(filters, width=1, w_init=None, name='conv_block', **kwargs):
return Sequential(lambda: [
snt.distribute.CrossReplicaBatchNorm(
create_scale=True,
create_offset=True,
scale_init=snt.initializers.Ones(),
moving_mean=snt.ExponentialMovingAverage(0.9),
moving_variance=snt.ExponentialMovingAverage(0.9)),
gelu,
snt.Conv1D(filters, width, w_init=w_init, **kwargs)
], name=name)
stem = Sequential(lambda: [
snt.Conv1D(channels // 2, 15),
Residual(conv_block(channels // 2, 1, name='pointwise_conv_block')),
pooling_module(pooling_type, pool_size=2),
], name='stem')
filter_list = exponential_linspace_int(start=channels // 2, end=channels,
num=6, divisible_by=128)
conv_tower = Sequential(lambda: [
Sequential(lambda: [
conv_block(num_filters, 5),
Residual(conv_block(num_filters, 1, name='pointwise_conv_block')),
pooling_module(pooling_type, pool_size=2),
],
name=f'conv_tower_block_{i}')
for i, num_filters in enumerate(filter_list)], name='conv_tower')
# Transformer.
def transformer_mlp():
return Sequential(lambda: [
snt.LayerNorm(axis=-1, create_scale=True, create_offset=True),
snt.Linear(channels * 2),
snt.Dropout(dropout_rate),
tf.nn.relu,
snt.Linear(channels),
snt.Dropout(dropout_rate)], name='mlp')
transformer = Sequential(lambda: [
Sequential(lambda: [
Residual(Sequential(lambda: [
snt.LayerNorm(axis=-1,
create_scale=True, create_offset=True,
scale_init=snt.initializers.Ones()),
attention_module.MultiheadAttention(**whole_attention_kwargs,
name=f'attention_{i}'),
snt.Dropout(dropout_rate)], name='mha')),
Residual(transformer_mlp())], name=f'transformer_block_{i}')
for i in range(num_transformer_layers)], name='transformer')
crop_final = TargetLengthCrop1D(TARGET_LENGTH, name='target_input')
final_pointwise = Sequential(lambda: [
conv_block(channels * 2, 1),
snt.Dropout(dropout_rate / 8),
gelu], name='final_pointwise')
self._trunk = Sequential([stem,
conv_tower,
transformer,
crop_final,
final_pointwise],
name='trunk')
trunk_name_scope.__exit__(None, None, None)
with tf.name_scope('heads'):
self._heads = {
head: Sequential(
lambda: [snt.Linear(num_channels), tf.nn.softplus],
name=f'head_{head}')
for head, num_channels in heads_channels.items()
}
# pylint: enable=g-complex-comprehension,g-long-lambda,cell-var-from-loop
@property
def trunk(self):
return self._trunk
@property
def heads(self):
return self._heads
def __call__(self, inputs: tf.Tensor,
is_training: bool) -> Dict[str, tf.Tensor]:
trunk_embedding = self.trunk(inputs, is_training=is_training)
return {
head: head_module(trunk_embedding, is_training=is_training)
for head, head_module in self.heads.items()
}
@tf.function(input_signature=[
tf.TensorSpec([None, SEQUENCE_LENGTH, 4], tf.float32)])
def predict_on_batch(self, x):
"""Method for SavedModel."""
return self(x, is_training=False)
class TargetLengthCrop1D(snt.Module):
"""Crop sequence to match the desired target length."""
def __init__(self,
target_length: Optional[int],
name: str = 'target_length_crop'):
super().__init__(name=name)
self._target_length = target_length
def __call__(self, inputs):
if self._target_length is None:
return inputs
trim = (inputs.shape[-2] - self._target_length) // 2
if trim < 0:
      raise ValueError('inputs shorter than target length')
elif trim == 0:
return inputs
else:
return inputs[..., trim:-trim, :]
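# Worked example (added note, not part of the original file): with the
# constants above, the trunk pools 196_608 bp down to 196_608 / 128 = 1536
# bins (one stem pooling plus six conv-tower poolings of 2), and
# TargetLengthCrop1D(896) trims (1536 - 896) // 2 = 320 bins from each side,
# keeping the central 896 positions.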
class Sequential(snt.Module):
"""snt.Sequential automatically passing is_training where it exists."""
def __init__(self,
layers: Optional[Union[Callable[[], Iterable[snt.Module]],
Iterable[Callable[..., Any]]]] = None,
name: Optional[Text] = None):
super().__init__(name=name)
if layers is None:
self._layers = []
else:
# layers wrapped in a lambda function to have a common namespace.
if hasattr(layers, '__call__'):
layers = layers()
self._layers = [layer for layer in layers if layer is not None]
def __call__(self, inputs: tf.Tensor, is_training: bool, **kwargs):
outputs = inputs
for _, mod in enumerate(self._layers):
if accepts_is_training(mod):
outputs = mod(outputs, is_training=is_training, **kwargs)
else:
outputs = mod(outputs, **kwargs)
return outputs
def pooling_module(kind, pool_size):
"""Pooling module wrapper."""
if kind == 'attention':
return SoftmaxPooling1D(pool_size=pool_size, per_channel=True,
w_init_scale=2.0)
elif kind == 'max':
return tf.keras.layers.MaxPool1D(pool_size=pool_size, padding='same')
else:
raise ValueError(f'Invalid pooling kind: {kind}.')
class SoftmaxPooling1D(snt.Module):
"""Pooling operation with optional weights."""
def __init__(self,
pool_size: int = 2,
per_channel: bool = False,
w_init_scale: float = 0.0,
name: str = 'softmax_pooling'):
"""Softmax pooling.
Args:
pool_size: Pooling size, same as in Max/AvgPooling.
per_channel: If True, the logits/softmax weights will be computed for
each channel separately. If False, same weights will be used across all
channels.
      w_init_scale: When 0.0, this is equivalent to average pooling; when
        large (~2.0) and `per_channel=False`, it approaches max pooling.
name: Module name.
"""
super().__init__(name=name)
self._pool_size = pool_size
self._per_channel = per_channel
self._w_init_scale = w_init_scale
self._logit_linear = None
@snt.once
def _initialize(self, num_features):
self._logit_linear = snt.Linear(
output_size=num_features if self._per_channel else 1,
with_bias=False, # Softmax is agnostic to shifts.
w_init=snt.initializers.Identity(self._w_init_scale))
def __call__(self, inputs):
_, length, num_features = inputs.shape
self._initialize(num_features)
inputs = tf.reshape(
inputs,
(-1, length // self._pool_size, self._pool_size, num_features))
return tf.reduce_sum(
inputs * tf.nn.softmax(self._logit_linear(inputs), axis=-2),
axis=-2)
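# Illustrative note (added, not part of the original file): the reshape above
# assumes `length` is divisible by `pool_size`. For a hypothetical input of
# shape [B, 1536, C] with pool_size=2, the input is viewed as [B, 768, 2, C],
# a softmax over the window axis weights each pair, and the weighted sum
# returns a [B, 768, C] tensor (plain average pooling when w_init_scale=0.0).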
class Residual(snt.Module):
"""Residual block."""
def __init__(self, module: snt.Module, name='residual'):
super().__init__(name=name)
self._module = module
def __call__(self, inputs: tf.Tensor, is_training: bool, *args,
**kwargs) -> tf.Tensor:
return inputs + self._module(inputs, is_training, *args, **kwargs)
def gelu(x: tf.Tensor) -> tf.Tensor:
"""Applies the Gaussian error linear unit (GELU) activation function.
  Using the approximation from Section 2 of the original paper:
https://arxiv.org/abs/1606.08415
Args:
x: Input tensor to apply gelu activation.
Returns:
Tensor with gelu activation applied to it.
"""
return tf.nn.sigmoid(1.702 * x) * x
def one_hot_encode(sequence: str,
alphabet: str = 'ACGT',
neutral_alphabet: str = 'N',
neutral_value: Any = 0,
dtype=np.float32) -> np.ndarray:
"""One-hot encode sequence."""
def to_uint8(string):
return np.frombuffer(string.encode('ascii'), dtype=np.uint8)
hash_table = np.zeros((np.iinfo(np.uint8).max, len(alphabet)), dtype=dtype)
hash_table[to_uint8(alphabet)] = np.eye(len(alphabet), dtype=dtype)
hash_table[to_uint8(neutral_alphabet)] = neutral_value
hash_table = hash_table.astype(dtype)
return hash_table[to_uint8(sequence)]
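# Example usage (added, not part of the original file):
#
#   one_hot_encode('ACGTN')
#   # array([[1., 0., 0., 0.],
#   #        [0., 1., 0., 0.],
#   #        [0., 0., 1., 0.],
#   #        [0., 0., 0., 1.],
#   #        [0., 0., 0., 0.]], dtype=float32)
#
# The neutral character 'N' maps to an all-zeros row via neutral_value=0.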
def exponential_linspace_int(start, end, num, divisible_by=1):
"""Exponentially increasing values of integers."""
def _round(x):
return int(np.round(x / divisible_by) * divisible_by)
base = np.exp(np.log(end / start) / (num - 1))
return [_round(start * base**i) for i in range(num)]
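# Example usage (added, not part of the original file), matching the conv
# tower above with channels=1536:
#
#   exponential_linspace_int(start=768, end=1536, num=6, divisible_by=128)
#   # [768, 896, 1024, 1152, 1280, 1536]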
def accepts_is_training(module):
return 'is_training' in list(inspect.signature(module.__call__).parameters)
| deepmind-research-master | enformer/enformer.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test enformer model by applying random sequence as input.
Test:
$ python enformer_test.py
"""
import random
import unittest
import enformer
import numpy as np
class TestEnformer(unittest.TestCase):
def test_enformer(self):
model = enformer.Enformer(channels=1536, num_transformer_layers=11)
inputs = _get_random_input()
outputs = model(inputs, is_training=True)
self.assertEqual(outputs['human'].shape, (1, enformer.TARGET_LENGTH, 5313))
self.assertEqual(outputs['mouse'].shape, (1, enformer.TARGET_LENGTH, 1643))
def _get_random_input():
seq = ''.join(
[random.choice('ACGT') for _ in range(enformer.SEQUENCE_LENGTH)])
return np.expand_dims(enformer.one_hot_encode(seq), 0).astype(np.float32)
if __name__ == '__main__':
unittest.main()
| deepmind-research-master | enformer/enformer_test.py |
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests for curl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from curl import training
class TrainingTest(absltest.TestCase):
def testRunTraining(self):
training.run_training(
dataset='mnist',
output_type='bernoulli',
n_y=10,
n_y_active=1,
training_data_type='sequential',
n_concurrent_classes=1,
lr_init=1e-3,
lr_factor=1.,
lr_schedule=[1],
blend_classes=False,
train_supervised=False,
n_steps=1000,
report_interval=1000,
knn_values=[3],
random_seed=1,
encoder_kwargs={
'encoder_type': 'multi',
'n_enc': [1200, 600, 300, 150],
'enc_strides': [1],
},
decoder_kwargs={
'decoder_type': 'single',
'n_dec': [500, 500],
'dec_up_strides': None,
},
n_z=32,
dynamic_expansion=True,
ll_thresh=-200.0,
classify_with_samples=False,
gen_replay_type='fixed',
use_supervised_replay=False,
)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | curl/unit_test.py |
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Runs the supervised CL benchmark experiments in the paper."""
from absl import app
from absl import flags
from curl import training
flags.DEFINE_enum('dataset', 'mnist', ['mnist', 'omniglot'], 'Dataset.')
FLAGS = flags.FLAGS
def main(unused_argv):
training.run_training(
dataset=FLAGS.dataset,
output_type='bernoulli',
n_y=10,
n_y_active=10,
training_data_type='sequential',
n_concurrent_classes=2,
lr_init=1e-3,
lr_factor=1.,
lr_schedule=[1],
train_supervised=True,
blend_classes=False,
n_steps=100000,
report_interval=10000,
knn_values=[],
random_seed=1,
encoder_kwargs={
'encoder_type': 'multi',
'n_enc': [400, 400],
'enc_strides': [1],
},
decoder_kwargs={
'decoder_type': 'single',
'n_dec': [400, 400],
'dec_up_strides': None,
},
n_z=32,
dynamic_expansion=False,
ll_thresh=-10000.0,
classify_with_samples=False,
gen_replay_type='fixed',
use_supervised_replay=False,
)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | curl/train_sup.py |
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Implementation of Continual Unsupervised Representation Learning model."""
from absl import logging
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from curl import layers
from curl import utils
tfc = tf.compat.v1
# pylint: disable=g-long-lambda
# pylint: disable=redefined-outer-name
class SharedEncoder(snt.AbstractModule):
"""The shared encoder module, mapping input x to hiddens."""
def __init__(self, encoder_type, n_enc, enc_strides, name='shared_encoder'):
"""The shared encoder function, mapping input x to hiddens.
Args:
encoder_type: str, type of encoder, either 'conv' or 'multi'
n_enc: list, number of hidden units per layer in the encoder
enc_strides: list, stride in each layer (only for 'conv' encoder_type)
name: str, module name used for tf scope.
"""
super(SharedEncoder, self).__init__(name=name)
self._encoder_type = encoder_type
if encoder_type == 'conv':
self.shared_encoder = layers.SharedConvModule(
filters=n_enc,
strides=enc_strides,
kernel_size=3,
activation=tf.nn.relu)
elif encoder_type == 'multi':
self.shared_encoder = snt.nets.MLP(
name='mlp_shared_encoder',
output_sizes=n_enc,
activation=tf.nn.relu,
activate_final=True)
else:
raise ValueError('Unknown encoder_type {}'.format(encoder_type))
def _build(self, x, is_training):
if self._encoder_type == 'multi':
self.conv_shapes = None
x = snt.BatchFlatten()(x)
return self.shared_encoder(x)
else:
output = self.shared_encoder(x)
self.conv_shapes = self.shared_encoder.conv_shapes
return output
def cluster_encoder_fn(hiddens, n_y_active, n_y, is_training=True):
"""The cluster encoder function, modelling q(y | x).
Args:
hiddens: The shared encoder activations, 2D `Tensor` of size `[B, ...]`.
n_y_active: Tensor, the number of active components.
n_y: int, number of maximum components allowed (used for tensor size)
is_training: Boolean, whether to build the training graph or an evaluation
graph.
Returns:
The distribution `q(y | x)`.
"""
del is_training # unused for now
with tf.control_dependencies([tfc.assert_rank(hiddens, 2)]):
lin = snt.Linear(n_y, name='mlp_cluster_encoder_final')
logits = lin(hiddens)
# Only use the first n_y_active components, and set the remaining to zero.
if n_y > 1:
probs = tf.nn.softmax(logits[:, :n_y_active])
logging.info('Cluster softmax active probs shape: %s', str(probs.shape))
paddings1 = tf.stack([tf.constant(0), tf.constant(0)], axis=0)
paddings2 = tf.stack([tf.constant(0), n_y - n_y_active], axis=0)
paddings = tf.stack([paddings1, paddings2], axis=1)
probs = tf.pad(probs, paddings) + 0.0 * logits + 1e-12
else:
probs = tf.ones_like(logits)
logging.info('Cluster softmax probs shape: %s', str(probs.shape))
return tfp.distributions.OneHotCategorical(probs=probs)
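# Illustrative example (added note, not part of the original file): with
# n_y = 10 and n_y_active = 3, the softmax is taken over the first 3 logits
# only and then zero-padded back to width 10, so each row of `probs` looks
# like [p0, p1, p2, 0, ..., 0] plus 1e-12 everywhere for numerical safety.
# The `0.0 * logits` term appears to keep `probs` connected to all logits in
# the graph without changing its value.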
def latent_encoder_fn(hiddens, y, n_y, n_z, is_training=True):
"""The latent encoder function, modelling q(z | x, y).
Args:
hiddens: The shared encoder activations, 2D `Tensor` of size `[B, ...]`.
y: Categorical cluster variable, `Tensor` of size `[B, n_y]`.
n_y: int, number of dims of y.
n_z: int, number of dims of z.
is_training: Boolean, whether to build the training graph or an evaluation
graph.
Returns:
The Gaussian distribution `q(z | x, y)`.
"""
del is_training # unused for now
with tf.control_dependencies([tfc.assert_rank(hiddens, 2)]):
# Logits for both mean and variance
n_logits = 2 * n_z
all_logits = []
for k in range(n_y):
lin = snt.Linear(n_logits, name='mlp_latent_encoder_' + str(k))
all_logits.append(lin(hiddens))
# Sum over cluster components.
all_logits = tf.stack(all_logits) # [n_y, B, n_logits]
logits = tf.einsum('ij,jik->ik', y, all_logits)
# Compute distribution from logits.
return utils.generate_gaussian(
logits=logits, sigma_nonlin='softplus', sigma_param='var')
def data_decoder_fn(z,
y,
output_type,
output_shape,
decoder_type,
n_dec,
dec_up_strides,
n_x,
n_y,
shared_encoder_conv_shapes=None,
is_training=True,
test_local_stats=True):
"""The data decoder function, modelling p(x | z).
Args:
z: Latent variables, `Tensor` of size `[B, n_z]`.
y: Categorical cluster variable, `Tensor` of size `[B, n_y]`.
output_type: str, output distribution ('bernoulli' or 'quantized_normal').
output_shape: list, shape of output (not including batch dimension).
decoder_type: str, 'single', 'multi', or 'deconv'.
n_dec: list, number of hidden units per layer in the decoder
dec_up_strides: list, stride in each layer (only for 'deconv' decoder_type).
n_x: int, number of dims of x.
n_y: int, number of dims of y.
shared_encoder_conv_shapes: the shapes of the activations of the
intermediate layers of the encoder,
is_training: Boolean, whether to build the training graph or an evaluation
graph.
test_local_stats: Boolean, whether to use the test batch statistics at test
time for batch norm (default) or the moving averages.
Returns:
The Bernoulli distribution `p(x | z)`.
"""
if output_type == 'bernoulli':
output_dist = lambda x: tfp.distributions.Bernoulli(logits=x)
n_out_factor = 1
out_shape = list(output_shape)
else:
raise NotImplementedError
if len(z.shape) != 2:
raise NotImplementedError('The data decoder function expects `z` to be '
'2D, but its shape was %s instead.' %
str(z.shape))
if len(y.shape) != 2:
raise NotImplementedError('The data decoder function expects `y` to be '
'2D, but its shape was %s instead.' %
str(y.shape))
# Upsample layer (deconvolutional, bilinear, ..).
if decoder_type == 'deconv':
# First, check that the encoder is convolutional too (needed for batchnorm)
if shared_encoder_conv_shapes is None:
raise ValueError('Shared encoder does not contain conv_shapes.')
num_output_channels = output_shape[-1]
conv_decoder = UpsampleModule(
filters=n_dec,
kernel_size=3,
activation=tf.nn.relu,
dec_up_strides=dec_up_strides,
enc_conv_shapes=shared_encoder_conv_shapes,
n_c=num_output_channels * n_out_factor,
method=decoder_type)
logits = conv_decoder(
z, is_training=is_training, test_local_stats=test_local_stats)
logits = tf.reshape(logits, [-1] + out_shape) # n_out_factor in last dim
# Multiple MLP decoders, one for each component.
elif decoder_type == 'multi':
all_logits = []
for k in range(n_y):
mlp_decoding = snt.nets.MLP(
name='mlp_latent_decoder_' + str(k),
output_sizes=n_dec + [n_x * n_out_factor],
activation=tf.nn.relu,
activate_final=False)
logits = mlp_decoding(z)
all_logits.append(logits)
all_logits = tf.stack(all_logits)
logits = tf.einsum('ij,jik->ik', y, all_logits)
logits = tf.reshape(logits, [-1] + out_shape) # Back to 4D
# Single (shared among components) MLP decoder.
elif decoder_type == 'single':
mlp_decoding = snt.nets.MLP(
name='mlp_latent_decoder',
output_sizes=n_dec + [n_x * n_out_factor],
activation=tf.nn.relu,
activate_final=False)
logits = mlp_decoding(z)
logits = tf.reshape(logits, [-1] + out_shape) # Back to 4D
else:
raise ValueError('Unknown decoder_type {}'.format(decoder_type))
return output_dist(logits)
def latent_decoder_fn(y, n_z, is_training=True):
"""The latent decoder function, modelling p(z | y).
Args:
y: Categorical cluster variable, `Tensor` of size `[B, n_y]`.
n_z: int, number of dims of z.
is_training: Boolean, whether to build the training graph or an evaluation
graph.
Returns:
The Gaussian distribution `p(z | y)`.
"""
del is_training # Unused for now.
if len(y.shape) != 2:
raise NotImplementedError('The latent decoder function expects `y` to be '
'2D, but its shape was %s instead.' %
str(y.shape))
lin_mu = snt.Linear(n_z, name='latent_prior_mu')
lin_sigma = snt.Linear(n_z, name='latent_prior_sigma')
mu = lin_mu(y)
sigma = lin_sigma(y)
logits = tf.concat([mu, sigma], axis=1)
return utils.generate_gaussian(
logits=logits, sigma_nonlin='softplus', sigma_param='var')
class Curl(object):
"""CURL model class."""
def __init__(self,
prior,
latent_decoder,
data_decoder,
shared_encoder,
cluster_encoder,
latent_encoder,
n_y_active,
kly_over_batch=False,
is_training=True,
name='curl'):
self.scope_name = name
self._shared_encoder = shared_encoder
self._prior = prior
self._latent_decoder = latent_decoder
self._data_decoder = data_decoder
self._cluster_encoder = cluster_encoder
self._latent_encoder = latent_encoder
self._n_y_active = n_y_active
self._kly_over_batch = kly_over_batch
self._is_training = is_training
self._cache = {}
def sample(self, sample_shape=(), y=None, mean=False):
"""Draws a sample from the learnt distribution p(x).
Args:
sample_shape: `int` or 0D `Tensor` giving the number of samples to return.
If empty tuple (default value), 1 sample will be returned.
y: Optional, the one hot label on which to condition the sample.
mean: Boolean, if True the expected value of the output distribution is
returned, otherwise samples from the output distribution.
Returns:
Sample tensor of shape `[B * N, ...]` where `B` is the batch size of
the prior, `N` is the number of samples requested, and `...` represents
the shape of the observations.
Raises:
ValueError: If both `sample_shape` and `n` are provided.
ValueError: If `sample_shape` has rank > 0 or if `sample_shape`
is an int that is < 1.
"""
with tf.name_scope('{}_sample'.format(self.scope_name)):
if y is None:
y = tf.to_float(self.compute_prior().sample(sample_shape))
if y.shape.ndims > 2:
y = snt.MergeDims(start=0, size=y.shape.ndims - 1, name='merge_y')(y)
z = self._latent_decoder(y, is_training=self._is_training)
if mean:
samples = self.predict(z.sample(), y).mean()
else:
samples = self.predict(z.sample(), y).sample()
return samples
def reconstruct(self, x, use_mode=True, use_mean=False):
"""Reconstructs the given observations.
Args:
x: Observed `Tensor`.
use_mode: Boolean, if true, take the argmax over q(y|x)
use_mean: Boolean, if true, use pixel-mean for reconstructions.
Returns:
The reconstructed samples x ~ p(x | y~q(y|x), z~q(z|x, y)).
"""
hiddens = self._shared_encoder(x, is_training=self._is_training)
qy = self.infer_cluster(hiddens)
y_sample = qy.mode() if use_mode else qy.sample()
y_sample = tf.to_float(y_sample)
qz = self.infer_latent(hiddens, y_sample)
p = self.predict(qz.sample(), y_sample)
if use_mean:
return p.mean()
else:
return p.sample()
def log_prob(self, x):
"""Redirects to log_prob_elbo with a warning."""
logging.warn('log_prob is actually a lower bound')
return self.log_prob_elbo(x)
def log_prob_elbo(self, x):
"""Returns evidence lower bound."""
log_p_x, kl_y, kl_z = self.log_prob_elbo_components(x)[:3]
return log_p_x - kl_y - kl_z
def log_prob_elbo_components(self, x, y=None, reduce_op=tf.reduce_sum):
"""Returns the components used in calculating the evidence lower bound.
Args:
x: Observed variables, `Tensor` of size `[B, I]` where `I` is the size of
a flattened input.
y: Optional labels, `Tensor` of size `[B, I]` where `I` is the size of a
flattened input.
reduce_op: The op to use for reducing across non-batch dimensions.
Typically either `tf.reduce_sum` or `tf.reduce_mean`.
Returns:
`log p(x|y,z)` of shape `[B]` where `B` is the batch size.
`KL[q(y|x) || p(y)]` of shape `[B]` where `B` is the batch size.
`KL[q(z|x,y) || p(z|y)]` of shape `[B]` where `B` is the batch size.
"""
cache_key = (x,)
# Checks if the output graph for this inputs has already been computed.
if cache_key in self._cache:
return self._cache[cache_key]
with tf.name_scope('{}_log_prob_elbo'.format(self.scope_name)):
hiddens = self._shared_encoder(x, is_training=self._is_training)
# 1) Compute KL[q(y|x) || p(y)] from x, and keep distribution q_y around
kl_y, q_y = self._kl_and_qy(hiddens) # [B], distribution
# For the next two terms, we need to marginalise over all y.
# First, construct every possible y indexing (as a one hot) and repeat it
# for every element in the batch [n_y_active, B, n_y].
      # Note that the one-hots span all n_y dimensions, while only the codes
      # corresponding to active components are instantiated.
bs, n_y = q_y.probs.shape
all_y = tf.tile(
tf.expand_dims(tf.one_hot(tf.range(self._n_y_active),
n_y), axis=1),
multiples=[1, bs, 1])
# 2) Compute KL[q(z|x,y) || p(z|y)] (for all possible y), and keep z's
# around [n_y, B] and [n_y, B, n_z]
kl_z_all, z_all = tf.map_fn(
fn=lambda y: self._kl_and_z(hiddens, y),
elems=all_y,
dtype=(tf.float32, tf.float32),
name='elbo_components_z_map')
kl_z_all = tf.transpose(kl_z_all, name='kl_z_all')
# Now take the expectation over y (scale by q(y|x))
y_logits = q_y.logits[:, :self._n_y_active] # [B, n_y]
y_probs = q_y.probs[:, :self._n_y_active] # [B, n_y]
y_probs = y_probs / tf.reduce_sum(y_probs, axis=1, keepdims=True)
kl_z = tf.reduce_sum(y_probs * kl_z_all, axis=1)
# 3) Evaluate logp and recon, i.e., log and mean of p(x|z,[y])
# (conditioning on y only in the `multi` decoder_type case, when
# train_supervised is True). Here we take the reconstruction from each
# possible component y and take its log prob. [n_y, B, Ix, Iy, Iz]
log_p_x_all = tf.map_fn(
fn=lambda val: self.predict(val[0], val[1]).log_prob(x),
elems=(z_all, all_y),
dtype=tf.float32,
name='elbo_components_logpx_map')
# Sum log probs over all dimensions apart from the first two (n_y, B),
# i.e., over I. Use einsum to construct higher order multiplication.
log_p_x_all = snt.BatchFlatten(preserve_dims=2)(log_p_x_all) # [n_y,B,I]
# Note, this is E_{q(y|x)} [ log p(x | z, y)], i.e., we scale log_p_x_all
# by q(y|x).
log_p_x = tf.einsum('ij,jik->ik', y_probs, log_p_x_all) # [B, I]
# We may also use a supervised loss for some samples [B, n_y]
if y is not None:
self.y_label = tf.one_hot(y, n_y)
else:
self.y_label = tfc.placeholder(
shape=[bs, n_y], dtype=tf.float32, name='y_label')
# This is computing log p(x | z, y=true_y)], which is basically equivalent
# to indexing into the correct element of `log_p_x_all`.
log_p_x_sup = tf.einsum('ij,jik->ik',
self.y_label[:, :self._n_y_active],
log_p_x_all) # [B, I]
kl_z_sup = tf.einsum('ij,ij->i',
self.y_label[:, :self._n_y_active],
kl_z_all) # [B]
# -log q(y=y_true | x)
kl_y_sup = tf.nn.sparse_softmax_cross_entropy_with_logits( # [B]
labels=tf.argmax(self.y_label[:, :self._n_y_active], axis=1),
logits=y_logits)
# Reduce over all dimension except batch.
dims_x = [k for k in range(1, log_p_x.shape.ndims)]
log_p_x = reduce_op(log_p_x, dims_x, name='log_p_x')
log_p_x_sup = reduce_op(log_p_x_sup, dims_x, name='log_p_x_sup')
# Store values needed externally
self.q_y = q_y
self.log_p_x_all = tf.transpose(
reduce_op(
log_p_x_all,
-1, # [B, n_y]
name='log_p_x_all'))
self.kl_z_all = kl_z_all
self.y_probs = y_probs
self._cache[cache_key] = (log_p_x, kl_y, kl_z, log_p_x_sup, kl_y_sup,
kl_z_sup)
return log_p_x, kl_y, kl_z, log_p_x_sup, kl_y_sup, kl_z_sup
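  # Shape sketch of the marginalisation above (added note, not part of the
  # original file): with batch size B and K = n_y_active, `all_y` is
  # [K, B, n_y], `kl_z_all` is transposed to [B, K], `log_p_x_all` is
  # flattened to [K, B, I], and the einsum with `y_probs` ([B, K]) yields the
  # expected log-likelihood [B, I] under q(y|x).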
def _kl_and_qy(self, hiddens):
"""Returns analytical or sampled KL div and the distribution q(y | x).
Args:
hiddens: The shared encoder activations, 2D `Tensor` of size `[B, ...]`.
Returns:
Pair `(kl, y)`, where `kl` is the KL divergence (a `Tensor` with shape
`[B]`, where `B` is the batch size), and `y` is a sample from the
categorical encoding distribution.
"""
with tf.control_dependencies([tfc.assert_rank(hiddens, 2)]):
q = self.infer_cluster(hiddens) # q(y|x)
p = self.compute_prior() # p(y)
try:
# Take the average proportions over whole batch then repeat it in each row
# before computing the KL
if self._kly_over_batch:
probs = tf.reduce_mean(
q.probs, axis=0, keepdims=True) * tf.ones_like(q.probs)
qmean = tfp.distributions.OneHotCategorical(probs=probs)
kl = tfp.distributions.kl_divergence(qmean, p)
else:
kl = tfp.distributions.kl_divergence(q, p)
except NotImplementedError:
y = q.sample(name='y_sample')
logging.warn('Using sampling KLD for y')
log_p_y = p.log_prob(y, name='log_p_y')
log_q_y = q.log_prob(y, name='log_q_y')
# Reduce over all dimension except batch.
sum_axis_p = [k for k in range(1, log_p_y.get_shape().ndims)]
log_p_y = tf.reduce_sum(log_p_y, sum_axis_p)
sum_axis_q = [k for k in range(1, log_q_y.get_shape().ndims)]
log_q_y = tf.reduce_sum(log_q_y, sum_axis_q)
kl = log_q_y - log_p_y
# Reduce over all dimension except batch.
sum_axis_kl = [k for k in range(1, kl.get_shape().ndims)]
kl = tf.reduce_sum(kl, sum_axis_kl, name='kl')
return kl, q
def _kl_and_z(self, hiddens, y):
"""Returns KL[q(z|y,x) || p(z|y)] and a sample for z from q(z|y,x).
Returns the analytical KL divergence KL[q(z|y,x) || p(z|y)] if one is
available (as registered with `kullback_leibler.RegisterKL`), or a sampled
KL divergence otherwise (in this case the returned sample is the one used
for the KL divergence).
Args:
hiddens: The shared encoder activations, 2D `Tensor` of size `[B, ...]`.
y: Categorical cluster random variable, `Tensor` of size `[B, n_y]`.
Returns:
Pair `(kl, z)`, where `kl` is the KL divergence (a `Tensor` with shape
`[B]`, where `B` is the batch size), and `z` is a sample from the encoding
distribution.
"""
with tf.control_dependencies([tfc.assert_rank(hiddens, 2)]):
q = self.infer_latent(hiddens, y) # q(z|x,y)
p = self.generate_latent(y) # p(z|y)
z = q.sample(name='z')
try:
kl = tfp.distributions.kl_divergence(q, p)
except NotImplementedError:
logging.warn('Using sampling KLD for z')
log_p_z = p.log_prob(z, name='log_p_z_y')
log_q_z = q.log_prob(z, name='log_q_z_xy')
# Reduce over all dimension except batch.
sum_axis_p = [k for k in range(1, log_p_z.get_shape().ndims)]
log_p_z = tf.reduce_sum(log_p_z, sum_axis_p)
sum_axis_q = [k for k in range(1, log_q_z.get_shape().ndims)]
log_q_z = tf.reduce_sum(log_q_z, sum_axis_q)
kl = log_q_z - log_p_z
# Reduce over all dimension except batch.
sum_axis_kl = [k for k in range(1, kl.get_shape().ndims)]
kl = tf.reduce_sum(kl, sum_axis_kl, name='kl')
return kl, z
def infer_latent(self, hiddens, y=None, use_mean_y=False):
"""Performs inference over the latent variable z.
Args:
hiddens: The shared encoder activations, 4D `Tensor` of size `[B, ...]`.
y: Categorical cluster variable, `Tensor` of size `[B, ...]`.
use_mean_y: Boolean, whether to take the mean encoding over all y.
Returns:
The distribution `q(z|x, y)`, which on sample produces tensors of size
`[N, B, ...]` where `B` is the batch size of `x` and `y`, and `N` is the
number of samples and `...` represents the shape of the latent variables.
"""
with tf.control_dependencies([tfc.assert_rank(hiddens, 2)]):
if y is None:
y = tf.to_float(self.infer_cluster(hiddens).mode())
if use_mean_y:
# If use_mean_y, then y must be probabilities
all_y = tf.tile(
tf.expand_dims(tf.one_hot(tf.range(y.shape[1]), y.shape[1]), axis=1),
multiples=[1, y.shape[0], 1])
# Compute z KL from x (for all possible y), and keep z's around
z_all = tf.map_fn(
fn=lambda y: self._latent_encoder(
hiddens, y, is_training=self._is_training).mean(),
elems=all_y,
dtype=tf.float32)
return tf.einsum('ij,jik->ik', y, z_all)
else:
return self._latent_encoder(hiddens, y, is_training=self._is_training)
def generate_latent(self, y):
"""Use the generative model to compute latent variable z, given a y.
Args:
y: Categorical cluster variable, `Tensor` of size `[B, ...]`.
Returns:
The distribution `p(z|y)`, which on sample produces tensors of size
`[N, B, ...]` where `B` is the batch size of `x`, and `N` is the number of
samples asked and `...` represents the shape of the latent variables.
"""
return self._latent_decoder(y, is_training=self._is_training)
def get_shared_rep(self, x, is_training):
"""Gets the shared representation from a given input x.
Args:
x: Observed variables, `Tensor` of size `[B, I]` where `I` is the size of
a flattened input.
is_training: bool, whether this constitutes training data or not.
    Returns:
      The shared representation of `x` produced by the shared encoder, a 2D
      `Tensor` of size `[B, ...]`.
"""
return self._shared_encoder(x, is_training)
def infer_cluster(self, hiddens):
"""Performs inference over the categorical variable y.
Args:
hiddens: The shared encoder activations, 2D `Tensor` of size `[B, ...]`.
Returns:
The distribution `q(y|x)`, which on sample produces tensors of size
`[N, B, ...]` where `B` is the batch size of `x`, and `N` is the number of
samples asked and `...` represents the shape of the latent variables.
"""
with tf.control_dependencies([tfc.assert_rank(hiddens, 2)]):
return self._cluster_encoder(hiddens, is_training=self._is_training)
def predict(self, z, y):
"""Computes prediction over the observed variables.
Args:
z: Latent variables, `Tensor` of size `[B, ...]`.
y: Categorical cluster variable, `Tensor` of size `[B, ...]`.
Returns:
The distribution `p(x|z)`, which on sample produces tensors of size
`[N, B, ...]` where `N` is the number of samples asked.
"""
encoder_conv_shapes = getattr(self._shared_encoder, 'conv_shapes', None)
return self._data_decoder(
z,
y,
shared_encoder_conv_shapes=encoder_conv_shapes,
is_training=self._is_training)
def compute_prior(self):
"""Computes prior over the latent variables.
Returns:
The distribution `p(y)`, which on sample produces tensors of size
`[N, ...]` where `N` is the number of samples asked and `...` represents
the shape of the latent variables.
"""
return self._prior()
class UpsampleModule(snt.AbstractModule):
"""Convolutional decoder.
If `method` is 'deconv' apply transposed convolutions with stride 2,
otherwise apply the `method` upsampling function and then smooth with a
stride 1x1 convolution.
Params:
-------
filters: list, where the first element is the number of filters of the initial
MLP layer and the remaining elements are the number of filters of the
upsampling layers.
kernel_size: the size of the convolutional kernels. The same size will be
used in all convolutions.
activation: an activation function, applied to all layers but the last.
dec_up_strides: list, the upsampling factors of each upsampling convolutional
layer.
enc_conv_shapes: list, the shapes of the input and of all the intermediate
feature maps of the convolutional layers in the encoder.
n_c: the number of output channels.
"""
def __init__(self,
filters,
kernel_size,
activation,
dec_up_strides,
enc_conv_shapes,
n_c,
method='nn',
name='upsample_module'):
super(UpsampleModule, self).__init__(name=name)
assert len(filters) == len(dec_up_strides) + 1, (
'The decoder\'s filters should contain one element more than the '
'decoder\'s up stride list, but has %d elements instead of %d.\n'
'Decoder filters: %s\nDecoder up strides: %s' %
(len(filters), len(dec_up_strides) + 1, str(filters),
str(dec_up_strides)))
self._filters = filters
self._kernel_size = kernel_size
self._activation = activation
self._dec_up_strides = dec_up_strides
self._enc_conv_shapes = enc_conv_shapes
self._n_c = n_c
if method == 'deconv':
self._conv_layer = tf.layers.Conv2DTranspose
self._method = method
else:
self._conv_layer = tf.layers.Conv2D
self._method = getattr(tf.image.ResizeMethod, method.upper())
self._method_str = method.capitalize()
def _build(self, z, is_training=True, test_local_stats=True, use_bn=False):
batch_norm_args = {
'is_training': is_training,
'test_local_stats': test_local_stats
}
method = self._method
# Cycle over the encoder shapes backwards, to build a symmetrical decoder.
enc_conv_shapes = self._enc_conv_shapes[::-1]
strides = self._dec_up_strides
# We store the heights and widths of the encoder feature maps that are
# unique, i.e., the ones right after a layer with stride != 1. These will be
# used as a target to potentially crop the upsampled feature maps.
unique_hw = np.unique([(el[1], el[2]) for el in enc_conv_shapes], axis=0)
unique_hw = unique_hw.tolist()[::-1]
unique_hw.pop() # Drop the initial shape
# The first filter is an MLP.
mlp_filter, conv_filters = self._filters[0], self._filters[1:]
# The first shape is used after the MLP to go to 4D.
layers = [z]
# The shape of the first enc is used after the MLP to go back to 4D.
dec_mlp = snt.nets.MLP(
name='dec_mlp_projection',
output_sizes=[mlp_filter, np.prod(enc_conv_shapes[0][1:])],
use_bias=not use_bn,
activation=self._activation,
activate_final=True)
upsample_mlp_flat = dec_mlp(z)
if use_bn:
upsample_mlp_flat = snt.BatchNorm(scale=True)(upsample_mlp_flat,
**batch_norm_args)
layers.append(upsample_mlp_flat)
upsample = tf.reshape(upsample_mlp_flat, enc_conv_shapes[0])
layers.append(upsample)
for i, (filter_i, stride_i) in enumerate(zip(conv_filters, strides), 1):
if method != 'deconv' and stride_i > 1:
upsample = tf.image.resize_images(
upsample, [stride_i * el for el in upsample.shape.as_list()[1:3]],
method=method,
name='upsample_' + str(i))
upsample = self._conv_layer(
filters=filter_i,
kernel_size=self._kernel_size,
padding='same',
use_bias=not use_bn,
activation=self._activation,
strides=stride_i if method == 'deconv' else 1,
name='upsample_conv_' + str(i))(
upsample)
if use_bn:
upsample = snt.BatchNorm(scale=True)(upsample, **batch_norm_args)
if stride_i > 1:
hw = unique_hw.pop()
upsample = utils.maybe_center_crop(upsample, hw)
layers.append(upsample)
# Final layer, no upsampling.
x_logits = tf.layers.Conv2D(
filters=self._n_c,
kernel_size=self._kernel_size,
padding='same',
use_bias=not use_bn,
activation=None,
strides=1,
name='logits')(
upsample)
if use_bn:
x_logits = snt.BatchNorm(scale=True)(x_logits, **batch_norm_args)
layers.append(x_logits)
logging.info('%s upsampling module layer shapes', self._method_str)
logging.info('\n'.join([str(v.shape.as_list()) for v in layers]))
return x_logits
| deepmind-research-master | curl/model.py |
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Runs the unsupervised i.i.d benchmark experiments in the paper."""
from absl import app
from absl import flags
from curl import training
flags.DEFINE_enum('dataset', 'mnist', ['mnist', 'omniglot'], 'Dataset.')
FLAGS = flags.FLAGS
def main(unused_argv):
if FLAGS.dataset == 'mnist':
n_y = 25
n_y_active = 1
n_z = 50
else: # omniglot
n_y = 100
n_y_active = 1
n_z = 100
training.run_training(
dataset=FLAGS.dataset,
n_y=n_y,
n_y_active=n_y_active,
n_z=n_z,
output_type='bernoulli',
training_data_type='iid',
n_concurrent_classes=1,
lr_init=5e-4,
lr_factor=1.,
lr_schedule=[1],
blend_classes=False,
train_supervised=False,
n_steps=100000,
report_interval=10000,
knn_values=[3],
random_seed=1,
encoder_kwargs={
'encoder_type': 'multi',
'n_enc': [500, 500],
'enc_strides': [1],
},
decoder_kwargs={
'decoder_type': 'single',
'n_dec': [500],
'dec_up_strides': None,
},
dynamic_expansion=True,
ll_thresh=-200.0,
classify_with_samples=True,
gen_replay_type=None,
use_supervised_replay=False,
)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | curl/train_unsup.py |
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Some common utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
def generate_gaussian(logits, sigma_nonlin, sigma_param):
"""Generate a Gaussian distribution given a selected parameterisation."""
mu, sigma = tf.split(value=logits, num_or_size_splits=2, axis=1)
if sigma_nonlin == 'exp':
sigma = tf.exp(sigma)
elif sigma_nonlin == 'softplus':
sigma = tf.nn.softplus(sigma)
else:
raise ValueError('Unknown sigma_nonlin {}'.format(sigma_nonlin))
if sigma_param == 'var':
sigma = tf.sqrt(sigma)
elif sigma_param != 'std':
raise ValueError('Unknown sigma_param {}'.format(sigma_param))
return tfp.distributions.Normal(loc=mu, scale=sigma)
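# Example usage (added, not part of the original file): `logits` packs the
# mean and the pre-nonlinearity scale side by side, so for n_z = 32 a [B, 64]
# tensor is split into mu = logits[:, :32] and sigma = logits[:, 32:]. With
# sigma_nonlin='softplus' and sigma_param='var', the second half is passed
# through softplus (interpreted as a variance) and then square-rooted, e.g.:
#
#   dist = generate_gaussian(tf.zeros([4, 64]), 'softplus', 'var')
#   # dist.loc has shape [4, 32]; dist.scale == sqrt(softplus(0.)) ~= 0.83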
def construct_prior_probs(batch_size, n_y, n_y_active):
"""Construct the uniform prior probabilities.
Args:
batch_size: int, the size of the batch.
n_y: int, the number of categorical cluster components.
n_y_active: tf.Variable, the number of components that are currently in use.
Returns:
Tensor representing the prior probability matrix, size of [batch_size, n_y].
"""
probs = tf.ones((batch_size, n_y_active)) / tf.cast(
n_y_active, dtype=tf.float32)
paddings1 = tf.stack([tf.constant(0), tf.constant(0)], axis=0)
paddings2 = tf.stack([tf.constant(0), n_y - n_y_active], axis=0)
paddings = tf.stack([paddings1, paddings2], axis=1)
probs = tf.pad(probs, paddings, constant_values=1e-12)
probs.set_shape((batch_size, n_y))
logging.info('Prior shape: %s', str(probs.shape))
return probs
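# Illustrative example (added, not part of the original file): with
# batch_size = 2, n_y = 4 and n_y_active = 2, the active components share the
# mass uniformly and the inactive ones are padded with 1e-12:
#
#   [[0.5, 0.5, 1e-12, 1e-12],
#    [0.5, 0.5, 1e-12, 1e-12]]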
def maybe_center_crop(layer, target_hw):
"""Center crop the layer to match a target shape."""
l_height, l_width = layer.shape.as_list()[1:3]
t_height, t_width = target_hw
assert t_height <= l_height and t_width <= l_width
if (l_height - t_height) % 2 != 0 or (l_width - t_width) % 2 != 0:
    logging.warn(
        'It is impossible to evenly center-crop to [%d, %d] from [%d, %d].'
        ' Crop will be uneven.', t_height, t_width, l_height, l_width)
border = int((l_height - t_height) / 2)
x_0, x_1 = border, l_height - border
border = int((l_width - t_width) / 2)
y_0, y_1 = border, l_width - border
layer_cropped = layer[:, x_0:x_1, y_0:y_1, :]
return layer_cropped
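# Example (added, not part of the original file): center-cropping a
# [B, 32, 32, C] feature map to target_hw = (28, 28) gives a border of 2 on
# each side, i.e. layer[:, 2:30, 2:30, :]. If the size difference is odd, the
# crop is uneven, as warned above.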
| deepmind-research-master | curl/utils.py |
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Custom layers for CURL."""
from absl import logging
import sonnet as snt
import tensorflow.compat.v1 as tf
tfc = tf.compat.v1
class ResidualStack(snt.AbstractModule):
"""A stack of ResNet V2 blocks."""
def __init__(self,
num_hiddens,
num_residual_layers,
num_residual_hiddens,
filter_size=3,
initializers=None,
data_format='NHWC',
activation=tf.nn.relu,
name='residual_stack'):
"""Instantiate a ResidualStack."""
super(ResidualStack, self).__init__(name=name)
self._num_hiddens = num_hiddens
self._num_residual_layers = num_residual_layers
self._num_residual_hiddens = num_residual_hiddens
self._filter_size = filter_size
self._initializers = initializers
self._data_format = data_format
self._activation = activation
def _build(self, h):
for i in range(self._num_residual_layers):
h_i = self._activation(h)
h_i = snt.Conv2D(
output_channels=self._num_residual_hiddens,
kernel_shape=(self._filter_size, self._filter_size),
stride=(1, 1),
initializers=self._initializers,
data_format=self._data_format,
name='res_nxn_%d' % i)(
h_i)
h_i = self._activation(h_i)
h_i = snt.Conv2D(
output_channels=self._num_hiddens,
kernel_shape=(1, 1),
stride=(1, 1),
initializers=self._initializers,
data_format=self._data_format,
name='res_1x1_%d' % i)(
h_i)
h += h_i
return self._activation(h)
class SharedConvModule(snt.AbstractModule):
"""Convolutional decoder."""
def __init__(self,
filters,
kernel_size,
activation,
strides,
name='shared_conv_encoder'):
super(SharedConvModule, self).__init__(name=name)
self._filters = filters
self._kernel_size = kernel_size
self._activation = activation
self.strides = strides
assert len(strides) == len(filters) - 1
self.conv_shapes = None
def _build(self, x, is_training=True):
with tf.control_dependencies([tfc.assert_rank(x, 4)]):
self.conv_shapes = [x.shape.as_list()] # Needed by deconv module
conv = x
for i, (filter_i,
stride_i) in enumerate(zip(self._filters, self.strides), 1):
conv = tf.layers.Conv2D(
filters=filter_i,
kernel_size=self._kernel_size,
padding='same',
activation=self._activation,
strides=stride_i,
name='enc_conv_%d' % i)(
conv)
self.conv_shapes.append(conv.shape.as_list())
conv_flat = snt.BatchFlatten()(conv)
enc_mlp = snt.nets.MLP(
name='enc_mlp',
output_sizes=[self._filters[-1]],
activation=self._activation,
activate_final=True)
h = enc_mlp(conv_flat)
logging.info('Shared conv module layer shapes:')
logging.info('\n'.join([str(el) for el in self.conv_shapes]))
logging.info(h.shape.as_list())
return h
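# Illustrative construction sketch (hypothetical sizes): with
# filters=[32, 64, 128, 256] and strides=[1, 2, 2], three convolutions are
# applied (zip stops at the shorter `strides` list) and the final filter size
# is used as the width of the closing MLP embedding:
#   encoder = SharedConvModule(filters=[32, 64, 128, 256], kernel_size=3,
#                              activation=tf.nn.relu, strides=[1, 2, 2])
#   h = encoder(images)  # images: a rank-4 [B, H, W, C] float tensor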
| deepmind-research-master | curl/layers.py |
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Training file to run most of the experiments in the paper.
The default parameters correspond to the first set of experiments in Section
4.2.
For the expansion ablation, run with different ll_thresh values as in the paper.
Note that n_y_active represents the number of *active* components at the
start and should be set to 1, while n_y represents the maximum number of
components allowed and should be set sufficiently high (e.g. n_y = 100).
For the MGR ablation, setting use_sup_replay = True switches to using SMGR,
and the gen_replay_type flag can switch between fixed and dynamic replay. The
generative snapshot period is set automatically in training.py based on these
settings (i.e. the data_period variable), so the 0.1T runs can be reproduced
by dividing this value by 10.
"""
from absl import app
from absl import flags
from curl import training
flags.DEFINE_enum('dataset', 'mnist', ['mnist', 'omniglot'], 'Dataset.')
FLAGS = flags.FLAGS
def main(unused_argv):
training.run_training(
dataset=FLAGS.dataset,
output_type='bernoulli',
n_y=30,
n_y_active=1,
training_data_type='sequential',
n_concurrent_classes=1,
lr_init=1e-3,
lr_factor=1.,
lr_schedule=[1],
blend_classes=False,
train_supervised=False,
n_steps=100000,
report_interval=10000,
knn_values=[10],
random_seed=1,
encoder_kwargs={
'encoder_type': 'multi',
'n_enc': [1200, 600, 300, 150],
'enc_strides': [1],
},
decoder_kwargs={
'decoder_type': 'single',
'n_dec': [500, 500],
'dec_up_strides': None,
},
n_z=32,
dynamic_expansion=True,
ll_thresh=-200.0,
classify_with_samples=False,
gen_replay_type='fixed',
use_supervised_replay=False,
)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | curl/train_main.py |
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Script to train CURL."""
import collections
import functools
from absl import logging
import numpy as np
from sklearn import neighbors
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
from curl import model
from curl import utils
tfc = tf.compat.v1
# pylint: disable=g-long-lambda
MainOps = collections.namedtuple('MainOps', [
'elbo', 'll', 'log_p_x', 'kl_y', 'kl_z', 'elbo_supervised', 'll_supervised',
'log_p_x_supervised', 'kl_y_supervised', 'kl_z_supervised',
'cat_probs', 'confusion', 'purity', 'latents'
])
DatasetTuple = collections.namedtuple('DatasetTuple', [
'train_data', 'train_iter_for_clf', 'train_data_for_clf',
'valid_iter', 'valid_data', 'test_iter', 'test_data', 'ds_info'
])
def compute_purity(confusion):
return np.sum(np.max(confusion, axis=0)).astype(float) / np.sum(confusion)
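# Worked example: for a confusion matrix [[40, 10], [5, 45]] (rows are true
# classes, columns are predicted clusters), the per-cluster maxima are 40 and
# 45, so purity = (40 + 45) / 100 = 0.85.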
def process_dataset(iterator,
ops_to_run,
sess,
feed_dict=None,
aggregation_ops=np.stack,
processing_ops=None):
"""Process a dataset by computing ops and accumulating batch by batch.
Args:
iterator: iterator through the dataset.
ops_to_run: dict, tf ops to run as part of dataset processing.
sess: tf.Session to use.
feed_dict: dict, required placeholders.
aggregation_ops: fn or dict of fns, aggregation op to apply for each op.
processing_ops: fn or dict of fns, extra processing op to apply for each op.
Returns:
Results accumulated over dataset.
"""
if not isinstance(ops_to_run, dict):
raise TypeError('ops_to_run must be specified as a dict')
if not isinstance(aggregation_ops, dict):
aggregation_ops = {k: aggregation_ops for k in ops_to_run}
if not isinstance(processing_ops, dict):
processing_ops = {k: processing_ops for k in ops_to_run}
out_results = collections.OrderedDict()
sess.run(iterator.initializer)
while True:
# Iterate over the whole dataset and append the results to a per-key list.
try:
outs = sess.run(ops_to_run, feed_dict=feed_dict)
for key, value in outs.items():
out_results.setdefault(key, []).append(value)
except tf.errors.OutOfRangeError: # end of dataset iterator
break
# Aggregate and process results.
for key, value in out_results.items():
if aggregation_ops[key]:
out_results[key] = aggregation_ops[key](value)
if processing_ops[key]:
out_results[key] = processing_ops[key](out_results[key], axis=0)
return out_results
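# Illustrative usage sketch (op names are hypothetical): accumulate a scalar
# metric and a confusion matrix over an initializable iterator, averaging the
# former and summing the latter across batches:
#   results = process_dataset(
#       test_iter, {'elbo': elbo_op, 'confusion': confusion_op}, sess,
#       processing_ops={'elbo': np.mean, 'confusion': np.sum})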
def get_data_sources(dataset, dataset_kwargs, batch_size, test_batch_size,
training_data_type, n_concurrent_classes, image_key,
label_key):
"""Create and return data sources for training, validation, and testing.
Args:
dataset: str, name of dataset ('mnist', 'omniglot', etc).
dataset_kwargs: dict, kwargs used in tf dataset constructors.
batch_size: int, batch size used for training.
test_batch_size: int, batch size used for evaluation.
training_data_type: str, how training data is seen ('iid', or 'sequential').
n_concurrent_classes: int, # classes seen at a time (ignored for 'iid').
    image_key: str, name of image key in dataset.
label_key: str, name of label key in dataset.
Returns:
A namedtuple containing all of the dataset iterators and batches.
"""
# Load training data sources
ds_train, ds_info = tfds.load(
name=dataset,
split=tfds.Split.TRAIN,
with_info=True,
as_dataset_kwargs={'shuffle_files': False},
**dataset_kwargs)
# Validate assumption that data is in [0, 255]
assert ds_info.features[image_key].dtype == tf.uint8
n_classes = ds_info.features[label_key].num_classes
num_train_examples = ds_info.splits['train'].num_examples
def preprocess_data(x):
"""Convert images from uint8 in [0, 255] to float in [0, 1]."""
x[image_key] = tf.image.convert_image_dtype(x[image_key], tf.float32)
return x
if training_data_type == 'sequential':
c = None # The index of the class number, None for now and updated later
if n_concurrent_classes == 1:
filter_fn = lambda v: tf.equal(v[label_key], c)
else:
# Define the lowest and highest class number at each data period.
assert n_classes % n_concurrent_classes == 0, (
'Number of total classes must be divisible by '
'number of concurrent classes')
cmin = []
cmax = []
for i in range(int(n_classes / n_concurrent_classes)):
for _ in range(n_concurrent_classes):
cmin.append(i * n_concurrent_classes)
cmax.append((i + 1) * n_concurrent_classes)
filter_fn = lambda v: tf.logical_and(
tf.greater_equal(v[label_key], cmin[c]), tf.less(
v[label_key], cmax[c]))
# Set up data sources/queues (one for each class).
train_datasets = []
train_iterators = []
train_data = []
full_ds = ds_train.repeat().shuffle(num_train_examples, seed=0)
full_ds = full_ds.map(preprocess_data)
for c in range(n_classes):
filtered_ds = full_ds.filter(filter_fn).batch(
batch_size, drop_remainder=True)
train_datasets.append(filtered_ds)
train_iterators.append(train_datasets[-1].make_one_shot_iterator())
train_data.append(train_iterators[-1].get_next())
else: # not sequential
full_ds = ds_train.repeat().shuffle(num_train_examples, seed=0)
full_ds = full_ds.map(preprocess_data)
train_datasets = full_ds.batch(batch_size, drop_remainder=True)
train_data = train_datasets.make_one_shot_iterator().get_next()
# Set up data source to get full training set for classifier training
full_ds = ds_train.repeat(1).shuffle(num_train_examples, seed=0)
full_ds = full_ds.map(preprocess_data)
train_datasets_for_classifier = full_ds.batch(
test_batch_size, drop_remainder=True)
train_iter_for_classifier = (
train_datasets_for_classifier.make_initializable_iterator())
train_data_for_classifier = train_iter_for_classifier.get_next()
# Load validation dataset.
try:
valid_dataset = tfds.load(
name=dataset, split=tfds.Split.VALIDATION, **dataset_kwargs)
num_valid_examples = ds_info.splits[tfds.Split.VALIDATION].num_examples
assert (num_valid_examples %
test_batch_size == 0), ('test_batch_size must be a divisor of %d' %
num_valid_examples)
valid_dataset = valid_dataset.repeat(1).batch(
test_batch_size, drop_remainder=True)
valid_dataset = valid_dataset.map(preprocess_data)
valid_iter = valid_dataset.make_initializable_iterator()
valid_data = valid_iter.get_next()
except (KeyError, ValueError):
logging.warning('No validation set!!')
valid_iter = None
valid_data = None
# Load test dataset.
test_dataset = tfds.load(
name=dataset, split=tfds.Split.TEST, **dataset_kwargs)
num_test_examples = ds_info.splits['test'].num_examples
assert (num_test_examples %
test_batch_size == 0), ('test_batch_size must be a divisor of %d' %
num_test_examples)
test_dataset = test_dataset.repeat(1).batch(
test_batch_size, drop_remainder=True)
test_dataset = test_dataset.map(preprocess_data)
test_iter = test_dataset.make_initializable_iterator()
test_data = test_iter.get_next()
logging.info('Loaded %s data', dataset)
return DatasetTuple(train_data, train_iter_for_classifier,
train_data_for_classifier, valid_iter, valid_data,
test_iter, test_data, ds_info)
def setup_training_and_eval_graphs(x, label, y, n_y, curl_model,
classify_with_samples, is_training, name):
"""Set up the graph and return ops for training or evaluation.
Args:
x: tf placeholder for image.
label: tf placeholder for ground truth label.
y: tf placeholder for some self-supervised label/prediction.
n_y: int, dimensionality of discrete latent variable y.
curl_model: snt.AbstractModule representing the CURL model.
classify_with_samples: bool, whether to *sample* latents for classification.
is_training: bool, whether this graph is the training graph.
name: str, graph name.
Returns:
A namedtuple with the required graph ops to perform training or evaluation.
"""
# kl_y_supervised is -log q(y=y_true | x)
(log_p_x, kl_y, kl_z, log_p_x_supervised, kl_y_supervised,
kl_z_supervised) = curl_model.log_prob_elbo_components(x, y)
ll = log_p_x - kl_y - kl_z
elbo = -tf.reduce_mean(ll)
# Supervised loss, either for SMGR, or adaptation to supervised benchmark.
ll_supervised = log_p_x_supervised - kl_y_supervised - kl_z_supervised
elbo_supervised = -tf.reduce_mean(ll_supervised)
# Summaries
kl_y = tf.reduce_mean(kl_y)
kl_z = tf.reduce_mean(kl_z)
log_p_x_supervised = tf.reduce_mean(log_p_x_supervised)
kl_y_supervised = tf.reduce_mean(kl_y_supervised)
kl_z_supervised = tf.reduce_mean(kl_z_supervised)
# Evaluation.
hiddens = curl_model.get_shared_rep(x, is_training=is_training)
cat = curl_model.infer_cluster(hiddens)
cat_probs = cat.probs
confusion = tf.confusion_matrix(label, tf.argmax(cat_probs, axis=1),
num_classes=n_y, name=name + '_confusion')
purity = (tf.reduce_sum(tf.reduce_max(confusion, axis=0))
/ tf.reduce_sum(confusion))
if classify_with_samples:
latents = curl_model.infer_latent(
hiddens=hiddens, y=tf.to_float(cat.sample())).sample()
else:
latents = curl_model.infer_latent(
hiddens=hiddens, y=tf.to_float(cat.mode())).mean()
return MainOps(elbo, ll, log_p_x, kl_y, kl_z, elbo_supervised, ll_supervised,
log_p_x_supervised, kl_y_supervised, kl_z_supervised,
cat_probs, confusion, purity, latents)
def get_generated_data(sess, gen_op, y_input, gen_buffer_size,
component_counts):
"""Get generated model data (in place of saving a model snapshot).
Args:
sess: tf.Session.
gen_op: tf op representing a batch of generated data.
y_input: tf placeholder for which mixture components to generate from.
gen_buffer_size: int, number of data points to generate.
component_counts: np.array, prior probabilities over components.
Returns:
A tuple of two numpy arrays
The generated data
The corresponding labels
"""
batch_size, n_y = y_input.shape.as_list()
# Sample based on the history of all components used.
cluster_sample_probs = component_counts.astype(float)
cluster_sample_probs = np.maximum(1e-12, cluster_sample_probs)
cluster_sample_probs = cluster_sample_probs / np.sum(cluster_sample_probs)
# Now generate the data based on the specified cluster prior.
gen_buffer_images = []
gen_buffer_labels = []
for _ in range(gen_buffer_size):
gen_label = np.random.choice(
np.arange(n_y),
size=(batch_size,),
replace=True,
p=cluster_sample_probs)
y_gen_posterior_vals = np.zeros((batch_size, n_y))
y_gen_posterior_vals[np.arange(batch_size), gen_label] = 1
gen_image = sess.run(gen_op, feed_dict={y_input: y_gen_posterior_vals})
gen_buffer_images.append(gen_image)
gen_buffer_labels.append(gen_label)
gen_buffer_images = np.vstack(gen_buffer_images)
gen_buffer_labels = np.concatenate(gen_buffer_labels)
return gen_buffer_images, gen_buffer_labels
def setup_dynamic_ops(n_y):
"""Set up ops to move / copy mixture component weights for dynamic expansion.
Args:
n_y: int, dimensionality of discrete latent variable y.
Returns:
A dict containing all of the ops required for dynamic updating.
"""
# Set up graph ops to dynamically modify component params.
graph = tf.get_default_graph()
# 1) Ops to get and set latent encoder params (entire tensors)
latent_enc_tensors = {}
for k in range(n_y):
latent_enc_tensors['latent_w_' + str(k)] = graph.get_tensor_by_name(
'latent_encoder/mlp_latent_encoder_{}/w:0'.format(k))
latent_enc_tensors['latent_b_' + str(k)] = graph.get_tensor_by_name(
'latent_encoder/mlp_latent_encoder_{}/b:0'.format(k))
latent_enc_assign_ops = {}
latent_enc_phs = {}
for key, tensor in latent_enc_tensors.items():
latent_enc_phs[key] = tfc.placeholder(tensor.dtype, tensor.shape)
latent_enc_assign_ops[key] = tf.assign(tensor, latent_enc_phs[key])
# 2) Ops to get and set cluster encoder params (columns of a tensor)
# We will be copying column ind_from to column ind_to.
cluster_w = graph.get_tensor_by_name(
'cluster_encoder/mlp_cluster_encoder_final/w:0')
cluster_b = graph.get_tensor_by_name(
'cluster_encoder/mlp_cluster_encoder_final/b:0')
ind_from = tfc.placeholder(dtype=tf.int32)
ind_to = tfc.placeholder(dtype=tf.int32)
# Determine indices of cluster encoder weights and biases to be updated
w_indices = tf.transpose(
tf.stack([
tf.range(cluster_w.shape[0], dtype=tf.int32),
ind_to * tf.ones(shape=(cluster_w.shape[0],), dtype=tf.int32)
]))
b_indices = ind_to
# Determine updates themselves
cluster_w_updates = tf.squeeze(
tf.slice(cluster_w, begin=(0, ind_from), size=(cluster_w.shape[0], 1)))
cluster_b_updates = cluster_b[ind_from]
# Create update ops
cluster_w_update_op = tf.scatter_nd_update(cluster_w, w_indices,
cluster_w_updates)
cluster_b_update_op = tf.scatter_update(cluster_b, b_indices,
cluster_b_updates)
# 3) Ops to get and set latent prior params (columns of a tensor)
# We will be copying column ind_from to column ind_to.
latent_prior_mu_w = graph.get_tensor_by_name(
'latent_decoder/latent_prior_mu/w:0')
latent_prior_sigma_w = graph.get_tensor_by_name(
'latent_decoder/latent_prior_sigma/w:0')
mu_indices = tf.transpose(
tf.stack([
ind_to * tf.ones(shape=(latent_prior_mu_w.shape[1],), dtype=tf.int32),
tf.range(latent_prior_mu_w.shape[1], dtype=tf.int32)
]))
mu_updates = tf.squeeze(
tf.slice(
latent_prior_mu_w,
begin=(ind_from, 0),
size=(1, latent_prior_mu_w.shape[1])))
mu_update_op = tf.scatter_nd_update(latent_prior_mu_w, mu_indices, mu_updates)
sigma_indices = tf.transpose(
tf.stack([
ind_to *
tf.ones(shape=(latent_prior_sigma_w.shape[1],), dtype=tf.int32),
tf.range(latent_prior_sigma_w.shape[1], dtype=tf.int32)
]))
sigma_updates = tf.squeeze(
tf.slice(
latent_prior_sigma_w,
begin=(ind_from, 0),
size=(1, latent_prior_sigma_w.shape[1])))
sigma_update_op = tf.scatter_nd_update(latent_prior_sigma_w, sigma_indices,
sigma_updates)
dynamic_ops = {
'ind_from_ph': ind_from,
'ind_to_ph': ind_to,
'latent_enc_tensors': latent_enc_tensors,
'latent_enc_assign_ops': latent_enc_assign_ops,
'latent_enc_phs': latent_enc_phs,
'cluster_w_update_op': cluster_w_update_op,
'cluster_b_update_op': cluster_b_update_op,
'mu_update_op': mu_update_op,
'sigma_update_op': sigma_update_op
}
return dynamic_ops
def copy_component_params(ind_from, ind_to, sess, ind_from_ph, ind_to_ph,
latent_enc_tensors, latent_enc_assign_ops,
latent_enc_phs,
cluster_w_update_op, cluster_b_update_op,
mu_update_op, sigma_update_op):
"""Copy parameters from component i to component j.
Args:
ind_from: int, component index to copy from.
ind_to: int, component index to copy to.
sess: tf.Session.
ind_from_ph: tf placeholder for component to copy from.
ind_to_ph: tf placeholder for component to copy to.
latent_enc_tensors: dict, tensors in the latent posterior encoder.
latent_enc_assign_ops: dict, assignment ops for latent posterior encoder.
latent_enc_phs: dict, placeholders for assignment ops.
cluster_w_update_op: op for updating weights of cluster encoder.
    cluster_b_update_op: op for updating biases of cluster encoder.
mu_update_op: op for updating mu weights of latent prior.
sigma_update_op: op for updating sigma weights of latent prior.
"""
update_ops = []
feed_dict = {}
# Copy for latent encoder.
new_w_val, new_b_val = sess.run([
latent_enc_tensors['latent_w_' + str(ind_from)],
latent_enc_tensors['latent_b_' + str(ind_from)]
])
update_ops.extend([
latent_enc_assign_ops['latent_w_' + str(ind_to)],
latent_enc_assign_ops['latent_b_' + str(ind_to)]
])
feed_dict.update({
latent_enc_phs['latent_w_' + str(ind_to)]: new_w_val,
latent_enc_phs['latent_b_' + str(ind_to)]: new_b_val
})
# Copy for cluster encoder softmax.
update_ops.extend([cluster_w_update_op, cluster_b_update_op])
feed_dict.update({ind_from_ph: ind_from, ind_to_ph: ind_to})
# Copy for latent prior.
update_ops.extend([mu_update_op, sigma_update_op])
feed_dict.update({ind_from_ph: ind_from, ind_to_ph: ind_to})
sess.run(update_ops, feed_dict)
def run_training(
dataset,
training_data_type,
n_concurrent_classes,
blend_classes,
train_supervised,
n_steps,
random_seed,
lr_init,
lr_factor,
lr_schedule,
output_type,
n_y,
n_y_active,
n_z,
encoder_kwargs,
decoder_kwargs,
dynamic_expansion,
ll_thresh,
classify_with_samples,
report_interval,
knn_values,
gen_replay_type,
use_supervised_replay):
"""Run training script.
Args:
dataset: str, name of the dataset.
training_data_type: str, type of training run ('iid' or 'sequential').
n_concurrent_classes: int, # of classes seen at a time (ignored for 'iid').
blend_classes: bool, whether to blend in samples from the next class.
train_supervised: bool, whether to use supervision during training.
n_steps: int, number of total training steps.
random_seed: int, seed for tf and numpy RNG.
lr_init: float, initial learning rate.
lr_factor: float, learning rate decay factor.
lr_schedule: float, epochs at which the decay should be applied.
output_type: str, output distribution (currently only 'bernoulli').
n_y: int, maximum possible dimensionality of discrete latent variable y.
n_y_active: int, starting dimensionality of discrete latent variable y.
n_z: int, dimensionality of continuous latent variable z.
encoder_kwargs: dict, parameters to specify encoder.
decoder_kwargs: dict, parameters to specify decoder.
dynamic_expansion: bool, whether to perform dynamic expansion.
    ll_thresh: float, log-likelihood threshold below which samples are treated
      as poorly explained (and buffered for dynamic expansion).
classify_with_samples: bool, whether to sample latents when classifying.
report_interval: int, number of steps after which to evaluate and report.
knn_values: list of ints, k values for different k-NN classifiers to run
(values of 3, 5, and 10 were used in different parts of the paper).
gen_replay_type: str, 'fixed', 'dynamic', or None.
    use_supervised_replay: bool, whether to use supervised replay (aka 'SMGR').
"""
# Set tf random seed.
tfc.set_random_seed(random_seed)
np.set_printoptions(precision=2, suppress=True)
# First set up the data source(s) and get dataset info.
if dataset == 'mnist':
batch_size = 100
test_batch_size = 1000
dataset_kwargs = {}
image_key = 'image'
label_key = 'label'
elif dataset == 'omniglot':
batch_size = 15
test_batch_size = 1318
dataset_kwargs = {}
image_key = 'image'
label_key = 'alphabet'
else:
raise NotImplementedError
dataset_ops = get_data_sources(dataset, dataset_kwargs, batch_size,
test_batch_size, training_data_type,
n_concurrent_classes, image_key, label_key)
train_data = dataset_ops.train_data
train_data_for_clf = dataset_ops.train_data_for_clf
valid_data = dataset_ops.valid_data
test_data = dataset_ops.test_data
output_shape = dataset_ops.ds_info.features[image_key].shape
n_x = np.prod(output_shape)
n_classes = dataset_ops.ds_info.features[label_key].num_classes
num_train_examples = dataset_ops.ds_info.splits['train'].num_examples
# Check that the number of classes is compatible with the training scenario
assert n_classes % n_concurrent_classes == 0
assert n_steps % (n_classes / n_concurrent_classes) == 0
# Set specific params depending on the type of gen replay
if gen_replay_type == 'fixed':
    data_period = int(n_steps / (n_classes / n_concurrent_classes))
gen_every_n = 2 # Blend in a gen replay batch every 2 steps
gen_refresh_period = data_period # How often to refresh the batches of
# generated data (equivalent to snapshotting a generative model)
gen_refresh_on_expansion = False # Don't refresh on dyn expansion
elif gen_replay_type == 'dynamic':
gen_every_n = 2 # Blend in a gen replay batch every 2 steps
gen_refresh_period = 1e8 # Never refresh generated data periodically
gen_refresh_on_expansion = True # Refresh on dyn expansion instead
elif gen_replay_type is None:
gen_every_n = 0 # Don't use any gen replay batches
gen_refresh_period = 1e8 # Never refresh generated data periodically
gen_refresh_on_expansion = False # Don't refresh on dyn expansion
else:
raise NotImplementedError
max_gen_batches = 5000 # Max num of gen batches (proxy for storing a model)
# Set dynamic expansion parameters
exp_wait_steps = 100 # Steps to wait after expansion before eligible again
exp_burn_in = 100 # Steps to wait at start of learning before eligible
exp_buffer_size = 100 # Size of the buffer of poorly explained data
num_buffer_train_steps = 10 # Num steps to train component on buffer
# Define a global tf variable for the number of active components.
n_y_active_np = n_y_active
n_y_active = tfc.get_variable(
initializer=tf.constant(n_y_active_np, dtype=tf.int32),
trainable=False,
name='n_y_active',
dtype=tf.int32)
logging.info('Starting CURL script on %s data.', dataset)
# Set up placeholders for training.
x_train_raw = tfc.placeholder(
dtype=tf.float32, shape=(batch_size,) + output_shape)
label_train = tfc.placeholder(dtype=tf.int32, shape=(batch_size,))
def binarize_fn(x):
"""Binarize a Bernoulli by rounding the probabilities.
Args:
x: tf tensor, input image.
Returns:
A tf tensor with the binarized image
"""
return tf.cast(tf.greater(x, 0.5 * tf.ones_like(x)), tf.float32)
if dataset == 'mnist':
x_train = binarize_fn(x_train_raw)
x_valid = binarize_fn(valid_data[image_key]) if valid_data else None
x_test = binarize_fn(test_data[image_key])
x_train_for_clf = binarize_fn(train_data_for_clf[image_key])
elif 'cifar' in dataset or dataset == 'omniglot':
x_train = x_train_raw
x_valid = valid_data[image_key] if valid_data else None
x_test = test_data[image_key]
x_train_for_clf = train_data_for_clf[image_key]
else:
raise ValueError('Unknown dataset {}'.format(dataset))
label_valid = valid_data[label_key] if valid_data else None
label_test = test_data[label_key]
# Set up CURL modules.
shared_encoder = model.SharedEncoder(name='shared_encoder', **encoder_kwargs)
latent_encoder = functools.partial(model.latent_encoder_fn, n_y=n_y, n_z=n_z)
latent_encoder = snt.Module(latent_encoder, name='latent_encoder')
latent_decoder = functools.partial(model.latent_decoder_fn, n_z=n_z)
latent_decoder = snt.Module(latent_decoder, name='latent_decoder')
cluster_encoder = functools.partial(
model.cluster_encoder_fn, n_y_active=n_y_active, n_y=n_y)
cluster_encoder = snt.Module(cluster_encoder, name='cluster_encoder')
data_decoder = functools.partial(
model.data_decoder_fn,
output_type=output_type,
output_shape=output_shape,
n_x=n_x,
n_y=n_y,
**decoder_kwargs)
data_decoder = snt.Module(data_decoder, name='data_decoder')
# Uniform prior over y.
prior_train_probs = utils.construct_prior_probs(batch_size, n_y, n_y_active)
prior_train = snt.Module(
lambda: tfp.distributions.OneHotCategorical(probs=prior_train_probs),
name='prior_unconditional_train')
prior_test_probs = utils.construct_prior_probs(test_batch_size, n_y,
n_y_active)
prior_test = snt.Module(
lambda: tfp.distributions.OneHotCategorical(probs=prior_test_probs),
name='prior_unconditional_test')
model_train = model.Curl(
prior_train,
latent_decoder,
data_decoder,
shared_encoder,
cluster_encoder,
latent_encoder,
n_y_active,
is_training=True,
name='curl_train')
model_eval = model.Curl(
prior_test,
latent_decoder,
data_decoder,
shared_encoder,
cluster_encoder,
latent_encoder,
n_y_active,
is_training=False,
name='curl_test')
# Set up training graph
y_train = label_train if train_supervised else None
y_valid = label_valid if train_supervised else None
y_test = label_test if train_supervised else None
train_ops = setup_training_and_eval_graphs(
x_train,
label_train,
y_train,
n_y,
model_train,
classify_with_samples,
is_training=True,
name='train')
hiddens_for_clf = model_eval.get_shared_rep(x_train_for_clf,
is_training=False)
cat_for_clf = model_eval.infer_cluster(hiddens_for_clf)
if classify_with_samples:
latents_for_clf = model_eval.infer_latent(
hiddens=hiddens_for_clf, y=tf.to_float(cat_for_clf.sample())).sample()
else:
latents_for_clf = model_eval.infer_latent(
hiddens=hiddens_for_clf, y=tf.to_float(cat_for_clf.mode())).mean()
# Set up validation graph
if valid_data is not None:
valid_ops = setup_training_and_eval_graphs(
x_valid,
label_valid,
y_valid,
n_y,
model_eval,
classify_with_samples,
is_training=False,
name='valid')
# Set up test graph
test_ops = setup_training_and_eval_graphs(
x_test,
label_test,
y_test,
n_y,
model_eval,
classify_with_samples,
is_training=False,
name='test')
# Set up optimizer (with scheduler).
global_step = tf.train.get_or_create_global_step()
lr_schedule = [
tf.cast(el * num_train_examples / batch_size, tf.int64)
for el in lr_schedule
]
num_schedule_steps = tf.reduce_sum(
tf.cast(global_step >= lr_schedule, tf.float32))
lr = float(lr_init) * float(lr_factor)**num_schedule_steps
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = optimizer.minimize(train_ops.elbo)
train_step_supervised = optimizer.minimize(train_ops.elbo_supervised)
# For dynamic expansion, we want to train only new-component-related params
cat_params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
'cluster_encoder/mlp_cluster_encoder_final')
component_params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
'latent_encoder/mlp_latent_encoder_*')
prior_params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES,
'latent_decoder/latent_prior*')
train_step_expansion = optimizer.minimize(
train_ops.elbo_supervised,
var_list=cat_params+component_params+prior_params)
# Set up ops for generative replay
if gen_every_n > 0:
# How many generative batches will we use each period?
gen_buffer_size = min(
int(gen_refresh_period / gen_every_n), max_gen_batches)
# Class each sample should be drawn from (default to uniform prior)
y_gen = tfp.distributions.OneHotCategorical(
probs=np.ones((batch_size, n_y)) / n_y,
dtype=tf.float32,
name='extra_train_classes').sample()
gen_samples = model_train.sample(y=y_gen, mean=True)
if dataset == 'mnist' or dataset == 'omniglot':
gen_samples = binarize_fn(gen_samples)
# Set up ops to dynamically modify parameters (for dynamic expansion)
dynamic_ops = setup_dynamic_ops(n_y)
logging.info('Created computation graph.')
n_steps_per_class = n_steps / n_classes # pylint: disable=invalid-name
cumulative_component_counts = np.array([0] * n_y).astype(float)
recent_component_counts = np.array([0] * n_y).astype(float)
gen_buffer_ind = 0
# Buffer of poorly explained data (if we're doing dynamic expansion).
poor_data_buffer = []
poor_data_labels = []
all_full_poor_data_buffers = []
all_full_poor_data_labels = []
has_expanded = False
steps_since_expansion = 0
gen_buffer_ind = 0
eligible_for_expansion = False # Flag to ensure we wait a bit after expansion
# Set up basic ops to run and quantities to log.
ops_to_run = {
'train_ELBO': train_ops.elbo,
'train_log_p_x': train_ops.log_p_x,
'train_kl_y': train_ops.kl_y,
'train_kl_z': train_ops.kl_z,
'train_ll': train_ops.ll,
'train_batch_purity': train_ops.purity,
'train_probs': train_ops.cat_probs,
'n_y_active': n_y_active
}
if valid_data is not None:
valid_ops_to_run = {
'valid_ELBO': valid_ops.elbo,
'valid_kl_y': valid_ops.kl_y,
'valid_kl_z': valid_ops.kl_z,
'valid_confusion': valid_ops.confusion
}
else:
valid_ops_to_run = {}
test_ops_to_run = {
'test_ELBO': test_ops.elbo,
'test_kl_y': test_ops.kl_y,
'test_kl_z': test_ops.kl_z,
'test_confusion': test_ops.confusion
}
to_log = ['train_batch_purity']
to_log_eval = ['test_purity', 'test_ELBO', 'test_kl_y', 'test_kl_z']
if valid_data is not None:
to_log_eval += ['valid_ELBO', 'valid_purity']
if train_supervised:
# Track supervised losses, train on supervised loss.
ops_to_run.update({
'train_ELBO_supervised': train_ops.elbo_supervised,
'train_log_p_x_supervised': train_ops.log_p_x_supervised,
'train_kl_y_supervised': train_ops.kl_y_supervised,
'train_kl_z_supervised': train_ops.kl_z_supervised,
'train_ll_supervised': train_ops.ll_supervised
})
default_train_step = train_step_supervised
to_log += [
'train_ELBO_supervised', 'train_log_p_x_supervised',
'train_kl_y_supervised', 'train_kl_z_supervised'
]
else:
# Track unsupervised losses, train on unsupervised loss.
ops_to_run.update({
'train_ELBO': train_ops.elbo,
'train_kl_y': train_ops.kl_y,
'train_kl_z': train_ops.kl_z,
'train_ll': train_ops.ll
})
default_train_step = train_step
to_log += ['train_ELBO', 'train_kl_y', 'train_kl_z']
with tf.train.SingularMonitoredSession() as sess:
for step in range(n_steps):
feed_dict = {}
# Use the default training loss, but vary it each step depending on the
# training scenario (eg. for supervised gen replay, we alternate losses)
ops_to_run['train_step'] = default_train_step
### 1) PERIODICALLY TAKE SNAPSHOTS FOR GENERATIVE REPLAY ###
if (gen_refresh_period and step % gen_refresh_period == 0 and
gen_every_n > 0):
# First, increment cumulative count and reset recent probs count.
cumulative_component_counts += recent_component_counts
recent_component_counts = np.zeros(n_y)
# Generate enough samples for the rest of the next period
# (Functionally equivalent to storing and sampling from the model).
gen_buffer_images, gen_buffer_labels = get_generated_data(
sess=sess,
gen_op=gen_samples,
y_input=y_gen,
gen_buffer_size=gen_buffer_size,
component_counts=cumulative_component_counts)
### 2) DECIDE WHICH DATA SOURCE TO USE (GENERATIVE OR REAL DATA) ###
periodic_refresh_started = (
gen_refresh_period and step >= gen_refresh_period)
refresh_on_expansion_started = (gen_refresh_on_expansion and has_expanded)
if ((periodic_refresh_started or refresh_on_expansion_started) and
gen_every_n > 0 and step % gen_every_n == 1):
# Use generated data for the training batch
used_real_data = False
s = gen_buffer_ind * batch_size
e = (gen_buffer_ind + 1) * batch_size
gen_data_array = {
'image': gen_buffer_images[s:e],
'label': gen_buffer_labels[s:e]
}
gen_buffer_ind = (gen_buffer_ind + 1) % gen_buffer_size
# Feed it as x_train because it's already reshaped and binarized.
feed_dict.update({
x_train: gen_data_array['image'],
label_train: gen_data_array['label']
})
if use_supervised_replay:
# Convert label to one-hot before feeding in.
gen_label_onehot = np.eye(n_y)[gen_data_array['label']]
feed_dict.update({model_train.y_label: gen_label_onehot})
ops_to_run['train_step'] = train_step_supervised
else:
# Else use the standard training data sources.
used_real_data = True
# Select appropriate data source for iid or sequential setup.
if training_data_type == 'sequential':
current_data_period = int(
min(step / n_steps_per_class, len(train_data) - 1))
# If training supervised, set n_y_active directly based on how many
# classes have been seen
if train_supervised:
assert not dynamic_expansion
n_y_active_np = n_concurrent_classes * (
current_data_period // n_concurrent_classes +1)
n_y_active.load(n_y_active_np, sess)
train_data_array = sess.run(train_data[current_data_period])
# If we are blending classes, figure out where we are in the data
# period and add some fraction of other samples.
if blend_classes:
# If in the first quarter, blend in examples from the previous class
if (step % n_steps_per_class < n_steps_per_class / 4 and
current_data_period > 0):
other_train_data_array = sess.run(
train_data[current_data_period - 1])
num_other = int(
(n_steps_per_class / 2 - 2 *
(step % n_steps_per_class)) * batch_size / n_steps_per_class)
other_inds = np.random.permutation(batch_size)[:num_other]
train_data_array[image_key][:num_other] = other_train_data_array[
image_key][other_inds]
train_data_array[label_key][:num_other] = other_train_data_array[
label_key][other_inds]
# If in the last quarter, blend in examples from the next class
elif (step % n_steps_per_class > 3 * n_steps_per_class / 4 and
current_data_period < n_classes - 1):
other_train_data_array = sess.run(train_data[current_data_period +
1])
num_other = int(
(2 * (step % n_steps_per_class) - 3 * n_steps_per_class / 2) *
batch_size / n_steps_per_class)
other_inds = np.random.permutation(batch_size)[:num_other]
train_data_array[image_key][:num_other] = other_train_data_array[
image_key][other_inds]
train_data_array['label'][:num_other] = other_train_data_array[
label_key][other_inds]
# Otherwise, just use the current class
else:
train_data_array = sess.run(train_data)
feed_dict.update({
x_train_raw: train_data_array[image_key],
label_train: train_data_array[label_key]
})
### 3) PERFORM A GRADIENT STEP ###
results = sess.run(ops_to_run, feed_dict=feed_dict)
del results['train_step']
### 4) COMPUTE ADDITIONAL DIAGNOSTIC OPS ON VALIDATION/TEST SETS. ###
if (step+1) % report_interval == 0:
if valid_data is not None:
logging.info('Evaluating on validation and test set!')
proc_ops = {
k: (np.sum if 'confusion' in k
else np.mean) for k in valid_ops_to_run
}
results.update(
process_dataset(
dataset_ops.valid_iter,
valid_ops_to_run,
sess,
feed_dict=feed_dict,
processing_ops=proc_ops))
results['valid_purity'] = compute_purity(results['valid_confusion'])
        else:
          logging.info('Evaluating on test set!')
        # Test metrics are computed in both cases, since they are logged
        # unconditionally below.
        proc_ops = {
            k: (np.sum if 'confusion' in k
                else np.mean) for k in test_ops_to_run
        }
        results.update(process_dataset(dataset_ops.test_iter,
                                       test_ops_to_run,
                                       sess,
                                       feed_dict=feed_dict,
                                       processing_ops=proc_ops))
        results['test_purity'] = compute_purity(results['test_confusion'])
curr_to_log = to_log + to_log_eval
else:
curr_to_log = list(to_log) # copy to prevent in-place modifications
### 5) DYNAMIC EXPANSION ###
if dynamic_expansion and used_real_data:
# If we're doing dynamic expansion and below max capacity then add
# poorly defined data points to a buffer.
# First check whether the model is eligible for expansion (the model
# becomes ineligible for a fixed time after each expansion, and when
# it has hit max capacity).
if (steps_since_expansion >= exp_wait_steps and step >= exp_burn_in and
n_y_active_np < n_y):
eligible_for_expansion = True
steps_since_expansion += 1
if eligible_for_expansion:
# Add poorly explained data samples to a buffer.
poor_inds = results['train_ll'] < ll_thresh
poor_data_buffer.extend(feed_dict[x_train_raw][poor_inds])
poor_data_labels.extend(feed_dict[label_train][poor_inds])
n_poor_data = len(poor_data_buffer)
# If buffer is big enough, then add a new component and train just the
# new component with several steps of gradient descent.
# (We just feed in a onehot cluster vector to indicate which
# component).
if n_poor_data >= exp_buffer_size:
# Dump the buffers so we can log them.
all_full_poor_data_buffers.append(poor_data_buffer)
all_full_poor_data_labels.append(poor_data_labels)
# Take a new generative snapshot if specified.
if gen_refresh_on_expansion and gen_every_n > 0:
# Increment cumulative count and reset recent probs count.
cumulative_component_counts += recent_component_counts
recent_component_counts = np.zeros(n_y)
gen_buffer_images, gen_buffer_labels = get_generated_data(
sess=sess,
gen_op=gen_samples,
y_input=y_gen,
gen_buffer_size=gen_buffer_size,
component_counts=cumulative_component_counts)
# Cull to a multiple of batch_size (keep the later data samples).
n_poor_batches = int(n_poor_data / batch_size)
poor_data_buffer = poor_data_buffer[-(n_poor_batches * batch_size):]
poor_data_labels = poor_data_labels[-(n_poor_batches * batch_size):]
# Find most probable component (on poor batch).
poor_cprobs = []
for bs in range(n_poor_batches):
poor_cprobs.append(
sess.run(
train_ops.cat_probs,
feed_dict={
x_train_raw:
poor_data_buffer[bs * batch_size:(bs + 1) *
batch_size]
}))
best_cluster = np.argmax(np.sum(np.vstack(poor_cprobs), axis=0))
# Initialize parameters of the new component from most prob
# existing.
new_cluster = n_y_active_np
copy_component_params(best_cluster, new_cluster, sess,
**dynamic_ops)
# Increment mixture component count n_y_active.
n_y_active_np += 1
n_y_active.load(n_y_active_np, sess)
# Perform a number of steps of gradient descent on the data buffer,
# training only the new component (supervised loss).
for _ in range(num_buffer_train_steps):
for bs in range(n_poor_batches):
x_batch = poor_data_buffer[bs * batch_size:(bs + 1) *
batch_size]
label_batch = [new_cluster] * batch_size
label_onehot_batch = np.eye(n_y)[label_batch]
_ = sess.run(
train_step_expansion,
feed_dict={
x_train_raw: x_batch,
model_train.y_label: label_onehot_batch
})
# Empty the buffer.
poor_data_buffer = []
poor_data_labels = []
# Reset the threshold flag so we have a burn in before the next
# component.
eligible_for_expansion = False
has_expanded = True
steps_since_expansion = 0
# Accumulate counts.
if used_real_data:
train_cat_probs_vals = results['train_probs']
recent_component_counts += np.sum(
train_cat_probs_vals, axis=0).astype(float)
### 6) LOGGING AND EVALUATION ###
cleanup_for_print = lambda x: ', {}: %.{}f'.format(
x.capitalize().replace('_', ' '), 3)
log_str = 'Iteration %d'
log_str += ''.join([cleanup_for_print(el) for el in curr_to_log])
log_str += ' n_active: %d'
logging.info(
log_str,
*([step] + [results[el] for el in curr_to_log] + [n_y_active_np]))
# Periodically perform evaluation
if (step + 1) % report_interval == 0:
# Report test purity and related measures
logging.info(
'Iteration %d, Test purity: %.3f, Test ELBO: %.3f, Test '
'KLy: %.3f, Test KLz: %.3f', step, results['test_purity'],
results['test_ELBO'], results['test_kl_y'], results['test_kl_z'])
# Flush data only once in a while to allow buffering of data for more
# efficient writes.
results['all_full_poor_data_buffers'] = all_full_poor_data_buffers
results['all_full_poor_data_labels'] = all_full_poor_data_labels
logging.info('Also training a classifier in latent space')
# Perform knn classification from latents, to evaluate discriminability.
# Get and encode training and test datasets.
clf_train_vals = process_dataset(
dataset_ops.train_iter_for_clf, {
'latents': latents_for_clf,
'labels': train_data_for_clf[label_key]
},
sess,
feed_dict,
aggregation_ops=np.concatenate)
clf_test_vals = process_dataset(
dataset_ops.test_iter, {
'latents': test_ops.latents,
'labels': test_data[label_key]
},
sess,
aggregation_ops=np.concatenate)
# Perform knn classification.
knn_models = []
for nval in knn_values:
# Fit training dataset.
clf = neighbors.KNeighborsClassifier(n_neighbors=nval)
clf.fit(clf_train_vals['latents'], clf_train_vals['labels'])
knn_models.append(clf)
results['train_' + str(nval) + 'nn_acc'] = clf.score(
clf_train_vals['latents'], clf_train_vals['labels'])
# Get test performance.
results['test_' + str(nval) + 'nn_acc'] = clf.score(
clf_test_vals['latents'], clf_test_vals['labels'])
logging.info(
'Iteration %d %d-NN classifier accuracies, Training: '
'%.3f, Test: %.3f', step, nval,
results['train_' + str(nval) + 'nn_acc'],
results['test_' + str(nval) + 'nn_acc'])
| deepmind-research-master | curl/training.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WideResNet and PreActResNet implementations in PyTorch."""
from typing import Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR10_STD = (0.2471, 0.2435, 0.2616)
CIFAR100_MEAN = (0.5071, 0.4865, 0.4409)
CIFAR100_STD = (0.2673, 0.2564, 0.2762)
class _Swish(torch.autograd.Function):
"""Custom implementation of swish."""
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
    i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
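# For reference, the backward pass above follows from
#   d/dx [x * sigmoid(x)] = sigmoid(x) + x * sigmoid(x) * (1 - sigmoid(x))
#                         = sigmoid(x) * (1 + x * (1 - sigmoid(x))).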
class Swish(nn.Module):
"""Module using custom implementation."""
def forward(self, input_tensor):
return _Swish.apply(input_tensor)
class _Block(nn.Module):
"""WideResNet Block."""
def __init__(self, in_planes, out_planes, stride, activation_fn=nn.ReLU):
super().__init__()
self.batchnorm_0 = nn.BatchNorm2d(in_planes)
self.relu_0 = activation_fn()
# We manually pad to obtain the same effect as `SAME` (necessary when
# `stride` is different than 1).
self.conv_0 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=0, bias=False)
self.batchnorm_1 = nn.BatchNorm2d(out_planes)
self.relu_1 = activation_fn()
self.conv_1 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.has_shortcut = in_planes != out_planes
if self.has_shortcut:
self.shortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
stride=stride, padding=0, bias=False)
else:
self.shortcut = None
self._stride = stride
def forward(self, x):
if self.has_shortcut:
x = self.relu_0(self.batchnorm_0(x))
else:
out = self.relu_0(self.batchnorm_0(x))
v = x if self.has_shortcut else out
if self._stride == 1:
v = F.pad(v, (1, 1, 1, 1))
elif self._stride == 2:
v = F.pad(v, (0, 1, 0, 1))
else:
raise ValueError('Unsupported `stride`.')
out = self.conv_0(v)
out = self.relu_1(self.batchnorm_1(out))
out = self.conv_1(out)
out = torch.add(self.shortcut(x) if self.has_shortcut else x, out)
return out
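# Shape note for the manual 'SAME' padding above (illustrative numbers): for a
# 32x32 input with stride 2 and kernel 3, TF-style 'SAME' gives a 16x16 output;
# padding (0, 1, 0, 1) yields 33x33 and floor((33 - 3) / 2) + 1 = 16 matches.
# With stride 1, padding (1, 1, 1, 1) keeps the 32x32 spatial size.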
class _BlockGroup(nn.Module):
"""WideResNet block group."""
def __init__(self, num_blocks, in_planes, out_planes, stride,
activation_fn=nn.ReLU):
super().__init__()
block = []
for i in range(num_blocks):
block.append(
_Block(i == 0 and in_planes or out_planes,
out_planes,
i == 0 and stride or 1,
activation_fn=activation_fn))
self.block = nn.Sequential(*block)
def forward(self, x):
return self.block(x)
class WideResNet(nn.Module):
"""WideResNet."""
def __init__(self,
num_classes: int = 10,
depth: int = 28,
width: int = 10,
activation_fn: nn.Module = nn.ReLU,
mean: Union[Tuple[float, ...], float] = CIFAR10_MEAN,
std: Union[Tuple[float, ...], float] = CIFAR10_STD,
padding: int = 0,
num_input_channels: int = 3):
super().__init__()
self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
self.std = torch.tensor(std).view(num_input_channels, 1, 1)
self.mean_cuda = None
self.std_cuda = None
self.padding = padding
num_channels = [16, 16 * width, 32 * width, 64 * width]
assert (depth - 4) % 6 == 0
num_blocks = (depth - 4) // 6
self.init_conv = nn.Conv2d(num_input_channels, num_channels[0],
kernel_size=3, stride=1, padding=1, bias=False)
self.layer = nn.Sequential(
_BlockGroup(num_blocks, num_channels[0], num_channels[1], 1,
activation_fn=activation_fn),
_BlockGroup(num_blocks, num_channels[1], num_channels[2], 2,
activation_fn=activation_fn),
_BlockGroup(num_blocks, num_channels[2], num_channels[3], 2,
activation_fn=activation_fn))
self.batchnorm = nn.BatchNorm2d(num_channels[3])
self.relu = activation_fn()
self.logits = nn.Linear(num_channels[3], num_classes)
self.num_channels = num_channels[3]
def forward(self, x):
if self.padding > 0:
x = F.pad(x, (self.padding,) * 4)
if x.is_cuda:
if self.mean_cuda is None:
self.mean_cuda = self.mean.cuda()
self.std_cuda = self.std.cuda()
out = (x - self.mean_cuda) / self.std_cuda
else:
out = (x - self.mean) / self.std
out = self.init_conv(out)
out = self.layer(out)
out = self.relu(self.batchnorm(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.num_channels)
return self.logits(out)
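# Illustrative usage sketch (hypothetical tensors): a WRN-28-10 for CIFAR-10
# maps a [N, 3, 32, 32] batch to [N, 10] logits.
#   model = WideResNet(num_classes=10, depth=28, width=10,
#                      activation_fn=Swish)
#   logits = model(torch.rand(8, 3, 32, 32))  # shape [8, 10]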
class _PreActBlock(nn.Module):
"""Pre-activation ResNet Block."""
def __init__(self, in_planes, out_planes, stride, activation_fn=nn.ReLU):
super().__init__()
self._stride = stride
self.batchnorm_0 = nn.BatchNorm2d(in_planes)
self.relu_0 = activation_fn()
# We manually pad to obtain the same effect as `SAME` (necessary when
# `stride` is different than 1).
self.conv_2d_1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=stride, padding=0, bias=False)
self.batchnorm_1 = nn.BatchNorm2d(out_planes)
self.relu_1 = activation_fn()
self.conv_2d_2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.has_shortcut = stride != 1 or in_planes != out_planes
if self.has_shortcut:
self.shortcut = nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=stride, padding=0, bias=False)
def _pad(self, x):
if self._stride == 1:
x = F.pad(x, (1, 1, 1, 1))
elif self._stride == 2:
x = F.pad(x, (0, 1, 0, 1))
else:
raise ValueError('Unsupported `stride`.')
return x
def forward(self, x):
out = self.relu_0(self.batchnorm_0(x))
shortcut = self.shortcut(self._pad(x)) if self.has_shortcut else x
out = self.conv_2d_1(self._pad(out))
out = self.conv_2d_2(self.relu_1(self.batchnorm_1(out)))
return out + shortcut
class PreActResNet(nn.Module):
"""Pre-activation ResNet."""
def __init__(self,
num_classes: int = 10,
depth: int = 18,
width: int = 0, # Used to make the constructor consistent.
activation_fn: nn.Module = nn.ReLU,
mean: Union[Tuple[float, ...], float] = CIFAR10_MEAN,
std: Union[Tuple[float, ...], float] = CIFAR10_STD,
padding: int = 0,
num_input_channels: int = 3):
super().__init__()
if width != 0:
raise ValueError('Unsupported `width`.')
self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
self.std = torch.tensor(std).view(num_input_channels, 1, 1)
self.mean_cuda = None
self.std_cuda = None
self.padding = padding
self.conv_2d = nn.Conv2d(num_input_channels, 64, kernel_size=3, stride=1,
padding=1, bias=False)
if depth == 18:
num_blocks = (2, 2, 2, 2)
elif depth == 34:
num_blocks = (3, 4, 6, 3)
else:
raise ValueError('Unsupported `depth`.')
self.layer_0 = self._make_layer(64, 64, num_blocks[0], 1, activation_fn)
self.layer_1 = self._make_layer(64, 128, num_blocks[1], 2, activation_fn)
self.layer_2 = self._make_layer(128, 256, num_blocks[2], 2, activation_fn)
self.layer_3 = self._make_layer(256, 512, num_blocks[3], 2, activation_fn)
self.batchnorm = nn.BatchNorm2d(512)
self.relu = activation_fn()
self.logits = nn.Linear(512, num_classes)
def _make_layer(self, in_planes, out_planes, num_blocks, stride,
activation_fn):
layers = []
for i, stride in enumerate([stride] + [1] * (num_blocks - 1)):
layers.append(
_PreActBlock(i == 0 and in_planes or out_planes,
out_planes,
stride,
activation_fn))
return nn.Sequential(*layers)
def forward(self, x):
if self.padding > 0:
x = F.pad(x, (self.padding,) * 4)
if x.is_cuda:
if self.mean_cuda is None:
self.mean_cuda = self.mean.cuda()
self.std_cuda = self.std.cuda()
out = (x - self.mean_cuda) / self.std_cuda
else:
out = (x - self.mean) / self.std
out = self.conv_2d(out)
out = self.layer_0(out)
out = self.layer_1(out)
out = self.layer_2(out)
out = self.layer_3(out)
out = self.relu(self.batchnorm(out))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
return self.logits(out)
| deepmind-research-master | adversarial_robustness/pytorch/model_zoo.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates a PyTorch checkpoint on CIFAR-10/100 or MNIST."""
from absl import app
from absl import flags
import torch
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
import tqdm
from adversarial_robustness.pytorch import model_zoo
_CKPT = flags.DEFINE_string(
'ckpt', None, 'Path to checkpoint.')
_DATASET = flags.DEFINE_enum(
'dataset', 'cifar10', ['cifar10', 'cifar100', 'mnist'],
'Dataset on which the checkpoint is evaluated.')
_WIDTH = flags.DEFINE_integer(
'width', 16, 'Width of WideResNet (if set to zero uses a PreActResNet).')
_DEPTH = flags.DEFINE_integer(
'depth', 70, 'Depth of WideResNet or PreActResNet.')
_USE_CUDA = flags.DEFINE_boolean(
'use_cuda', True, 'Whether to use CUDA.')
_BATCH_SIZE = flags.DEFINE_integer(
'batch_size', 100, 'Batch size.')
_NUM_BATCHES = flags.DEFINE_integer(
'num_batches', 0,
'Number of batches to evaluate (zero means the whole dataset).')
def main(unused_argv):
print(f'Loading "{_CKPT.value}"')
# Create model and dataset.
if _WIDTH.value == 0:
print(f'Using a PreActResNet with depth {_DEPTH.value}.')
model_ctor = model_zoo.PreActResNet
else:
print(f'Using a WideResNet with depth {_DEPTH.value} and width '
f'{_WIDTH.value}.')
model_ctor = model_zoo.WideResNet
if _DATASET.value == 'mnist':
model = model_ctor(
num_classes=10, depth=_DEPTH.value, width=_WIDTH.value,
activation_fn=model_zoo.Swish, mean=.5, std=.5, padding=2,
num_input_channels=1)
dataset_fn = datasets.MNIST
elif _DATASET.value == 'cifar10':
model = model_ctor(
num_classes=10, depth=_DEPTH.value, width=_WIDTH.value,
activation_fn=model_zoo.Swish, mean=model_zoo.CIFAR10_MEAN,
std=model_zoo.CIFAR10_STD)
dataset_fn = datasets.CIFAR10
else:
assert _DATASET.value == 'cifar100'
model = model_ctor(
num_classes=100, depth=_DEPTH.value, width=_WIDTH.value,
activation_fn=model_zoo.Swish, mean=model_zoo.CIFAR100_MEAN,
std=model_zoo.CIFAR100_STD)
dataset_fn = datasets.CIFAR100
# Load model.
if _CKPT.value != 'dummy':
params = torch.load(_CKPT.value)
model.load_state_dict(params)
if _USE_CUDA.value:
model.cuda()
model.eval()
print('Successfully loaded.')
# Load dataset.
transform_chain = transforms.Compose([transforms.ToTensor()])
ds = dataset_fn(root='/tmp/data', train=False, transform=transform_chain,
download=True)
test_loader = data.DataLoader(ds, batch_size=_BATCH_SIZE.value, shuffle=False,
num_workers=0)
# Evaluation.
correct = 0
total = 0
batch_count = 0
  max_batches = (10_000 - 1) // _BATCH_SIZE.value + 1
  total_batches = (min(max_batches, _NUM_BATCHES.value) if _NUM_BATCHES.value
                   else max_batches)
with torch.no_grad():
for images, labels in tqdm.tqdm(test_loader, total=total_batches):
      if _USE_CUDA.value:
        images, labels = images.cuda(), labels.cuda()
      outputs = model(images)
      _, predicted = torch.max(outputs.data, 1)
      total += labels.size(0)
      correct += (predicted == labels).sum().item()
batch_count += 1
if _NUM_BATCHES.value > 0 and batch_count >= _NUM_BATCHES.value:
break
print(f'Accuracy on the {total} test images: {100 * correct / total:.2f}%')
if __name__ == '__main__':
flags.mark_flag_as_required('ckpt')
app.run(main)
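# Example invocation (illustrative; the checkpoint path is hypothetical):
#   python -m adversarial_robustness.pytorch.eval \
#     --ckpt=/tmp/cifar10_wrn_70_16.pt --dataset=cifar10 --depth=70 --width=16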
| deepmind-research-master | adversarial_robustness/pytorch/eval.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WideResNet implementation in JAX using Haiku."""
from typing import Any, Dict, Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
class _WideResNetBlock(hk.Module):
"""Block of a WideResNet."""
def __init__(self, num_filters, stride=1, projection_shortcut=False,
activation=jax.nn.relu, norm_args=None, name=None):
super().__init__(name=name)
num_bottleneck_layers = 1
self._activation = activation
if norm_args is None:
norm_args = {
'create_offset': False,
'create_scale': True,
'decay_rate': .99,
}
self._bn_modules = []
self._conv_modules = []
for i in range(num_bottleneck_layers + 1):
s = stride if i == 0 else 1
self._bn_modules.append(hk.BatchNorm(
name='batchnorm_{}'.format(i),
**norm_args))
self._conv_modules.append(hk.Conv2D(
output_channels=num_filters,
padding='SAME',
kernel_shape=(3, 3),
stride=s,
with_bias=False,
name='conv_{}'.format(i))) # pytype: disable=not-callable
if projection_shortcut:
self._shortcut = hk.Conv2D(
output_channels=num_filters,
kernel_shape=(1, 1),
stride=stride,
with_bias=False,
name='shortcut') # pytype: disable=not-callable
else:
self._shortcut = None
def __call__(self, inputs, **norm_kwargs):
x = inputs
orig_x = inputs
for i, (bn, conv) in enumerate(zip(self._bn_modules, self._conv_modules)):
x = bn(x, **norm_kwargs)
x = self._activation(x)
if self._shortcut is not None and i == 0:
orig_x = x
x = conv(x)
if self._shortcut is not None:
shortcut_x = self._shortcut(orig_x)
x += shortcut_x
else:
x += orig_x
return x
class WideResNet(hk.Module):
"""WideResNet designed for CIFAR-10."""
def __init__(self,
num_classes: int = 10,
depth: int = 28,
width: int = 10,
activation: str = 'relu',
norm_args: Optional[Dict[str, Any]] = None,
name: Optional[str] = None):
super(WideResNet, self).__init__(name=name)
if (depth - 4) % 6 != 0:
raise ValueError('depth should be 6n+4.')
self._activation = getattr(jax.nn, activation)
if norm_args is None:
norm_args = {
'create_offset': True,
'create_scale': True,
'decay_rate': .99,
}
self._conv = hk.Conv2D(
output_channels=16,
kernel_shape=(3, 3),
stride=1,
with_bias=False,
name='init_conv') # pytype: disable=not-callable
self._bn = hk.BatchNorm(
name='batchnorm',
**norm_args)
self._linear = hk.Linear(
num_classes,
w_init=jnp.zeros,
name='logits')
blocks_per_layer = (depth - 4) // 6
filter_sizes = [width * n for n in [16, 32, 64]]
self._blocks = []
for layer_num, filter_size in enumerate(filter_sizes):
blocks_of_layer = []
for i in range(blocks_per_layer):
stride = 2 if (layer_num != 0 and i == 0) else 1
projection_shortcut = (i == 0)
blocks_of_layer.append(_WideResNetBlock(
num_filters=filter_size,
stride=stride,
projection_shortcut=projection_shortcut,
activation=self._activation,
norm_args=norm_args,
name='resnet_lay_{}_block_{}'.format(layer_num, i)))
self._blocks.append(blocks_of_layer)
def __call__(self, inputs: chex.Array, **norm_kwargs) -> chex.Array:
net = inputs
net = self._conv(net)
# Blocks.
for blocks_of_layer in self._blocks:
for block in blocks_of_layer:
net = block(net, **norm_kwargs)
net = self._bn(net, **norm_kwargs)
net = self._activation(net)
net = jnp.mean(net, axis=[1, 2])
return self._linear(net)
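# Editor's note: the function below is an illustrative usage sketch added for
# clarity; it is not part of the original file. The input shape, RNG seed and
# hyper-parameters are arbitrary assumptions.
def _example_wideresnet_forward():
  """Wraps the WideResNet above with Haiku and runs one forward pass."""
  def forward_fn(x, is_training):
    model = WideResNet(num_classes=10, depth=28, width=10)
    return model(x, is_training=is_training)
  forward = hk.without_apply_rng(hk.transform_with_state(forward_fn))
  images = jnp.zeros((2, 32, 32, 3), jnp.float32)  # Dummy CIFAR-10 batch.
  params, state = forward.init(jax.random.PRNGKey(0), images, is_training=True)
  logits, _ = forward.apply(params, state, images, is_training=False)
  return logits  # Shape [2, 10].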
| deepmind-research-master | adversarial_robustness/jax/model_zoo.py |
# Copyright 2021 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quick script to test that experiment can import and run."""
from absl import app
import jax
import jax.numpy as jnp
from jaxline import utils as jl_utils
from adversarial_robustness.jax import experiment
@jl_utils.disable_pmap_jit
def test_experiment(unused_argv):
"""Tests the main experiment."""
config = experiment.get_config()
exp_config = config.experiment_kwargs.config
exp_config.dry_run = True
exp_config.emulated_workers = 0
exp_config.training.batch_size = 2
exp_config.evaluation.batch_size = 2
exp_config.model.kwargs.depth = 10
exp_config.model.kwargs.width = 1
xp = experiment.Experiment('train', exp_config, jax.random.PRNGKey(0))
bcast = jax.pmap(lambda x: x)
global_step = bcast(jnp.zeros(jax.local_device_count()))
rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count()))
print('Taking a single experiment step for test purposes!')
result = xp.step(global_step, rng)
print(f'Step successfully taken, resulting metrics are {result}')
if __name__ == '__main__':
app.run(test_experiment)
| deepmind-research-master | adversarial_robustness/jax/experiment_test.py |
# Copyright 2021 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datasets."""
from typing import Sequence
import chex
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
_CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
_CIFAR10_STD = (0.2471, 0.2435, 0.2616)
_CIFAR100_MEAN = (0.5071, 0.4865, 0.4409)
_CIFAR100_STD = (0.2673, 0.2564, 0.2762)
_DATA_URL = 'https://storage.googleapis.com/dm-adversarial-robustness/'
_ALLOWED_FILES = ('cifar10_ddpm.npz',)
_WEBPAGE = ('https://github.com/deepmind/deepmind-research/tree/master/'
'adversarial_robustness')
def cifar10_preprocess(mode: str = 'train'):
"""Preprocessing functions for CIFAR-10."""
def _preprocess_fn_train(example):
"""Preprocessing of CIFAR-10 images for training."""
image = example['image']
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = _random_jitter(image, pad=4, crop=32)
image = tf.image.random_flip_left_right(image)
label = tf.cast(example['label'], tf.int32)
return {'image': image, 'label': label}
def _preprocess_fn_test(example):
"""Preprocessing of CIFAR-10 images for testing."""
image = example['image']
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
label = tf.cast(example['label'], tf.int32)
return {'image': image, 'label': label}
return _preprocess_fn_train if mode == 'train' else _preprocess_fn_test
def cifar10_normalize(image: chex.Array) -> chex.Array:
means = jnp.array(_CIFAR10_MEAN, dtype=image.dtype)
stds = jnp.array(_CIFAR10_STD, dtype=image.dtype)
return (image - means) / stds
def mnist_normalize(image: chex.Array) -> chex.Array:
image = jnp.pad(image, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant',
constant_values=0)
return (image - .5) * 2.
def cifar100_normalize(image: chex.Array) -> chex.Array:
means = jnp.array(_CIFAR100_MEAN, dtype=image.dtype)
stds = jnp.array(_CIFAR100_STD, dtype=image.dtype)
return (image - means) / stds
def load_cifar10(batch_sizes: Sequence[int],
subset: str = 'train',
is_training: bool = True,
drop_remainder: bool = True,
repeat: int = 1) -> tf.data.Dataset:
"""Loads CIFAR-10."""
if subset == 'train':
ds = tfds.load(name='cifar10', split=tfds.Split.TRAIN)
# In Gowal et al. (https://arxiv.org/abs/2010.03593) and Rebuffi et al.
# (https://arxiv.org/abs/2103.01946), we also keep a separate validation
# subset for early stopping and would run: ds = ds.skip(1_024).
elif subset == 'test':
ds = tfds.load(name='cifar10', split=tfds.Split.TEST)
else:
raise ValueError('Unknown subset: "{}"'.format(subset))
ds = ds.cache()
if is_training:
ds = ds.repeat()
ds = ds.shuffle(buffer_size=50_000, seed=0)
ds = _repeat_batch(batch_sizes, ds, repeat=repeat)
ds = ds.map(cifar10_preprocess('train' if is_training else 'test'),
num_parallel_calls=tf.data.AUTOTUNE)
for batch_size in reversed(batch_sizes):
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
return ds.prefetch(tf.data.AUTOTUNE)
def load_extra(batch_sizes: Sequence[int],
path_npz: str,
is_training: bool = True,
drop_remainder: bool = True) -> tf.data.Dataset:
"""Loads extra data from a given path."""
if not tf.io.gfile.exists(path_npz):
if path_npz in _ALLOWED_FILES:
path_npz = tf.keras.utils.get_file(path_npz, _DATA_URL + path_npz)
else:
raise ValueError(f'Extra data not found ({path_npz}). See {_WEBPAGE} for '
'more details.')
with tf.io.gfile.GFile(path_npz, 'rb') as fp:
npzfile = np.load(fp)
data = {'image': npzfile['image'], 'label': npzfile['label']}
  with tf.device('/device:cpu:0'):  # Prevent allocation from happening on the GPU.
ds = tf.data.Dataset.from_tensor_slices(data)
ds = ds.cache()
if is_training:
ds = ds.repeat()
ds = ds.shuffle(buffer_size=50_000, seed=jax.host_id())
ds = ds.map(cifar10_preprocess('train' if is_training else 'test'),
num_parallel_calls=tf.data.AUTOTUNE)
for batch_size in reversed(batch_sizes):
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
return ds.prefetch(tf.data.AUTOTUNE)
def load_dummy_data(batch_sizes: Sequence[int],
is_training: bool = True,
**unused_kwargs) -> tf.data.Dataset:
"""Loads fictive data (use this function when testing)."""
ds = tf.data.Dataset.from_tensor_slices({
'image': np.zeros((1, 32, 32, 3), np.float32),
'label': np.zeros((1,), np.int32),
})
ds = ds.repeat()
if not is_training:
total_batch_size = np.prod(batch_sizes)
ds = ds.take(total_batch_size)
ds = ds.map(cifar10_preprocess('train' if is_training else 'test'),
num_parallel_calls=tf.data.AUTOTUNE)
for batch_size in reversed(batch_sizes):
ds = ds.batch(batch_size, drop_remainder=True)
return ds.prefetch(tf.data.AUTOTUNE)
def _random_jitter(image: tf.Tensor, pad: int, crop: int) -> tf.Tensor:
shape = image.shape.as_list()
image = tf.pad(image, [[pad, pad], [pad, pad], [0, 0]])
image = tf.image.random_crop(image, size=[crop, crop, shape[2]])
return image
def _repeat_batch(batch_sizes: Sequence[int],
ds: tf.data.Dataset,
repeat: int = 1) -> tf.data.Dataset:
"""Tiles the inner most batch dimension."""
if repeat <= 1:
return ds
if batch_sizes[-1] % repeat != 0:
raise ValueError(f'The last element of `batch_sizes` ({batch_sizes}) must '
f'be divisible by `repeat` ({repeat}).')
# Perform regular batching with reduced number of elements.
for i, batch_size in enumerate(reversed(batch_sizes)):
ds = ds.batch(batch_size // repeat if i == 0 else batch_size,
drop_remainder=True)
# Repeat batch.
fn = lambda x: tf.repeat(x, repeats=repeat, axis=len(batch_sizes) - 1)
def repeat_inner_batch(example):
return jax.tree_map(fn, example)
ds = ds.map(repeat_inner_batch,
num_parallel_calls=tf.data.AUTOTUNE)
# Unbatch.
for _ in batch_sizes:
ds = ds.unbatch()
return ds
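# Editor's note: the function below is an illustrative usage sketch added for
# clarity; it is not part of the original file. It assumes CIFAR-10 can be
# fetched through TFDS, and the per-device batch size of 32 is arbitrary.
def _example_input_pipeline():
  """Builds a per-device-sharded CIFAR-10 training pipeline."""
  ds = load_cifar10(batch_sizes=[jax.local_device_count(), 32],
                    subset='train', is_training=True)
  batch = next(iter(tfds.as_numpy(ds)))
  # `batch['image']` has shape [num_local_devices, 32, 32, 32, 3] with values
  # in [0, 1]; `cifar10_normalize` is applied separately, inside the model.
  return batch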
| deepmind-research-master | adversarial_robustness/jax/datasets.py |
# Copyright 2021 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAXline experiment to perform robust adversarial training."""
import functools
import os
from typing import Callable, Optional, Tuple
from absl import flags
from absl import logging
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import base_config
from jaxline import experiment
from jaxline import utils as jl_utils
from ml_collections import config_dict
import numpy as np
import optax
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from adversarial_robustness.jax import attacks
from adversarial_robustness.jax import datasets
from adversarial_robustness.jax import model_zoo
from adversarial_robustness.jax import utils
FLAGS = flags.FLAGS
def get_config():
"""Return config object for training."""
config = base_config.get_base_config()
# Batch size, training steps and data.
num_classes = 10
num_epochs = 400
# Gowal et al. (2020) and Rebuffi et al. (2021) use 1024 as batch size.
# Reducing this batch size may require further adjustments to the batch
# normalization decay or the learning rate. If you have to use a batch size
# of 256, reduce the number of emulated workers to 1 (it should match the
# results of using a batch size of 1024 with 4 workers).
train_batch_size = 1024
def steps_from_epochs(n):
return max(int(n * 50_000 / train_batch_size), 1)
num_steps = steps_from_epochs(num_epochs)
test_batch_size = train_batch_size
# Specify the path to the downloaded data. You can download data from
# https://github.com/deepmind/deepmind-research/tree/master/adversarial_robustness.
# If the path is set to "cifar10_ddpm.npz" and is not found in the current
# directory, the corresponding data will be downloaded.
extra_npz = 'cifar10_ddpm.npz' # Can be `None`.
# Learning rate.
learning_rate = .1 * max(train_batch_size / 256, 1.)
learning_rate_warmup = steps_from_epochs(10)
use_cosine_schedule = True
if use_cosine_schedule:
learning_rate_fn = utils.get_cosine_schedule(learning_rate, num_steps,
learning_rate_warmup)
else:
learning_rate_fn = utils.get_step_schedule(learning_rate, num_steps,
learning_rate_warmup)
# Model definition.
model_ctor = model_zoo.WideResNet
model_kwargs = dict(
num_classes=num_classes,
depth=28,
width=10,
activation='swish')
# Attack used during training (can be None).
epsilon = 8 / 255
train_attack = attacks.UntargetedAttack(
attacks.PGD(
attacks.Adam(optax.piecewise_constant_schedule(
init_value=.1,
boundaries_and_scales={5: .1})),
num_steps=10,
initialize_fn=attacks.linf_initialize_fn(epsilon),
project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))),
loss_fn=attacks.untargeted_kl_divergence)
# Attack used during evaluation (can be None).
eval_attack = attacks.UntargetedAttack(
attacks.PGD(
attacks.Adam(learning_rate_fn=optax.piecewise_constant_schedule(
init_value=.1,
boundaries_and_scales={20: .1, 30: .01})),
num_steps=40,
initialize_fn=attacks.linf_initialize_fn(epsilon),
project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))),
loss_fn=attacks.untargeted_margin)
config.experiment_kwargs = config_dict.ConfigDict(dict(config=dict(
epsilon=epsilon,
num_classes=num_classes,
      # Results from various publications use 4 worker machines, which results
      # in slight differences when using fewer worker machines. To compensate
      # for such discrepancies, we emulate these additional workers. Set to
      # zero when using more than 4 workers.
emulated_workers=4,
dry_run=False,
save_final_checkpoint_as_npy=True,
model=dict(
constructor=model_ctor,
kwargs=model_kwargs),
training=dict(
batch_size=train_batch_size,
learning_rate=learning_rate_fn,
weight_decay=5e-4,
swa_decay=.995,
use_cutmix=False,
supervised_batch_ratio=.3 if extra_npz is not None else 1.,
extra_data_path=extra_npz,
extra_label_smoothing=.1,
attack=train_attack),
evaluation=dict(
# If `interval` is positive, synchronously evaluate at regular
# intervals. Setting it to zero will not evaluate while training,
# unless `--jaxline_mode` is set to `train_eval_multithreaded`, which
# asynchronously evaluates checkpoints.
interval=steps_from_epochs(40),
batch_size=test_batch_size,
attack=eval_attack),
)))
config.checkpoint_dir = '/tmp/jaxline/robust'
config.train_checkpoint_all_hosts = False
config.training_steps = num_steps
config.interval_type = 'steps'
config.log_train_data_interval = steps_from_epochs(.5)
config.log_tensors_interval = steps_from_epochs(.5)
config.save_checkpoint_interval = steps_from_epochs(40)
config.eval_specific_checkpoint_dir = ''
return config
class Experiment(experiment.AbstractExperiment):
"""CIFAR-10 experiment."""
CHECKPOINT_ATTRS = {
'_params': 'params',
'_avg_params': 'avg_params',
'_opt_state': 'opt_state',
'_state': 'state',
}
def __init__(self, mode, config, init_rng):
super().__init__(mode=mode)
self.config = config
self._params = None # Network weights.
self._avg_params = None # Averaged network weights.
self._state = None # Network state (e.g., batch statistics).
self._opt_state = None # Optimizer state.
# Build model.
self.model = hk.transform_with_state(self._get_model())
if mode == 'train':
self._initialize_training(init_rng)
if self.config.evaluation.interval > 0:
self._last_evaluation_scalars = {}
self._initialize_evaluation()
elif mode == 'eval':
self._initialize_evaluation()
elif mode == 'train_eval_multithreaded':
self._initialize_training(init_rng)
self._initialize_evaluation()
else:
raise ValueError(f'Unknown mode: "{mode}"')
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, *unused_args, **unused_kwargs):
# Get next inputs.
supervised_inputs = next(self.supervised_train_input)
if self.extra_train_input is None:
extra_inputs = None
else:
extra_inputs = next(self.extra_train_input)
# Perform step.
(self._params, self._avg_params, self._state, self._opt_state,
scalars) = self.train_fn(
params=self._params,
avg_params=self._avg_params,
state=self._state,
opt_state=self._opt_state,
global_step=global_step,
supervised_inputs=supervised_inputs,
extra_inputs=extra_inputs,
rng=rng)
scalars = jl_utils.get_first(scalars)
# Save final checkpoint.
if self.config.save_final_checkpoint_as_npy and not self.config.dry_run:
global_step_value = jl_utils.get_first(global_step)
if global_step_value == FLAGS.config.get('training_steps', 1) - 1:
f_np = lambda x: np.array(jax.device_get(jl_utils.get_first(x)))
np_params = jax.tree_map(f_np, self._avg_params or self._params)
np_state = jax.tree_map(f_np, self._state)
path_npy = os.path.join(FLAGS.config.checkpoint_dir, 'checkpoint.npy')
with tf.io.gfile.GFile(path_npy, 'wb') as fp:
np.save(fp, (np_params, np_state))
logging.info('Saved final checkpoint at %s', path_npy)
# Run synchronous evaluation.
if self.config.evaluation.interval <= 0:
return scalars
global_step_value = jl_utils.get_first(global_step)
if (global_step_value % self.config.evaluation.interval != 0 and
global_step_value != FLAGS.config.get('training_steps', 1) - 1):
return _merge_eval_scalars(scalars, self._last_evaluation_scalars)
logging.info('Running synchronous evaluation...')
eval_scalars = self.evaluate(global_step, rng)
f_list = lambda x: x.tolist() if isinstance(x, jnp.ndarray) else x
self._last_evaluation_scalars = jax.tree_map(f_list, eval_scalars)
logging.info('(eval) global_step: %d, %s', global_step_value,
self._last_evaluation_scalars)
return _merge_eval_scalars(scalars, self._last_evaluation_scalars)
def _train_fn(self, params, avg_params, state, opt_state, global_step,
supervised_inputs, extra_inputs, rng):
scalars = {}
images, labels, target_probs = self.concatenate(supervised_inputs,
extra_inputs)
# Apply CutMix.
if self.config.training.use_cutmix:
aug_rng, rng = jax.random.split(rng)
images, target_probs = utils.cutmix(aug_rng, images, target_probs,
split=self._repeat_batch)
# Perform adversarial attack.
if self.config.training.attack is None:
adv_images = None
grad_fn = jax.grad(self._cross_entropy_loss_fn, has_aux=True)
else:
attack = self.config.training.attack
attack_rng, rng = jax.random.split(rng)
def logits_fn(x):
x = self.normalize_fn(x)
return self.model.apply(params, state, rng, x, is_training=False,
test_local_stats=True)[0]
if attack.expects_labels():
if self.config.training.use_cutmix:
raise ValueError('Use `untargeted_kl_divergence` when using CutMix.')
target_labels = labels
else:
assert attack.expects_probabilities()
if self.config.training.use_cutmix:
# When using CutMix, regress the attack away from mixed labels.
target_labels = target_probs
else:
target_labels = jax.nn.softmax(logits_fn(images))
adv_images = attack(logits_fn, attack_rng, images, target_labels)
grad_fn = jax.grad(self._trades_loss_fn, has_aux=True)
# Compute loss and gradients.
scaled_grads, (state, loss_scalars) = grad_fn(
params, state, images, adv_images, labels, target_probs, rng)
grads = jax.lax.psum(scaled_grads, axis_name='i')
scalars.update(loss_scalars)
updates, opt_state = self.optimizer.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)
# Stochastic weight averaging.
if self.config.training.swa_decay > 0:
avg_params = utils.ema_update(global_step, avg_params, params,
decay_rate=self.config.training.swa_decay)
learning_rate = self.config.training.learning_rate(global_step)
scalars['learning_rate'] = learning_rate
scalars = jax.lax.pmean(scalars, axis_name='i')
return params, avg_params, state, opt_state, scalars
def _cross_entropy_loss_fn(self, params, state, images, adv_images, labels,
target_probs, rng):
scalars = {}
images = self.normalize_fn(images)
logits, state = self.model.apply(
params, state, rng, images, is_training=True)
loss = jnp.mean(utils.cross_entropy(logits, target_probs))
loss += self.config.training.weight_decay * utils.weight_decay(params)
if not self.config.training.use_cutmix:
scalars['top_1_acc'] = utils.accuracy(logits, labels)
scalars['train_loss'] = loss
scaled_loss = loss / jax.device_count()
return scaled_loss, (state, scalars)
def _trades_loss_fn(self, params, state, images, adv_images, labels,
target_probs, rng, beta=6.):
"""Calculates TRADES loss (https://arxiv.org/pdf/1901.08573)."""
scalars = {}
def apply_fn(x, **norm_kwargs):
x = self.normalize_fn(x)
return self.model.apply(params, state, rng, x, **norm_kwargs)
# Clean images.
clean_logits, _ = apply_fn(images, is_training=False, test_local_stats=True)
if not self.config.training.use_cutmix:
scalars['top_1_acc'] = utils.accuracy(clean_logits, labels)
# Adversarial images. Update BN stats with adversarial images.
adv_logits, state = apply_fn(adv_images, is_training=True)
if not self.config.training.use_cutmix:
scalars['top_1_adv_acc'] = utils.accuracy(adv_logits, labels)
# Compute loss.
clean_loss = jnp.mean(utils.cross_entropy(clean_logits, target_probs))
adv_loss = jnp.mean(utils.kl_divergence(adv_logits, clean_logits))
reg_loss = self.config.training.weight_decay * utils.weight_decay(params)
loss = clean_loss + beta * adv_loss + reg_loss
scalars['train_loss'] = loss
scaled_loss = loss / jax.device_count()
return scaled_loss, (state, scalars)
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, *unused_args, **unused_kwargs):
scalars = self.eval_epoch(self._params, self._state, rng)
if self._avg_params:
avg_scalars = self.eval_epoch(self._avg_params or self._params,
self._state, rng)
for k, v in avg_scalars.items():
scalars[k + '_swa'] = v
return scalars
def eval_epoch(self, params, state, rng):
host_id = jax.host_id()
num_samples = 0
batch_axis = 1
summed_scalars = None
# Converting to numpy here allows us to reset the generator.
eval_input = tfds.as_numpy(self.eval_input)
for all_inputs in eval_input:
      # The inputs are sent to multiple workers.
inputs = jax.tree_map(lambda x: x[host_id], all_inputs)
num_samples += jax.device_count() * inputs['image'].shape[batch_axis]
scalars = jl_utils.get_first(self.eval_fn(params, state, inputs, rng))
# Accumulate the sum of scalars for each step.
scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars)
if summed_scalars is None:
summed_scalars = scalars
else:
summed_scalars = jax.tree_map(jnp.add, summed_scalars, scalars)
mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars)
return mean_scalars
def _eval_fn(self, params, state, inputs, rng):
images = inputs['image']
labels = inputs['label']
attack_rng, rng = jax.random.split(rng)
def logits_fn(x):
x = self.normalize_fn(x)
return self.model.apply(params, state, rng, x, is_training=False,
test_local_stats=False)[0]
# Clean accuracy.
logits = logits_fn(images)
predicted_label = jnp.argmax(logits, axis=-1)
correct = jnp.equal(predicted_label, labels).astype(jnp.float32)
scalars = {'top_1_acc': correct}
# Adversarial accuracy.
if self.config.evaluation.attack is not None:
attack = self.config.evaluation.attack
assert attack.expects_labels()
adv_images = attack(logits_fn, attack_rng, images, labels)
adv_logits = logits_fn(adv_images)
predicted_label = jnp.argmax(adv_logits, axis=-1)
correct = jnp.equal(predicted_label, labels).astype(jnp.float32)
scalars['top_1_adv_acc'] = correct
# Returned values will be summed and finally divided by num_samples.
return jax.lax.psum(scalars, axis_name='i')
def _initialize_training(self, rng):
# Initialize inputs.
if self.config.emulated_workers > 0:
per_device_workers, ragged = divmod(self.config.emulated_workers,
jax.host_count())
if ragged:
raise ValueError('Number of emulated workers must be divisible by the '
'number of physical workers `jax.host_count()`.')
self._repeat_batch = per_device_workers
else:
self._repeat_batch = 1
self.supervised_train_input = jl_utils.py_prefetch(
self._supervised_train_dataset)
if self.config.training.extra_data_path is None:
self.extra_train_input = None
else:
self.extra_train_input = jl_utils.py_prefetch(
self._extra_train_dataset)
self.normalize_fn = datasets.cifar10_normalize
# Optimizer.
self.optimizer = utils.sgd_momentum(self.config.training.learning_rate,
momentum=.9, nesterov=True)
# Initialize parameters.
if self._params is None:
logging.info('Initializing parameters randomly rather than restoring '
'from checkpoint.')
# Create inputs to initialize the network state.
images, _, _ = jax.pmap(self.concatenate)(
next(self.supervised_train_input),
next(self.extra_train_input) if self.extra_train_input is not None
else None)
images = jax.pmap(self.normalize_fn)(images)
# Initialize weights and biases.
init_net = jax.pmap(
lambda *a: self.model.init(*a, is_training=True), axis_name='i')
init_rng = jl_utils.bcast_local_devices(rng)
self._params, self._state = init_net(init_rng, images)
# Setup weight averaging.
if self.config.training.swa_decay > 0:
self._avg_params = self._params
else:
self._avg_params = None
# Initialize optimizer state.
init_opt = jax.pmap(self.optimizer.init, axis_name='i')
self._opt_state = init_opt(self._params)
# Initialize step function.
self.train_fn = jax.pmap(self._train_fn, axis_name='i',
donate_argnums=(0, 1, 2, 3))
def _initialize_evaluation(self):
load_fn = (datasets.load_dummy_data if self.config.dry_run else
datasets.load_cifar10)
self.eval_input = _dataset(
functools.partial(load_fn, subset='test'),
is_training=False, total_batch_size=self.config.evaluation.batch_size)
self.normalize_fn = datasets.cifar10_normalize
self.eval_fn = jax.pmap(self._eval_fn, axis_name='i')
def _supervised_train_dataset(self) -> tfds.typing.Tree[np.ndarray]:
"""Creates the training dataset."""
load_fn = (datasets.load_dummy_data if self.config.dry_run else
datasets.load_cifar10)
load_fn = functools.partial(load_fn, subset='train',
repeat=self._repeat_batch)
ds = _dataset(load_fn, is_training=True, repeat=self._repeat_batch,
total_batch_size=self.config.training.batch_size,
ratio=self.config.training.supervised_batch_ratio)
return tfds.as_numpy(ds)
def _extra_train_dataset(self) -> tfds.typing.Tree[np.ndarray]:
"""Creates the training dataset."""
load_extra_fn = (self.config.training.get('load_extra_fn', None) or
datasets.load_extra)
load_fn = datasets.load_dummy_data if self.config.dry_run else load_extra_fn
load_fn = functools.partial(
load_fn, path_npz=self.config.training.extra_data_path)
ds = _dataset(
load_fn, is_training=True, repeat=self._repeat_batch,
total_batch_size=self.config.training.batch_size,
one_minus_ratio=self.config.training.supervised_batch_ratio)
return tfds.as_numpy(ds)
def _get_model(self) -> Callable[..., chex.Array]:
config = self.config.model
def forward_fn(inputs, **norm_kwargs):
model_instance = config.constructor(**config.kwargs.to_dict())
return model_instance(inputs, **norm_kwargs)
return forward_fn
def concatenate(
self,
supervised_inputs: chex.ArrayTree,
extra_inputs: chex.ArrayTree
) -> Tuple[chex.Array, chex.Array, chex.Array]:
"""Concatenate inputs."""
num_classes = self.config.num_classes
supervised_images = supervised_inputs['image']
supervised_labels = supervised_inputs['label']
if extra_inputs is None:
images = supervised_images
labels = supervised_labels
target_probs = hk.one_hot(labels, num_classes)
else:
extra_images = extra_inputs['image']
images = jnp.concatenate([supervised_images, extra_images], axis=0)
extra_labels = extra_inputs['label']
labels = jnp.concatenate([supervised_labels, extra_labels], axis=0)
supervised_one_hot_labels = hk.one_hot(supervised_labels, num_classes)
extra_one_hot_labels = hk.one_hot(extra_labels, num_classes)
if self.config.training.extra_label_smoothing > 0:
pos = 1. - self.config.training.extra_label_smoothing
neg = self.config.training.extra_label_smoothing / num_classes
extra_one_hot_labels = pos * extra_one_hot_labels + neg
target_probs = jnp.concatenate(
[supervised_one_hot_labels, extra_one_hot_labels], axis=0)
return images, labels, target_probs
def _dataset(load_fn,
is_training: bool,
total_batch_size: int,
ratio: Optional[float] = None,
one_minus_ratio: Optional[float] = None,
repeat: int = 1) -> tf.data.Dataset:
"""Creates a dataset."""
num_devices = jax.device_count()
per_device_batch_size, ragged = divmod(total_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {total_batch_size} must be divisible by the '
f'total number of devices {num_devices}')
if repeat > 1:
if per_device_batch_size % repeat:
raise ValueError(
f'Per device batch size {per_device_batch_size} must be divisible '
f'by the number of repeated batches {repeat}')
per_device_batch_size //= repeat
if ratio is None and one_minus_ratio is None:
pass # Use full batch size.
elif one_minus_ratio is None:
per_device_batch_size = max(
1, min(round(per_device_batch_size * ratio),
per_device_batch_size - 1))
elif ratio is None:
batch_size = max(1, min(round(per_device_batch_size * one_minus_ratio),
per_device_batch_size - 1))
per_device_batch_size = per_device_batch_size - batch_size
else:
raise ValueError('Only one of `ratio` or `one_minus_ratio` must be '
'specified')
if repeat > 1:
per_device_batch_size *= repeat
# When testing, we need to batch data across all devices (not just local
# devices).
num_local_devices = jax.local_device_count()
if is_training:
batch_sizes = [num_local_devices, per_device_batch_size]
else:
num_hosts = jax.host_count()
assert num_hosts * num_local_devices == num_devices
batch_sizes = [num_hosts, num_local_devices, per_device_batch_size]
return load_fn(batch_sizes, is_training=is_training)
def _merge_eval_scalars(a, b):
if b is None:
return a
for k, v in b.items():
a['eval_' + k] = v
return a
| deepmind-research-master | adversarial_robustness/jax/experiment.py |
# Copyright 2021 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions."""
import re
from typing import Optional, Sequence, Tuple
import chex
import einops
import haiku as hk
import jax
import jax.numpy as jnp
import optax
def get_cosine_schedule(
max_learning_rate: float,
total_steps: int,
warmup_steps: int = 0) -> optax.Schedule:
"""Builds a cosine decay schedule with initial warm-up."""
if total_steps < warmup_steps:
return optax.linear_schedule(init_value=0., end_value=max_learning_rate,
transition_steps=warmup_steps)
return optax.join_schedules([
optax.linear_schedule(init_value=0., end_value=max_learning_rate,
transition_steps=warmup_steps),
optax.cosine_decay_schedule(init_value=max_learning_rate,
decay_steps=total_steps - warmup_steps),
], [warmup_steps])
def get_step_schedule(
max_learning_rate: float,
total_steps: int,
warmup_steps: int = 0) -> optax.Schedule:
"""Builds a step schedule with initial warm-up."""
if total_steps < warmup_steps:
return optax.linear_schedule(init_value=0., end_value=max_learning_rate,
transition_steps=warmup_steps)
return optax.join_schedules([
optax.linear_schedule(init_value=0., end_value=max_learning_rate,
transition_steps=warmup_steps),
optax.piecewise_constant_schedule(
init_value=max_learning_rate,
boundaries_and_scales={total_steps * 2 // 3: .1}),
], [warmup_steps])
def sgd_momentum(learning_rate_fn: optax.Schedule,
momentum: float = 0.,
nesterov: bool = False) -> optax.GradientTransformation:
return optax.chain(
optax.trace(decay=momentum, nesterov=nesterov),
optax.scale_by_schedule(learning_rate_fn),
optax.scale(-1.))
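# Editor's note: the function below is an illustrative usage sketch added for
# clarity; it is not part of the original file. The schedule lengths, momentum
# and dummy parameters are arbitrary assumptions.
def _example_optimizer_step():
  """Applies one SGD-with-momentum update under a cosine learning rate."""
  schedule = get_cosine_schedule(max_learning_rate=.1, total_steps=1_000,
                                 warmup_steps=100)
  optimizer = sgd_momentum(schedule, momentum=.9, nesterov=True)
  params = {'w': jnp.ones((3, 3))}
  grads = {'w': jnp.full((3, 3), .5)}
  opt_state = optimizer.init(params)
  updates, opt_state = optimizer.update(grads, opt_state, params)
  return optax.apply_updates(params, updates)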
def cross_entropy(logits: chex.Array, labels: chex.Array) -> chex.Array:
return -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
def kl_divergence(q_logits: chex.Array,
p_logits: chex.Array) -> chex.Array:
"""Compute the KL divergence."""
p_probs = jax.nn.softmax(p_logits)
return cross_entropy(q_logits, p_probs) - cross_entropy(p_logits, p_probs)
def accuracy(logits: chex.Array, labels: chex.Array) -> chex.Array:
predicted_label = jnp.argmax(logits, axis=-1)
correct = jnp.equal(predicted_label, labels).astype(jnp.float32)
return jnp.sum(correct, axis=0) / logits.shape[0]
def weight_decay(params: hk.Params,
regex_match: Optional[Sequence[str]] = None,
regex_ignore: Optional[Sequence[str]] = None) -> chex.Array:
"""Computes the L2 regularization loss."""
if regex_match is None:
regex_match = ('.*w$', '.*b$')
if regex_ignore is None:
regex_ignore = ('.*batchnorm.*',)
l2_norm = 0.
for mod_name, mod_params in params.items():
for param_name, param in mod_params.items():
name = '/'.join([mod_name, param_name])
if (regex_match and
all(not re.match(regex, name) for regex in regex_match)):
continue
if (regex_ignore and
any(re.match(regex, name) for regex in regex_ignore)):
continue
l2_norm += jnp.sum(jnp.square(param))
return .5 * l2_norm
def ema_update(step: chex.Array,
avg_params: chex.ArrayTree,
new_params: chex.ArrayTree,
decay_rate: float = 0.99,
warmup_steps: int = 0,
dynamic_decay: bool = True) -> chex.ArrayTree:
"""Applies an exponential moving average."""
factor = (step >= warmup_steps).astype(jnp.float32)
if dynamic_decay:
# Uses TF-style EMA.
delta = step - warmup_steps
decay = jnp.minimum(decay_rate, (1. + delta) / (10. + delta))
else:
decay = decay_rate
decay *= factor
def _weighted_average(p1, p2):
d = decay.astype(p1.dtype)
return (1 - d) * p1 + d * p2
return jax.tree_map(_weighted_average, new_params, avg_params)
def cutmix(rng: chex.PRNGKey,
images: chex.Array,
labels: chex.Array,
alpha: float = 1.,
beta: float = 1.,
split: int = 1) -> Tuple[chex.Array, chex.Array]:
"""Composing two images by inserting a patch into another image."""
batch_size, height, width, _ = images.shape
split_batch_size = batch_size // split if split > 1 else batch_size
# Masking bounding box.
box_rng, lam_rng, rng = jax.random.split(rng, num=3)
lam = jax.random.beta(lam_rng, a=alpha, b=beta, shape=())
cut_rat = jnp.sqrt(1. - lam)
cut_w = jnp.array(width * cut_rat, dtype=jnp.int32)
cut_h = jnp.array(height * cut_rat, dtype=jnp.int32)
box_coords = _random_box(box_rng, height, width, cut_h, cut_w)
# Adjust lambda.
lam = 1. - (box_coords[2] * box_coords[3] / (height * width))
idx = jax.random.permutation(rng, split_batch_size)
def _cutmix(x, y):
images_a = x
images_b = x[idx, :, :, :]
y = lam * y + (1. - lam) * y[idx, :]
x = _compose_two_images(images_a, images_b, box_coords)
return x, y
if split <= 1:
return _cutmix(images, labels)
# Apply CutMix separately on each sub-batch. This reverses the effect of
# `repeat` in datasets.
images = einops.rearrange(images, '(b1 b2) ... -> b1 b2 ...', b2=split)
labels = einops.rearrange(labels, '(b1 b2) ... -> b1 b2 ...', b2=split)
images, labels = jax.vmap(_cutmix, in_axes=1, out_axes=1)(images, labels)
images = einops.rearrange(images, 'b1 b2 ... -> (b1 b2) ...', b2=split)
labels = einops.rearrange(labels, 'b1 b2 ... -> (b1 b2) ...', b2=split)
return images, labels
def _random_box(rng: chex.PRNGKey,
height: chex.Numeric,
width: chex.Numeric,
cut_h: chex.Array,
cut_w: chex.Array) -> chex.Array:
"""Sample a random box of shape [cut_h, cut_w]."""
height_rng, width_rng = jax.random.split(rng)
i = jax.random.randint(
height_rng, shape=(), minval=0, maxval=height, dtype=jnp.int32)
j = jax.random.randint(
width_rng, shape=(), minval=0, maxval=width, dtype=jnp.int32)
bby1 = jnp.clip(i - cut_h // 2, 0, height)
bbx1 = jnp.clip(j - cut_w // 2, 0, width)
h = jnp.clip(i + cut_h // 2, 0, height) - bby1
w = jnp.clip(j + cut_w // 2, 0, width) - bbx1
return jnp.array([bby1, bbx1, h, w])
def _compose_two_images(images: chex.Array,
image_permutation: chex.Array,
bbox: chex.Array) -> chex.Array:
"""Inserting the second minibatch into the first at the target locations."""
def _single_compose_two_images(image1, image2):
height, width, _ = image1.shape
mask = _window_mask(bbox, (height, width))
return image1 * (1. - mask) + image2 * mask
return jax.vmap(_single_compose_two_images)(images, image_permutation)
def _window_mask(destination_box: chex.Array,
size: Tuple[int, int]) -> jnp.ndarray:
"""Mask a part of the image."""
height_offset, width_offset, h, w = destination_box
h_range = jnp.reshape(jnp.arange(size[0]), [size[0], 1, 1])
w_range = jnp.reshape(jnp.arange(size[1]), [1, size[1], 1])
return jnp.logical_and(
jnp.logical_and(height_offset <= h_range,
h_range < height_offset + h),
jnp.logical_and(width_offset <= w_range,
w_range < width_offset + w)).astype(jnp.float32)
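# Editor's note: the function below is an illustrative usage sketch added for
# clarity; it is not part of the original file. The batch size, image shape
# and labels are arbitrary assumptions.
def _example_cutmix():
  """Applies CutMix to a dummy batch of images and one-hot labels."""
  rng = jax.random.PRNGKey(0)
  images = jnp.zeros((8, 32, 32, 3), jnp.float32)
  labels = jax.nn.one_hot(jnp.arange(8) % 10, 10)
  mixed_images, mixed_labels = cutmix(rng, images, labels)
  return mixed_images, mixed_labels  # Same shapes as the inputs.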
| deepmind-research-master | adversarial_robustness/jax/utils.py |
# Copyright 2021 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a JAXline experiment to perform robust adversarial training."""
import functools
from absl import app
from absl import flags
from jaxline import platform
import tensorflow.compat.v2 as tf
from adversarial_robustness.jax import experiment
if __name__ == '__main__':
flags.mark_flag_as_required('config')
try:
tf.config.set_visible_devices([], 'GPU') # Prevent TF from using the GPU.
except tf.errors.NotFoundError:
pass
app.run(functools.partial(platform.main, experiment.Experiment))
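# Editor's note: illustrative invocation added for clarity; it is not part of
# the original file. JAXline requires a `--config` flag pointing at a file
# that defines `get_config()`; the path below is an assumption based on the
# repository layout.
#   python -m adversarial_robustness.jax.train \
#     --config=adversarial_robustness/jax/experiment.py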
| deepmind-research-master | adversarial_robustness/jax/train.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates a JAX checkpoint on CIFAR-10/100 or MNIST."""
import functools
from absl import app
from absl import flags
import haiku as hk
import numpy as np
import optax
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tqdm
from adversarial_robustness.jax import attacks
from adversarial_robustness.jax import datasets
from adversarial_robustness.jax import model_zoo
_CKPT = flags.DEFINE_string(
'ckpt', None, 'Path to checkpoint.')
_DATASET = flags.DEFINE_enum(
'dataset', 'cifar10', ['cifar10', 'cifar100', 'mnist'],
'Dataset on which the checkpoint is evaluated.')
_WIDTH = flags.DEFINE_integer(
'width', 16, 'Width of WideResNet.')
_DEPTH = flags.DEFINE_integer(
'depth', 70, 'Depth of WideResNet.')
_BATCH_SIZE = flags.DEFINE_integer(
'batch_size', 100, 'Batch size.')
_NUM_BATCHES = flags.DEFINE_integer(
'num_batches', 0,
'Number of batches to evaluate (zero means the whole dataset).')
def main(unused_argv):
print(f'Loading "{_CKPT.value}"')
print(f'Using a WideResNet with depth {_DEPTH.value} and width '
f'{_WIDTH.value}.')
# Create dataset.
if _DATASET.value == 'mnist':
_, data_test = tf.keras.datasets.mnist.load_data()
normalize_fn = datasets.mnist_normalize
elif _DATASET.value == 'cifar10':
_, data_test = tf.keras.datasets.cifar10.load_data()
normalize_fn = datasets.cifar10_normalize
else:
assert _DATASET.value == 'cifar100'
_, data_test = tf.keras.datasets.cifar100.load_data()
normalize_fn = datasets.cifar100_normalize
  # Create model. The classification head must match the evaluated dataset:
  # 10 classes for CIFAR-10/MNIST and 100 classes for CIFAR-100.
  num_classes = 100 if _DATASET.value == 'cifar100' else 10
  @hk.transform_with_state
  def model_fn(x, is_training=False):
    model = model_zoo.WideResNet(
        num_classes=num_classes, depth=_DEPTH.value, width=_WIDTH.value,
activation='swish')
return model(normalize_fn(x), is_training=is_training)
  # Build dataset. Keras returns CIFAR labels with shape [N, 1] and MNIST
  # labels with shape [N]; MNIST images also lack a channel axis.
  images, labels = data_test
  images = images.astype(np.float32) / 255.
  if images.ndim == 3:
    images = images[..., None]
  labels = np.reshape(labels, [-1]).astype(np.int64)
  samples = (images, labels)
data = tf.data.Dataset.from_tensor_slices(samples).batch(_BATCH_SIZE.value)
test_loader = tfds.as_numpy(data)
# Load model parameters.
rng_seq = hk.PRNGSequence(0)
if _CKPT.value == 'dummy':
for images, _ in test_loader:
break
params, state = model_fn.init(next(rng_seq), images, is_training=True)
# Reset iterator.
test_loader = tfds.as_numpy(data)
else:
params, state = np.load(_CKPT.value, allow_pickle=True)
# Create adversarial attack. We run a PGD-40 attack with margin loss.
epsilon = 8 / 255
eval_attack = attacks.UntargetedAttack(
attacks.PGD(
attacks.Adam(learning_rate_fn=optax.piecewise_constant_schedule(
init_value=.1,
boundaries_and_scales={20: .1, 30: .01})),
num_steps=40,
initialize_fn=attacks.linf_initialize_fn(epsilon),
project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))),
loss_fn=attacks.untargeted_margin)
def logits_fn(x, rng):
return model_fn.apply(params, state, rng, x)[0]
# Evaluation.
correct = 0
adv_correct = 0
total = 0
batch_count = 0
  total_batches = (10_000 - 1) // _BATCH_SIZE.value + 1
  if _NUM_BATCHES.value > 0:
    total_batches = min(total_batches, _NUM_BATCHES.value)
for images, labels in tqdm.tqdm(test_loader, total=total_batches):
rng = next(rng_seq)
loop_logits_fn = functools.partial(logits_fn, rng=rng)
# Clean examples.
outputs = loop_logits_fn(images)
correct += (np.argmax(outputs, 1) == labels).sum().item()
# Adversarial examples.
adv_images = eval_attack(loop_logits_fn, next(rng_seq), images, labels)
outputs = loop_logits_fn(adv_images)
predicted = np.argmax(outputs, 1)
adv_correct += (predicted == labels).sum().item()
total += labels.shape[0]
batch_count += 1
if _NUM_BATCHES.value > 0 and batch_count >= _NUM_BATCHES.value:
break
print(f'Accuracy on the {total} test images: {100 * correct / total:.2f}%')
print(f'Robust accuracy: {100 * adv_correct / total:.2f}%')
if __name__ == '__main__':
flags.mark_flag_as_required('ckpt')
try:
tf.config.set_visible_devices([], 'GPU') # Prevent TF from using the GPU.
except tf.errors.NotFoundError:
pass
app.run(main)
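# Editor's note: illustrative invocation added for clarity; it is not part of
# the original file. Passing `--ckpt=dummy` uses randomly initialized weights,
# which is useful as a quick smoke test; the remaining flag values are
# arbitrary assumptions.
#   python -m adversarial_robustness.jax.eval \
#     --ckpt=dummy --depth=28 --width=10 --batch_size=100 --num_batches=1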
| deepmind-research-master | adversarial_robustness/jax/eval.py |
# Copyright 2021 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adversarial attacks.
This file contains all the code necessary to create untargeted adversarial
attacks in JAX (within an l-infinity ball). For example, to create an untargeted
FGSM attack (with a single step), one can do the following:
```
import attacks
epsilon = 8/255 # Perturbation radius for inputs between 0 and 1.
fgsm_attack = attacks.UntargetedAttack(
attacks.PGD(
attacks.IteratedFGSM(epsilon),
num_steps=1,
initialize_fn=attacks.linf_initialize_fn(epsilon),
project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))),
loss_fn=attacks.untargeted_cross_entropy)
```
Just as elegantly, one can specify an adversarial attack on KL-divergence
to a target distribution (using 10 steps with Adam and a piecewise constant step
schedule):
```
kl_attack_with_adam = attacks.UntargetedAttack(
attacks.PGD(
attacks.Adam(optax.piecewise_constant_schedule(
init_value=.1,
boundaries_and_scales={5: .1})),
num_steps=10,
initialize_fn=attacks.linf_initialize_fn(epsilon),
project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))),
loss_fn=attacks.untargeted_kl_divergence)
```
The attack instances can be used later on to build adversarial examples:
```
my_model = ... # Model. We assume that 'my_model(.)' returns logits.
clean_images, image_labels = ... # Batch of images and associated labels.
rng = jax.random.PRNGKey(0) # A random generator state.
adversarial_images = fgsm_attack(my_model, rng, clean_images, image_labels)
```
See `experiment.py` or `eval.py` for more examples.
This file contains the following components:
* Losses:
* untargeted_cross_entropy: minimizes the likelihood of the label class.
* untargeted_kl_divergence: maximizes the KL-divergence of the predictions with
a target distribution.
* untargeted_margin: maximizes the margin loss (distance from the highest
non-true logits to the label class logit)
* Step optimizers:
* SGD: Stochastic Gradient Descent.
* IteratedFGSM: Also called BIM (see https://arxiv.org/pdf/1607.02533).
* Adam: See https://arxiv.org/pdf/1412.6980.
* Initialization and projection functions:
* linf_initialize_fn: Initialize function for l-infinity attacks.
* linf_project_fn: Projection function for l-infinity attacks.
* Projected Gradient Descent (PGD):
* PGD: Runs Projected Gradient Descent using the specified optimizer,
initialization and projection functions for a given number of steps.
* Untargeted attack:
* UntargetedAttack: Combines PGD and a specific loss function to find
adversarial examples.
"""
import functools
import inspect
from typing import Callable, Optional, Tuple, Union
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import optax
ModelFn = Callable[[chex.Array], chex.Array]
LossFn = Callable[[chex.Array], chex.Array]
ClassificationLossFn = Callable[[chex.Array, chex.Array], chex.Array]
OptimizeFn = Callable[[LossFn, chex.PRNGKey, chex.Array], chex.Array]
NormalizeFn = Callable[[chex.Array], chex.Array]
InitializeFn = Callable[[chex.PRNGKey, chex.Array], chex.Array]
ProjectFn = Callable[[chex.Array, chex.Array], chex.Array]
def untargeted_cross_entropy(logits: chex.Array,
labels: chex.Array) -> chex.Array:
"""Maximize the cross-entropy of the true class (make it less likely)."""
num_classes = logits.shape[-1]
log_probs = jax.nn.log_softmax(logits)
return jnp.sum(
hk.one_hot(labels, num_classes).astype(logits.dtype) * log_probs, axis=-1)
def untargeted_kl_divergence(logits: chex.Array,
label_probs: chex.Array) -> chex.Array:
"""Maximize the KL divergence between logits and label distribution."""
# We are explicitly maximizing the cross-entropy, as this is equivalent to
# maximizing the KL divergence (when `label_probs` does not depend
# on the values that produce `logits`).
log_probs = jax.nn.log_softmax(logits)
return jnp.sum(label_probs * log_probs, axis=-1)
def untargeted_margin(logits: chex.Array,
labels: chex.Array) -> chex.Array:
"""Make the highest non-correct logits higher than the true class logits."""
batch_size = logits.shape[0]
num_classes = logits.shape[-1]
label_logits = logits[jnp.arange(batch_size), labels]
logit_mask = hk.one_hot(labels, num_classes).astype(logits.dtype)
highest_logits = jnp.max(logits - 1e8 * logit_mask, axis=-1)
return label_logits - highest_logits
class UntargetedAttack:
"""Performs an untargeted attack."""
def __init__(self,
optimize_fn: OptimizeFn,
loss_fn: ClassificationLossFn = untargeted_cross_entropy):
"""Creates an untargeted attack.
Args:
optimize_fn: An `Optimizer` instance or any callable that takes
a loss function and an initial input and outputs a new input that
minimizes the loss function.
      loss_fn: `loss_fn` is a surrogate loss. Its goal should be to make the
        true class less likely than any other class. Typical options for
        `loss_fn` are `untargeted_cross_entropy` or `untargeted_margin`.
"""
self._optimize_fn = optimize_fn
self._loss_fn = loss_fn
def __call__(self,
logits_fn: ModelFn,
rng: chex.PRNGKey,
inputs: chex.Array,
labels: chex.Array) -> chex.Array:
"""Returns adversarial inputs."""
def _loss_fn(x):
return self._loss_fn(logits_fn(x), labels)
return self._optimize_fn(_loss_fn, rng, inputs)
# Convenience functions to detect the type of inputs required by the loss.
def expects_labels(self):
return 'labels' in inspect.getfullargspec(self._loss_fn).args
def expects_probabilities(self):
return 'label_probs' in inspect.getfullargspec(self._loss_fn).args
class StepOptimizer:
"""Makes a single gradient step that minimizes a loss function."""
def __init__(self,
gradient_transformation: optax.GradientTransformation):
self._gradient_transformation = gradient_transformation
def init(self,
loss_fn: LossFn,
x: chex.Array) -> optax.OptState:
self._loss_fn = loss_fn
return self._gradient_transformation.init(x)
def minimize(
self,
x: chex.Array,
state: optax.OptState) -> Tuple[chex.Array, chex.Array, optax.OptState]:
"""Performs a single minimization step."""
g, loss = gradients_fn(self._loss_fn, x)
if g is None:
raise ValueError('loss_fn does not depend on input.')
updates, state = self._gradient_transformation.update(g, state, x)
return optax.apply_updates(x, updates), loss, state
class SGD(StepOptimizer):
"""Vanilla gradient descent optimizer."""
def __init__(self,
learning_rate_fn: Union[float, int, optax.Schedule],
normalize_fn: Optional[NormalizeFn] = None):
# Accept schedules, as well as scalar values.
if isinstance(learning_rate_fn, (float, int)):
lr = float(learning_rate_fn)
learning_rate_fn = lambda _: lr
# Normalization.
def update_fn(updates, state, params=None):
del params
updates = jax.tree_map(normalize_fn or (lambda x: x), updates)
return updates, state
gradient_transformation = optax.chain(
optax.GradientTransformation(lambda _: optax.EmptyState(), update_fn),
optax.scale_by_schedule(learning_rate_fn),
optax.scale(-1.))
super(SGD, self).__init__(gradient_transformation)
class IteratedFGSM(SGD):
"""L-infinity normalized steps."""
def __init__(self,
learning_rate_fn: Union[float, int, optax.Schedule]):
super(IteratedFGSM, self).__init__(learning_rate_fn, jnp.sign)
class Adam(StepOptimizer):
"""The Adam optimizer defined in https://arxiv.org/abs/1412.6980."""
def __init__(
self,
learning_rate_fn: Union[float, int, optax.Schedule],
normalize_fn: Optional[NormalizeFn] = None,
beta1: float = .9,
beta2: float = .999,
epsilon: float = 1e-9):
# Accept schedules, as well as scalar values.
if isinstance(learning_rate_fn, (float, int)):
lr = float(learning_rate_fn)
learning_rate_fn = lambda _: lr
# Normalization.
def update_fn(updates, state, params=None):
del params
updates = jax.tree_map(normalize_fn or (lambda x: x), updates)
return updates, state
gradient_transformation = optax.chain(
optax.GradientTransformation(lambda _: optax.EmptyState(), update_fn),
optax.scale_by_adam(b1=beta1, b2=beta2, eps=epsilon),
optax.scale_by_schedule(learning_rate_fn),
optax.scale(-1.))
super(Adam, self).__init__(gradient_transformation)
class PGD:
"""Runs Project Gradient Descent (see https://arxiv.org/pdf/1706.06083)."""
def __init__(self,
optimizer: StepOptimizer,
num_steps: int,
initialize_fn: Optional[InitializeFn] = None,
project_fn: Optional[ProjectFn] = None):
self._optimizer = optimizer
if initialize_fn is None:
initialize_fn = lambda rng, x: x
self._initialize_fn = initialize_fn
if project_fn is None:
project_fn = lambda x, origin_x: x
self._project_fn = project_fn
self._num_steps = num_steps
def __call__(self,
loss_fn: LossFn,
rng: chex.PRNGKey,
x: chex.Array) -> chex.Array:
def _optimize(rng, x):
"""Optimizes loss_fn when keep_best is False."""
def body_fn(_, inputs):
opt_state, current_x = inputs
current_x, _, opt_state = self._optimizer.minimize(current_x, opt_state)
current_x = self._project_fn(current_x, x)
return opt_state, current_x
opt_state = self._optimizer.init(loss_fn, x)
current_x = self._project_fn(self._initialize_fn(rng, x), x)
_, current_x = jax.lax.fori_loop(0, self._num_steps, body_fn,
(opt_state, current_x))
return current_x
return jax.lax.stop_gradient(_optimize(rng, x))
def linf_project_fn(epsilon: float, bounds: Tuple[float, float]) -> ProjectFn:
def project_fn(x, origin_x):
dx = jnp.clip(x - origin_x, -epsilon, epsilon)
return jnp.clip(origin_x + dx, bounds[0], bounds[1])
return project_fn
def linf_initialize_fn(epsilon: float) -> InitializeFn:
def initialize_fn(rng, x):
return x + jax.random.uniform(rng, x.shape, minval=-epsilon,
maxval=epsilon).astype(x.dtype)
return initialize_fn
def gradients_fn(loss_fn: LossFn,
x: chex.Array) -> Tuple[chex.Array, chex.Array]:
"""Returns the analytical gradient as computed by `jax.grad`."""
@functools.partial(jax.grad, has_aux=True)
def grad_reduced_loss_fn(x):
loss = loss_fn(x)
return jnp.sum(loss), loss
return grad_reduced_loss_fn(x)
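# Editor's note: the function below is an illustrative, self-contained sketch
# added for clarity; it is not part of the original file. It attacks a toy
# linear "model" so that no checkpoint is needed; the weights, shapes and
# attack hyper-parameters are arbitrary assumptions.
def _example_pgd_attack():
  """Runs a small PGD attack and returns the maximal perturbation size."""
  epsilon = 8 / 255
  attack = UntargetedAttack(
      PGD(IteratedFGSM(epsilon / 4),
          num_steps=5,
          initialize_fn=linf_initialize_fn(epsilon),
          project_fn=linf_project_fn(epsilon, bounds=(0., 1.))),
      loss_fn=untargeted_cross_entropy)
  weights = jax.random.normal(jax.random.PRNGKey(1), (32 * 32 * 3, 10))
  logits_fn = lambda x: jnp.reshape(x, (x.shape[0], -1)) @ weights
  images = jnp.full((4, 32, 32, 3), .5, jnp.float32)
  labels = jnp.zeros((4,), jnp.int32)
  adv_images = attack(logits_fn, jax.random.PRNGKey(0), images, labels)
  return jnp.max(jnp.abs(adv_images - images))  # Bounded by epsilon.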
| deepmind-research-master | adversarial_robustness/jax/attacks.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training and evaluation loops for an experiment."""
import time
from typing import Any, Mapping, Text, Type, Union
from absl import app
from absl import flags
from absl import logging
import jax
import numpy as np
from byol import byol_experiment
from byol import eval_experiment
from byol.configs import byol as byol_config
from byol.configs import eval as eval_config
flags.DEFINE_string('experiment_mode',
'pretrain', 'The experiment, pretrain or linear-eval')
flags.DEFINE_string('worker_mode', 'train', 'The mode, train or eval')
flags.DEFINE_string('worker_tpu_driver', '', 'The tpu driver to use')
flags.DEFINE_integer('pretrain_epochs', 1000, 'Number of pre-training epochs')
flags.DEFINE_integer('batch_size', 4096, 'Total batch size')
flags.DEFINE_string('checkpoint_root', '/tmp/byol',
'The directory to save checkpoints to.')
flags.DEFINE_integer('log_tensors_interval', 60, 'Log tensors every n seconds.')
FLAGS = flags.FLAGS
Experiment = Union[
Type[byol_experiment.ByolExperiment],
Type[eval_experiment.EvalExperiment]]
def train_loop(experiment_class: Experiment, config: Mapping[Text, Any]):
"""The main training loop.
This loop periodically saves a checkpoint to be evaluated in the eval_loop.
Args:
experiment_class: the constructor for the experiment (either byol_experiment
or eval_experiment).
config: the experiment config.
"""
experiment = experiment_class(**config)
rng = jax.random.PRNGKey(0)
step = 0
host_id = jax.host_id()
last_logging = time.time()
if config['checkpointing_config']['use_checkpointing']:
checkpoint_data = experiment.load_checkpoint()
if checkpoint_data is None:
step = 0
else:
step, rng = checkpoint_data
local_device_count = jax.local_device_count()
while step < config['max_steps']:
step_rng, rng = tuple(jax.random.split(rng))
# Broadcast the random seeds across the devices
step_rng_device = jax.random.split(step_rng, num=jax.device_count())
step_rng_device = step_rng_device[
host_id * local_device_count:(host_id + 1) * local_device_count]
step_device = np.broadcast_to(step, [local_device_count])
# Perform a training step and get scalars to log.
scalars = experiment.step(global_step=step_device, rng=step_rng_device)
# Checkpointing and logging.
if config['checkpointing_config']['use_checkpointing']:
experiment.save_checkpoint(step, rng)
current_time = time.time()
if current_time - last_logging > FLAGS.log_tensors_interval:
logging.info('Step %d: %s', step, scalars)
last_logging = current_time
step += 1
logging.info('Saving final checkpoint')
logging.info('Step %d: %s', step, scalars)
experiment.save_checkpoint(step, rng)
def eval_loop(experiment_class: Experiment, config: Mapping[Text, Any]):
"""The main evaluation loop.
This loop periodically loads a checkpoint and evaluates its performance on the
test set, by calling experiment.evaluate.
Args:
experiment_class: the constructor for the experiment (either byol_experiment
or eval_experiment).
config: the experiment config.
"""
experiment = experiment_class(**config)
last_evaluated_step = -1
while True:
checkpoint_data = experiment.load_checkpoint()
if checkpoint_data is None:
logging.info('No checkpoint found. Waiting for 10s.')
time.sleep(10)
continue
step, _ = checkpoint_data
if step <= last_evaluated_step:
logging.info('Checkpoint at step %d already evaluated, waiting.', step)
time.sleep(10)
continue
host_id = jax.host_id()
local_device_count = jax.local_device_count()
step_device = np.broadcast_to(step, [local_device_count])
scalars = experiment.evaluate(global_step=step_device)
if host_id == 0: # Only perform logging in one host.
logging.info('Evaluation at step %d: %s', step, scalars)
last_evaluated_step = step
if last_evaluated_step >= config['max_steps']:
return
def main(_):
if FLAGS.worker_tpu_driver:
jax.config.update('jax_xla_backend', 'tpu_driver')
jax.config.update('jax_backend_target', FLAGS.worker_tpu_driver)
logging.info('Backend: %s %r', FLAGS.worker_tpu_driver, jax.devices())
if FLAGS.experiment_mode == 'pretrain':
experiment_class = byol_experiment.ByolExperiment
config = byol_config.get_config(FLAGS.pretrain_epochs, FLAGS.batch_size)
elif FLAGS.experiment_mode == 'linear-eval':
experiment_class = eval_experiment.EvalExperiment
config = eval_config.get_config(f'{FLAGS.checkpoint_root}/pretrain.pkl',
FLAGS.batch_size)
else:
raise ValueError(f'Unknown experiment mode: {FLAGS.experiment_mode}')
config['checkpointing_config']['checkpoint_dir'] = FLAGS.checkpoint_root # pytype: disable=unsupported-operands # dict-kwargs
if FLAGS.worker_mode == 'train':
train_loop(experiment_class, config)
elif FLAGS.worker_mode == 'eval':
eval_loop(experiment_class, config)
if __name__ == '__main__':
app.run(main)
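# Editor's note: illustrative invocation added for clarity; it is not part of
# the original file. The flag values are arbitrary assumptions; pre-training
# and linear evaluation are run as separate jobs sharing `--checkpoint_root`.
#   python -m byol.main_loop --experiment_mode=pretrain --worker_mode=train \
#     --checkpoint_root=/tmp/byol --pretrain_epochs=100 --batch_size=256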
| deepmind-research-master | byol/main_loop.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BYOL pre-training implementation.
Use this experiment to pre-train a self-supervised representation.
"""
import functools
from typing import Any, Generator, Mapping, NamedTuple, Text, Tuple, Union
from absl import logging
from acme.jax import utils as acme_utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from byol.utils import augmentations
from byol.utils import checkpointing
from byol.utils import dataset
from byol.utils import helpers
from byol.utils import networks
from byol.utils import optimizers
from byol.utils import schedules
# Type declarations.
LogsDict = Mapping[Text, jnp.ndarray]
class _ByolExperimentState(NamedTuple):
"""Byol's model and optimization parameters and state."""
online_params: hk.Params
target_params: hk.Params
online_state: hk.State
target_state: hk.State
opt_state: optimizers.LarsState
class ByolExperiment:
"""Byol's training and evaluation component definition."""
def __init__(
self,
random_seed: int,
num_classes: int,
batch_size: int,
max_steps: int,
enable_double_transpose: bool,
base_target_ema: float,
network_config: Mapping[Text, Any],
optimizer_config: Mapping[Text, Any],
lr_schedule_config: Mapping[Text, Any],
evaluation_config: Mapping[Text, Any],
checkpointing_config: Mapping[Text, Any]):
"""Constructs the experiment.
Args:
random_seed: the random seed to use when initializing network weights.
num_classes: the number of classes; used for the online evaluation.
batch_size: the total batch size; should be a multiple of the number of
available accelerators.
max_steps: the number of training steps; used for the lr/target network
ema schedules.
enable_double_transpose: see dataset.py; only has effect on TPU.
base_target_ema: the initial value for the ema decay rate of the target
network.
network_config: the configuration for the network.
optimizer_config: the configuration for the optimizer.
lr_schedule_config: the configuration for the learning rate schedule.
evaluation_config: the evaluation configuration.
checkpointing_config: the configuration for checkpointing.
"""
self._random_seed = random_seed
self._enable_double_transpose = enable_double_transpose
self._num_classes = num_classes
self._lr_schedule_config = lr_schedule_config
self._batch_size = batch_size
self._max_steps = max_steps
self._base_target_ema = base_target_ema
self._optimizer_config = optimizer_config
self._evaluation_config = evaluation_config
# Checkpointed experiment state.
self._byol_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
# build the transformed ops
forward_fn = functools.partial(self._forward, **network_config)
self.forward = hk.without_apply_rng(hk.transform_with_state(forward_fn))
    # Training can run on multiple devices, hence the pmap.
    self.update_pmap = jax.pmap(self._update_fn, axis_name='i')
    # Evaluation runs on a single device, hence the jit.
self.eval_batch_jit = jax.jit(self._eval_batch)
self._checkpointer = checkpointing.Checkpointer(**checkpointing_config)
def _forward(
self,
inputs: dataset.Batch,
projector_hidden_size: int,
projector_output_size: int,
predictor_hidden_size: int,
encoder_class: Text,
encoder_config: Mapping[Text, Any],
bn_config: Mapping[Text, Any],
is_training: bool,
) -> Mapping[Text, jnp.ndarray]:
"""Forward application of byol's architecture.
Args:
      inputs: A batch of data, i.e. a dictionary with either two keys
        (`images` and `labels`) or three keys (`view1`, `view2`, `labels`).
projector_hidden_size: hidden size of the projector MLP.
projector_output_size: output size of the projector and predictor MLPs.
predictor_hidden_size: hidden size of the predictor MLP.
encoder_class: type of the encoder (should match a class in
utils/networks).
encoder_config: passed to the encoder constructor.
bn_config: passed to the hk.BatchNorm constructors.
is_training: Training or evaluating the model? When True, inputs must
contain keys `view1` and `view2`. When False, inputs must contain key
`images`.
Returns:
All outputs of the model, i.e. a dictionary with projection, prediction
and logits keys, for either the two views, or the image.
"""
encoder = getattr(networks, encoder_class)
net = encoder(
num_classes=None, # Don't build the final linear layer
bn_config=bn_config,
**encoder_config)
projector = networks.MLP(
name='projector',
hidden_size=projector_hidden_size,
output_size=projector_output_size,
bn_config=bn_config)
predictor = networks.MLP(
name='predictor',
hidden_size=predictor_hidden_size,
output_size=projector_output_size,
bn_config=bn_config)
classifier = hk.Linear(
output_size=self._num_classes, name='classifier')
def apply_once_fn(images: jnp.ndarray, suffix: Text = ''):
images = dataset.normalize_images(images)
embedding = net(images, is_training=is_training)
proj_out = projector(embedding, is_training)
pred_out = predictor(proj_out, is_training)
# Note the stop_gradient: label information is not leaked into the
# main network.
classif_out = classifier(jax.lax.stop_gradient(embedding))
outputs = {}
outputs['projection' + suffix] = proj_out
outputs['prediction' + suffix] = pred_out
outputs['logits' + suffix] = classif_out
return outputs
if is_training:
outputs_view1 = apply_once_fn(inputs['view1'], '_view1')
outputs_view2 = apply_once_fn(inputs['view2'], '_view2')
return {**outputs_view1, **outputs_view2}
else:
return apply_once_fn(inputs['images'], '')
def _optimizer(self, learning_rate: float) -> optax.GradientTransformation:
"""Build optimizer from config."""
return optimizers.lars(
learning_rate,
weight_decay_filter=optimizers.exclude_bias_and_norm,
lars_adaptation_filter=optimizers.exclude_bias_and_norm,
**self._optimizer_config)
def loss_fn(
self,
online_params: hk.Params,
target_params: hk.Params,
online_state: hk.State,
      target_state: hk.State,
rng: jnp.ndarray,
inputs: dataset.Batch,
) -> Tuple[jnp.ndarray, Tuple[Mapping[Text, hk.State], LogsDict]]:
"""Compute BYOL's loss function.
Args:
online_params: parameters of the online network (the loss is later
differentiated with respect to the online parameters).
target_params: parameters of the target network.
online_state: internal state of online network.
target_state: internal state of target network.
rng: random number generator state.
      inputs: inputs, containing two batches of crops from the same images
        (`view1` and `view2`), as well as the labels.
Returns:
      BYOL's loss, a mapping containing the online and target networks'
      updated states after processing inputs, and various logs.
"""
if self._should_transpose_images():
inputs = dataset.transpose_images(inputs)
inputs = augmentations.postprocess(inputs, rng)
labels = inputs['labels']
online_network_out, online_state = self.forward.apply(
params=online_params,
state=online_state,
inputs=inputs,
is_training=True)
target_network_out, target_state = self.forward.apply(
params=target_params,
state=target_state,
inputs=inputs,
is_training=True)
# Representation loss
    # The stop_gradient is not strictly necessary, as `_update_fn` only takes
    # the gradient with respect to the online parameters (argnums=0 in
    # jax.grad). We keep it to make explicit that gradients are not
    # backpropagated through the target network.
repr_loss = helpers.regression_loss(
online_network_out['prediction_view1'],
jax.lax.stop_gradient(target_network_out['projection_view2']))
repr_loss = repr_loss + helpers.regression_loss(
online_network_out['prediction_view2'],
jax.lax.stop_gradient(target_network_out['projection_view1']))
repr_loss = jnp.mean(repr_loss)
    # Classification loss (with gradients stopped from flowing into the
    # ResNet). This is used to provide an evaluation of the representation
    # quality during training.
classif_loss = helpers.softmax_cross_entropy(
logits=online_network_out['logits_view1'],
labels=jax.nn.one_hot(labels, self._num_classes))
top1_correct = helpers.topk_accuracy(
online_network_out['logits_view1'],
inputs['labels'],
topk=1,
)
top5_correct = helpers.topk_accuracy(
online_network_out['logits_view1'],
inputs['labels'],
topk=5,
)
top1_acc = jnp.mean(top1_correct)
top5_acc = jnp.mean(top5_correct)
classif_loss = jnp.mean(classif_loss)
loss = repr_loss + classif_loss
logs = dict(
loss=loss,
repr_loss=repr_loss,
classif_loss=classif_loss,
top1_accuracy=top1_acc,
top5_accuracy=top5_acc,
)
return loss, (dict(online_state=online_state,
target_state=target_state), logs)
def _should_transpose_images(self):
"""Should we transpose images (saves host-to-device time on TPUs)."""
return (self._enable_double_transpose and
jax.local_devices()[0].platform == 'tpu')
def _update_fn(
self,
byol_state: _ByolExperimentState,
global_step: jnp.ndarray,
rng: jnp.ndarray,
inputs: dataset.Batch,
) -> Tuple[_ByolExperimentState, LogsDict]:
"""Update online and target parameters.
Args:
byol_state: current BYOL state.
global_step: current training step.
      rng: current random number generator state.
      inputs: inputs, containing two batches of crops from the same images
        (`view1` and `view2`), as well as the labels.
Returns:
Tuple containing the updated Byol state after processing the inputs, and
various logs.
"""
online_params = byol_state.online_params
target_params = byol_state.target_params
online_state = byol_state.online_state
target_state = byol_state.target_state
opt_state = byol_state.opt_state
# update online network
grad_fn = jax.grad(self.loss_fn, argnums=0, has_aux=True)
grads, (net_states, logs) = grad_fn(online_params, target_params,
online_state, target_state, rng, inputs)
# cross-device grad and logs reductions
grads = jax.tree_map(lambda v: jax.lax.pmean(v, axis_name='i'), grads)
logs = jax.tree_map(lambda x: jax.lax.pmean(x, axis_name='i'), logs)
learning_rate = schedules.learning_schedule(
global_step,
batch_size=self._batch_size,
total_steps=self._max_steps,
**self._lr_schedule_config)
updates, opt_state = self._optimizer(learning_rate).update(
grads, opt_state, online_params)
online_params = optax.apply_updates(online_params, updates)
# update target network
tau = schedules.target_ema(
global_step,
base_ema=self._base_target_ema,
max_steps=self._max_steps)
target_params = jax.tree_map(lambda x, y: x + (1 - tau) * (y - x),
target_params, online_params)
logs['tau'] = tau
logs['learning_rate'] = learning_rate
return _ByolExperimentState(
online_params=online_params,
target_params=target_params,
online_state=net_states['online_state'],
target_state=net_states['target_state'],
opt_state=opt_state), logs
def _make_initial_state(
self,
rng: jnp.ndarray,
dummy_input: dataset.Batch,
) -> _ByolExperimentState:
"""BYOL's _ByolExperimentState initialization.
Args:
      rng: random number generator used to initialize parameters. If working in
        a multi-device setup, this needs to be a ShardedArray.
dummy_input: a dummy image, used to compute intermediate outputs shapes.
Returns:
Initial Byol state.
"""
rng_online, rng_target = jax.random.split(rng)
if self._should_transpose_images():
dummy_input = dataset.transpose_images(dummy_input)
    # Online and target parameters are initialized using different rngs;
    # in our experiments we did not notice a significant difference when using
    # the same rng for both.
online_params, online_state = self.forward.init(
rng_online,
dummy_input,
is_training=True,
)
target_params, target_state = self.forward.init(
rng_target,
dummy_input,
is_training=True,
)
opt_state = self._optimizer(0).init(online_params)
return _ByolExperimentState(
online_params=online_params,
target_params=target_params,
opt_state=opt_state,
online_state=online_state,
target_state=target_state,
)
def step(self, *,
global_step: jnp.ndarray,
rng: jnp.ndarray) -> Mapping[Text, np.ndarray]:
"""Performs a single training step."""
if self._train_input is None:
self._initialize_train()
inputs = next(self._train_input)
self._byol_state, scalars = self.update_pmap(
self._byol_state,
global_step=global_step,
rng=rng,
inputs=inputs,
)
return helpers.get_first(scalars)
def save_checkpoint(self, step: int, rng: jnp.ndarray):
self._checkpointer.maybe_save_checkpoint(
self._byol_state, step=step, rng=rng, is_final=step >= self._max_steps)
def load_checkpoint(self) -> Union[Tuple[int, jnp.ndarray], None]:
checkpoint_data = self._checkpointer.maybe_load_checkpoint()
if checkpoint_data is None:
return None
self._byol_state, step, rng = checkpoint_data
return step, rng
def _initialize_train(self):
"""Initialize train.
This includes initializing the input pipeline and Byol's state.
"""
self._train_input = acme_utils.prefetch(self._build_train_input())
# Check we haven't already restored params
if self._byol_state is None:
logging.info(
'Initializing parameters rather than restoring from checkpoint.')
      # Initialize BYOL and set up the optimizer state.
inputs = next(self._train_input)
init_byol = jax.pmap(self._make_initial_state, axis_name='i')
# Init uses the same RNG key on all hosts+devices to ensure everyone
# computes the same initial state and parameters.
init_rng = jax.random.PRNGKey(self._random_seed)
init_rng = helpers.bcast_local_devices(init_rng)
self._byol_state = init_byol(rng=init_rng, dummy_input=inputs)
def _build_train_input(self) -> Generator[dataset.Batch, None, None]:
"""Loads the (infinitely looping) dataset iterator."""
num_devices = jax.device_count()
global_batch_size = self._batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
return dataset.load(
dataset.Split.TRAIN_AND_VALID,
preprocess_mode=dataset.PreprocessMode.PRETRAIN,
transpose=self._should_transpose_images(),
batch_dims=[jax.local_device_count(), per_device_batch_size])
def _eval_batch(
self,
params: hk.Params,
state: hk.State,
batch: dataset.Batch,
) -> Mapping[Text, jnp.ndarray]:
"""Evaluates a batch.
Args:
params: Parameters of the model to evaluate. Typically Byol's online
parameters.
state: State of the model to evaluate. Typically Byol's online state.
batch: Batch of data to evaluate (must contain keys images and labels).
Returns:
      Unreduced evaluation loss and top1/top5 accuracies on the batch.
"""
if self._should_transpose_images():
batch = dataset.transpose_images(batch)
outputs, _ = self.forward.apply(params, state, batch, is_training=False)
logits = outputs['logits']
labels = hk.one_hot(batch['labels'], self._num_classes)
loss = helpers.softmax_cross_entropy(logits, labels, reduction=None)
top1_correct = helpers.topk_accuracy(logits, batch['labels'], topk=1)
top5_correct = helpers.topk_accuracy(logits, batch['labels'], topk=5)
# NOTE: Returned values will be summed and finally divided by num_samples.
return {
'eval_loss': loss,
'top1_accuracy': top1_correct,
'top5_accuracy': top5_correct,
}
def _eval_epoch(self, subset: Text, batch_size: int):
"""Evaluates an epoch."""
num_samples = 0.
summed_scalars = None
params = helpers.get_first(self._byol_state.online_params)
state = helpers.get_first(self._byol_state.online_state)
split = dataset.Split.from_string(subset)
dataset_iterator = dataset.load(
split,
preprocess_mode=dataset.PreprocessMode.EVAL,
transpose=self._should_transpose_images(),
batch_dims=[batch_size])
for inputs in dataset_iterator:
num_samples += inputs['labels'].shape[0]
scalars = self.eval_batch_jit(params, state, inputs)
# Accumulate the sum of scalars for each step.
scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars)
if summed_scalars is None:
summed_scalars = scalars
else:
summed_scalars = jax.tree_map(jnp.add, summed_scalars, scalars)
mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars)
return mean_scalars
def evaluate(self, global_step, **unused_args):
"""Thin wrapper around _eval_epoch."""
global_step = np.array(helpers.get_first(global_step))
scalars = jax.device_get(self._eval_epoch(**self._evaluation_config))
logging.info('[Step %d] Eval scalars: %s', global_step, scalars)
return scalars
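# NOTE: Illustrative sketch only; it is not used by `ByolExperiment`. It spells
# out the target-network update applied in `_update_fn`: an exponential moving
# average `target <- tau * target + (1 - tau) * online`, written there in the
# equivalent form `target + (1 - tau) * (online - target)`.
def _example_target_ema_update(target_params, online_params, tau):
  return jax.tree_map(lambda t, o: tau * t + (1 - tau) * o,
                      target_params, online_params)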
| deepmind-research-master | byol/byol_experiment.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for BYOL's main training loop."""
from absl import flags
from absl.testing import absltest
import tensorflow_datasets as tfds
from byol import byol_experiment
from byol import eval_experiment
from byol import main_loop
from byol.configs import byol as byol_config
from byol.configs import eval as eval_config
FLAGS = flags.FLAGS
class MainLoopTest(absltest.TestCase):
def test_pretrain(self):
config = byol_config.get_config(num_epochs=40, batch_size=4)
temp_dir = self.create_tempdir().full_path
# Override some config fields to make test lighter.
config['network_config']['encoder_class'] = 'TinyResNet'
config['network_config']['projector_hidden_size'] = 256
config['network_config']['predictor_hidden_size'] = 256
config['checkpointing_config']['checkpoint_dir'] = temp_dir
config['evaluation_config']['batch_size'] = 16
config['max_steps'] = 16
with tfds.testing.mock_data(num_examples=64):
experiment_class = byol_experiment.ByolExperiment
main_loop.train_loop(experiment_class, config)
main_loop.eval_loop(experiment_class, config)
def test_linear_eval(self):
config = eval_config.get_config(checkpoint_to_evaluate=None, batch_size=4)
temp_dir = self.create_tempdir().full_path
# Override some config fields to make test lighter.
config['network_config']['encoder_class'] = 'TinyResNet'
config['allow_train_from_scratch'] = True
config['checkpointing_config']['checkpoint_dir'] = temp_dir
config['evaluation_config']['batch_size'] = 16
config['max_steps'] = 16
with tfds.testing.mock_data(num_examples=64):
experiment_class = eval_experiment.EvalExperiment
main_loop.train_loop(experiment_class, config)
main_loop.eval_loop(experiment_class, config)
def test_pipeline(self):
b_config = byol_config.get_config(num_epochs=40, batch_size=4)
temp_dir = self.create_tempdir().full_path
# Override some config fields to make test lighter.
b_config['network_config']['encoder_class'] = 'TinyResNet'
b_config['network_config']['projector_hidden_size'] = 256
b_config['network_config']['predictor_hidden_size'] = 256
b_config['checkpointing_config']['checkpoint_dir'] = temp_dir
b_config['evaluation_config']['batch_size'] = 16
b_config['max_steps'] = 16
with tfds.testing.mock_data(num_examples=64):
main_loop.train_loop(byol_experiment.ByolExperiment, b_config)
e_config = eval_config.get_config(
checkpoint_to_evaluate=f'{temp_dir}/pretrain.pkl',
batch_size=4)
# Override some config fields to make test lighter.
e_config['network_config']['encoder_class'] = 'TinyResNet'
e_config['allow_train_from_scratch'] = True
e_config['checkpointing_config']['checkpoint_dir'] = temp_dir
e_config['evaluation_config']['batch_size'] = 16
e_config['max_steps'] = 16
with tfds.testing.mock_data(num_examples=64):
main_loop.train_loop(eval_experiment.EvalExperiment, e_config)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | byol/main_loop_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup BYOL for pip package."""
import setuptools
setuptools.setup(
name='byol',
description='Bootstrap Your Own Latents',
long_description=open('README.md').read(),
author='DeepMind',
author_email='[email protected]',
url='https://github.com/deepmind/deepmind-research/byol',
install_requires=[
'chex',
'dm-acme',
'dm-haiku',
'dm-tree',
'jax',
'jaxlib',
'numpy>=1.16',
'optax',
'tensorflow',
'tensorflow-datasets',
],
package_dir={'byol': ''},
py_modules=[
'byol.byol_experiment', 'byol.eval_experiment', 'byol.main_loop'
],
packages=['byol.configs', 'byol.utils'])
| deepmind-research-master | byol/setup.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linear evaluation or fine-tuning pipeline.
Use this experiment to evaluate a checkpoint from byol_experiment.
"""
import functools
from typing import Any, Generator, Mapping, NamedTuple, Optional, Text, Tuple, Union
from absl import logging
from acme.jax import utils as acme_utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from byol.utils import checkpointing
from byol.utils import dataset
from byol.utils import helpers
from byol.utils import networks
from byol.utils import schedules
# Type declarations.
OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
LogsDict = Mapping[Text, jnp.ndarray]
class _EvalExperimentState(NamedTuple):
backbone_params: hk.Params
classif_params: hk.Params
backbone_state: hk.State
backbone_opt_state: Union[None, OptState]
classif_opt_state: OptState
class EvalExperiment:
"""Linear evaluation experiment."""
def __init__(
self,
random_seed: int,
num_classes: int,
batch_size: int,
max_steps: int,
enable_double_transpose: bool,
checkpoint_to_evaluate: Optional[Text],
allow_train_from_scratch: bool,
freeze_backbone: bool,
network_config: Mapping[Text, Any],
optimizer_config: Mapping[Text, Any],
lr_schedule_config: Mapping[Text, Any],
evaluation_config: Mapping[Text, Any],
checkpointing_config: Mapping[Text, Any]):
"""Constructs the experiment.
Args:
random_seed: the random seed to use when initializing network weights.
num_classes: the number of classes; used for the online evaluation.
batch_size: the total batch size; should be a multiple of the number of
available accelerators.
max_steps: the number of training steps; used for the lr/target network
ema schedules.
enable_double_transpose: see dataset.py; only has effect on TPU.
checkpoint_to_evaluate: the path to the checkpoint to evaluate.
allow_train_from_scratch: whether to allow training without specifying a
checkpoint to evaluate (training from scratch).
freeze_backbone: whether the backbone resnet should remain frozen (linear
evaluation) or be trainable (fine-tuning).
network_config: the configuration for the network.
optimizer_config: the configuration for the optimizer.
lr_schedule_config: the configuration for the learning rate schedule.
evaluation_config: the evaluation configuration.
checkpointing_config: the configuration for checkpointing.
"""
self._random_seed = random_seed
self._enable_double_transpose = enable_double_transpose
self._num_classes = num_classes
self._lr_schedule_config = lr_schedule_config
self._batch_size = batch_size
self._max_steps = max_steps
self._checkpoint_to_evaluate = checkpoint_to_evaluate
self._allow_train_from_scratch = allow_train_from_scratch
self._freeze_backbone = freeze_backbone
self._optimizer_config = optimizer_config
self._evaluation_config = evaluation_config
# Checkpointed experiment state.
self._experiment_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
backbone_fn = functools.partial(self._backbone_fn, **network_config)
self.forward_backbone = hk.without_apply_rng(
hk.transform_with_state(backbone_fn))
self.forward_classif = hk.without_apply_rng(hk.transform(self._classif_fn))
self.update_pmap = jax.pmap(self._update_func, axis_name='i')
self.eval_batch_jit = jax.jit(self._eval_batch)
self._is_backbone_training = not self._freeze_backbone
self._checkpointer = checkpointing.Checkpointer(**checkpointing_config)
def _should_transpose_images(self):
"""Should we transpose images (saves host-to-device time on TPUs)."""
return (self._enable_double_transpose and
jax.local_devices()[0].platform == 'tpu')
def _backbone_fn(
self,
inputs: dataset.Batch,
encoder_class: Text,
encoder_config: Mapping[Text, Any],
bn_decay_rate: float,
is_training: bool,
) -> jnp.ndarray:
"""Forward of the encoder (backbone)."""
bn_config = {'decay_rate': bn_decay_rate}
encoder = getattr(networks, encoder_class)
model = encoder(
None,
bn_config=bn_config,
**encoder_config)
if self._should_transpose_images():
inputs = dataset.transpose_images(inputs)
images = dataset.normalize_images(inputs['images'])
return model(images, is_training=is_training)
def _classif_fn(
self,
embeddings: jnp.ndarray,
) -> jnp.ndarray:
classifier = hk.Linear(output_size=self._num_classes)
return classifier(embeddings)
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, *,
global_step: jnp.ndarray,
rng: jnp.ndarray) -> Mapping[Text, np.ndarray]:
"""Performs a single training step."""
if self._train_input is None:
self._initialize_train(rng)
inputs = next(self._train_input)
self._experiment_state, scalars = self.update_pmap(
self._experiment_state, global_step, inputs)
scalars = helpers.get_first(scalars)
return scalars
def save_checkpoint(self, step: int, rng: jnp.ndarray):
self._checkpointer.maybe_save_checkpoint(
self._experiment_state, step=step, rng=rng,
is_final=step >= self._max_steps)
def load_checkpoint(self) -> Union[Tuple[int, jnp.ndarray], None]:
checkpoint_data = self._checkpointer.maybe_load_checkpoint()
if checkpoint_data is None:
return None
self._experiment_state, step, rng = checkpoint_data
return step, rng
def _initialize_train(self, rng):
"""BYOL's _ExperimentState initialization.
Args:
rng: random number generator used to initialize parameters. If working in
a multi device setup, this need to be a ShardedArray.
dummy_input: a dummy image, used to compute intermediate outputs shapes.
Returns:
Initial EvalExperiment state.
Raises:
RuntimeError: invalid or empty checkpoint.
"""
self._train_input = acme_utils.prefetch(self._build_train_input())
# Check we haven't already restored params
if self._experiment_state is None:
inputs = next(self._train_input)
if self._checkpoint_to_evaluate is not None:
# Load params from checkpoint
checkpoint_data = checkpointing.load_checkpoint(
self._checkpoint_to_evaluate)
if checkpoint_data is None:
raise RuntimeError('Invalid checkpoint.')
backbone_params = checkpoint_data['experiment_state'].online_params
backbone_state = checkpoint_data['experiment_state'].online_state
backbone_params = helpers.bcast_local_devices(backbone_params)
backbone_state = helpers.bcast_local_devices(backbone_state)
else:
if not self._allow_train_from_scratch:
raise ValueError(
'No checkpoint specified, but `allow_train_from_scratch` '
'set to False')
# Initialize with random parameters
logging.info(
'No checkpoint specified, initializing the networks from scratch '
'(dry run mode)')
backbone_params, backbone_state = jax.pmap(
functools.partial(self.forward_backbone.init, is_training=True),
axis_name='i')(rng=rng, inputs=inputs)
init_experiment = jax.pmap(self._make_initial_state, axis_name='i')
# Init uses the same RNG key on all hosts+devices to ensure everyone
# computes the same initial state and parameters.
init_rng = jax.random.PRNGKey(self._random_seed)
init_rng = helpers.bcast_local_devices(init_rng)
self._experiment_state = init_experiment(
rng=init_rng,
dummy_input=inputs,
backbone_params=backbone_params,
backbone_state=backbone_state)
# Clear the backbone optimizer's state when the backbone is frozen.
if self._freeze_backbone:
self._experiment_state = _EvalExperimentState(
backbone_params=self._experiment_state.backbone_params,
classif_params=self._experiment_state.classif_params,
backbone_state=self._experiment_state.backbone_state,
backbone_opt_state=None,
classif_opt_state=self._experiment_state.classif_opt_state,
)
def _make_initial_state(
self,
rng: jnp.ndarray,
dummy_input: dataset.Batch,
backbone_params: hk.Params,
backbone_state: hk.Params,
) -> _EvalExperimentState:
"""_EvalExperimentState initialization."""
# Initialize the backbone params
    # Always create the batchnorm weights (is_training=True); they will be
    # overwritten when loading the checkpoint.
embeddings, _ = self.forward_backbone.apply(
backbone_params, backbone_state, dummy_input, is_training=True)
backbone_opt_state = self._optimizer(0.).init(backbone_params)
# Initialize the classifier params and optimizer_state
classif_params = self.forward_classif.init(rng, embeddings)
classif_opt_state = self._optimizer(0.).init(classif_params)
return _EvalExperimentState(
backbone_params=backbone_params,
classif_params=classif_params,
backbone_state=backbone_state,
backbone_opt_state=backbone_opt_state,
classif_opt_state=classif_opt_state,
)
def _build_train_input(self) -> Generator[dataset.Batch, None, None]:
"""See base class."""
num_devices = jax.device_count()
global_batch_size = self._batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
return dataset.load(
dataset.Split.TRAIN_AND_VALID,
preprocess_mode=dataset.PreprocessMode.LINEAR_TRAIN,
transpose=self._should_transpose_images(),
batch_dims=[jax.local_device_count(), per_device_batch_size])
def _optimizer(self, learning_rate: float):
"""Build optimizer from config."""
return optax.sgd(learning_rate, **self._optimizer_config)
def _loss_fn(
self,
backbone_params: hk.Params,
classif_params: hk.Params,
backbone_state: hk.State,
inputs: dataset.Batch,
) -> Tuple[jnp.ndarray, Tuple[jnp.ndarray, hk.State]]:
"""Compute the classification loss function.
Args:
backbone_params: parameters of the encoder network.
classif_params: parameters of the linear classifier.
backbone_state: internal state of encoder network.
inputs: inputs, containing `images` and `labels`.
Returns:
      The scaled classification loss and, as auxiliary outputs, the unscaled
      loss and the updated backbone state.
"""
embeddings, backbone_state = self.forward_backbone.apply(
backbone_params,
backbone_state,
inputs,
is_training=not self._freeze_backbone)
logits = self.forward_classif.apply(classif_params, embeddings)
labels = hk.one_hot(inputs['labels'], self._num_classes)
loss = helpers.softmax_cross_entropy(logits, labels, reduction='mean')
scaled_loss = loss / jax.device_count()
return scaled_loss, (loss, backbone_state)
def _update_func(
self,
experiment_state: _EvalExperimentState,
global_step: jnp.ndarray,
inputs: dataset.Batch,
) -> Tuple[_EvalExperimentState, LogsDict]:
"""Applies an update to parameters and returns new state."""
    # Compute the gradient of the first output of `_loss_fn` with respect to
    # the backbone (arg 0) and classifier (arg 1) parameters; the auxiliary
    # outputs (train loss and new backbone state) are passed through as-is.
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True, argnums=(0, 1))
grads, aux_outputs = grad_loss_fn(
experiment_state.backbone_params,
experiment_state.classif_params,
experiment_state.backbone_state,
inputs,
)
backbone_grads, classifier_grads = grads
train_loss, new_backbone_state = aux_outputs
classifier_grads = jax.lax.psum(classifier_grads, axis_name='i')
# Compute the decayed learning rate
learning_rate = schedules.learning_schedule(
global_step,
batch_size=self._batch_size,
total_steps=self._max_steps,
**self._lr_schedule_config)
# Compute and apply updates via our optimizer.
classif_updates, new_classif_opt_state = \
self._optimizer(learning_rate).update(
classifier_grads,
experiment_state.classif_opt_state)
new_classif_params = optax.apply_updates(experiment_state.classif_params,
classif_updates)
if self._freeze_backbone:
del backbone_grads, new_backbone_state # Unused
# The backbone is not updated.
new_backbone_params = experiment_state.backbone_params
new_backbone_opt_state = None
new_backbone_state = experiment_state.backbone_state
else:
backbone_grads = jax.lax.psum(backbone_grads, axis_name='i')
# Compute and apply updates via our optimizer.
backbone_updates, new_backbone_opt_state = \
self._optimizer(learning_rate).update(
backbone_grads,
experiment_state.backbone_opt_state)
new_backbone_params = optax.apply_updates(
experiment_state.backbone_params, backbone_updates)
experiment_state = _EvalExperimentState(
new_backbone_params,
new_classif_params,
new_backbone_state,
new_backbone_opt_state,
new_classif_opt_state,
)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = {'train_loss': train_loss}
scalars = jax.lax.pmean(scalars, axis_name='i')
return experiment_state, scalars
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, **unused_args):
"""See base class."""
global_step = np.array(helpers.get_first(global_step))
scalars = jax.device_get(self._eval_epoch(**self._evaluation_config))
logging.info('[Step %d] Eval scalars: %s', global_step, scalars)
return scalars
def _eval_batch(
self,
backbone_params: hk.Params,
classif_params: hk.Params,
backbone_state: hk.State,
inputs: dataset.Batch,
) -> LogsDict:
"""Evaluates a batch."""
embeddings, backbone_state = self.forward_backbone.apply(
backbone_params, backbone_state, inputs, is_training=False)
logits = self.forward_classif.apply(classif_params, embeddings)
labels = hk.one_hot(inputs['labels'], self._num_classes)
loss = helpers.softmax_cross_entropy(logits, labels, reduction=None)
top1_correct = helpers.topk_accuracy(logits, inputs['labels'], topk=1)
top5_correct = helpers.topk_accuracy(logits, inputs['labels'], topk=5)
# NOTE: Returned values will be summed and finally divided by num_samples.
return {
'eval_loss': loss,
'top1_accuracy': top1_correct,
'top5_accuracy': top5_correct
}
def _eval_epoch(self, subset: Text, batch_size: int):
"""Evaluates an epoch."""
num_samples = 0.
summed_scalars = None
backbone_params = helpers.get_first(self._experiment_state.backbone_params)
classif_params = helpers.get_first(self._experiment_state.classif_params)
backbone_state = helpers.get_first(self._experiment_state.backbone_state)
split = dataset.Split.from_string(subset)
dataset_iterator = dataset.load(
split,
preprocess_mode=dataset.PreprocessMode.EVAL,
transpose=self._should_transpose_images(),
batch_dims=[batch_size])
for inputs in dataset_iterator:
num_samples += inputs['labels'].shape[0]
scalars = self.eval_batch_jit(
backbone_params,
classif_params,
backbone_state,
inputs,
)
# Accumulate the sum of scalars for each step.
scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars)
if summed_scalars is None:
summed_scalars = scalars
else:
summed_scalars = jax.tree_map(jnp.add, summed_scalars, scalars)
mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars)
return mean_scalars
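# NOTE: Illustrative sketch only; it is not used by `EvalExperiment`. It shows
# why `_loss_fn` divides the loss by `jax.device_count()` before `_update_func`
# sums gradients with `jax.lax.psum`: summing gradients of a loss scaled by
# 1/num_devices is equivalent to averaging the unscaled gradients with
# `jax.lax.pmean` across devices.
def _example_grad_mean(per_device_grads, axis_name='i'):
  # Assumes it is called inside a pmapped function using `axis_name`.
  scaled = jax.tree_map(lambda g: g / jax.device_count(), per_device_grads)
  return jax.lax.psum(scaled, axis_name=axis_name)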
| deepmind-research-master | byol/eval_experiment.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | byol/utils/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of LARS Optimizer with optax."""
from typing import Any, Callable, List, NamedTuple, Optional, Tuple
import jax
import jax.numpy as jnp
import optax
import tree as nest
# A filter function takes a path and a value as input, and outputs True for
# variables the update should be applied to and False otherwise.
FilterFn = Callable[[Tuple[Any], jnp.ndarray], jnp.ndarray]
def exclude_bias_and_norm(path: Tuple[Any], val: jnp.ndarray) -> jnp.ndarray:
"""Filter to exclude biaises and normalizations weights."""
del val
if path[-1] == "b" or "norm" in path[-2]:
return False
return True
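# NOTE: Illustrative sketch only. With haiku-style parameter paths of the form
# (module_name, parameter_name), the filter above returns False ("do not apply
# the update") for biases and for parameters of modules whose name contains
# 'norm', and True for everything else.
def _example_exclude_bias_and_norm():
  w = jnp.zeros([3, 3])
  assert exclude_bias_and_norm(('predictor/linear', 'w'), w)
  assert not exclude_bias_and_norm(('predictor/linear', 'b'), w)
  assert not exclude_bias_and_norm(('predictor/batch_norm', 'scale'), w)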
def _partial_update(updates: optax.Updates,
new_updates: optax.Updates,
params: optax.Params,
filter_fn: Optional[FilterFn] = None) -> optax.Updates:
"""Returns new_update for params which filter_fn is True else updates."""
if filter_fn is None:
return new_updates
wrapped_filter_fn = lambda x, y: jnp.array(filter_fn(x, y))
params_to_filter = nest.map_structure_with_path(wrapped_filter_fn, params)
def _update_fn(g: jnp.ndarray, t: jnp.ndarray, m: jnp.ndarray) -> jnp.ndarray:
m = m.astype(g.dtype)
return g * (1. - m) + t * m
return jax.tree_map(_update_fn, updates, new_updates, params_to_filter)
class ScaleByLarsState(NamedTuple):
mu: jnp.ndarray
def scale_by_lars(
momentum: float = 0.9,
eta: float = 0.001,
filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
"""Rescales updates according to the LARS algorithm.
Does not include weight decay.
References:
[You et al, 2017](https://arxiv.org/abs/1708.03888)
Args:
    momentum: momentum coefficient.
eta: LARS coefficient.
filter_fn: an optional filter function.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(params: optax.Params) -> ScaleByLarsState:
mu = jax.tree_map(jnp.zeros_like, params) # momentum
return ScaleByLarsState(mu=mu)
def update_fn(updates: optax.Updates, state: ScaleByLarsState,
params: optax.Params) -> Tuple[optax.Updates, ScaleByLarsState]:
def lars_adaptation(
update: jnp.ndarray,
param: jnp.ndarray,
) -> jnp.ndarray:
param_norm = jnp.linalg.norm(param)
update_norm = jnp.linalg.norm(update)
return update * jnp.where(
param_norm > 0.,
jnp.where(update_norm > 0,
(eta * param_norm / update_norm), 1.0), 1.0)
adapted_updates = jax.tree_map(lars_adaptation, updates, params)
adapted_updates = _partial_update(updates, adapted_updates, params,
filter_fn)
mu = jax.tree_map(lambda g, t: momentum * g + t,
state.mu, adapted_updates)
return mu, ScaleByLarsState(mu=mu)
return optax.GradientTransformation(init_fn, update_fn)
class AddWeightDecayState(NamedTuple):
"""Stateless transformation."""
def add_weight_decay(
weight_decay: float,
filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
"""Adds a weight decay to the update.
Args:
    weight_decay: weight decay coefficient.
filter_fn: an optional filter function.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_) -> AddWeightDecayState:
return AddWeightDecayState()
def update_fn(
updates: optax.Updates,
state: AddWeightDecayState,
params: optax.Params,
) -> Tuple[optax.Updates, AddWeightDecayState]:
new_updates = jax.tree_map(lambda g, p: g + weight_decay * p, updates,
params)
new_updates = _partial_update(updates, new_updates, params, filter_fn)
return new_updates, state
return optax.GradientTransformation(init_fn, update_fn)
LarsState = List # Type for the lars optimizer
def lars(
learning_rate: float,
weight_decay: float = 0.,
momentum: float = 0.9,
eta: float = 0.001,
weight_decay_filter: Optional[FilterFn] = None,
lars_adaptation_filter: Optional[FilterFn] = None,
) -> optax.GradientTransformation:
"""Creates lars optimizer with weight decay.
References:
[You et al, 2017](https://arxiv.org/abs/1708.03888)
Args:
learning_rate: learning rate coefficient.
weight_decay: weight decay coefficient.
momentum: momentum coefficient.
eta: LARS coefficient.
    weight_decay_filter: optional filter function to only apply the weight
      decay on a subset of parameters. The filter function takes as input the
      parameter path (as a tuple) and its associated update, and returns True
      for params on which to apply the weight decay and False for params on
      which not to apply it. When set to None, the weight decay is applied to
      every parameter; pass `exclude_bias_and_norm` to skip biases (variable
      name 'b') and normalization params (penultimate path element containing
      'norm').
    lars_adaptation_filter: similar to weight_decay_filter, but for the LARS
      adaptation.
Returns:
An optax.GradientTransformation, i.e. a (init_fn, update_fn) tuple.
"""
if weight_decay_filter is None:
weight_decay_filter = lambda *_: True
if lars_adaptation_filter is None:
lars_adaptation_filter = lambda *_: True
return optax.chain(
add_weight_decay(
weight_decay=weight_decay, filter_fn=weight_decay_filter),
scale_by_lars(
momentum=momentum, eta=eta, filter_fn=lars_adaptation_filter),
optax.scale(-learning_rate),
)
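# NOTE: Illustrative usage sketch only; the learning rate and weight decay
# below are placeholder values, not a recommended configuration. It shows the
# standard optax init/update/apply cycle with the `lars` transformation above.
def _example_lars_step(params, grads, opt_state=None):
  optimizer = lars(
      learning_rate=0.2,
      weight_decay=1e-6,
      weight_decay_filter=exclude_bias_and_norm,
      lars_adaptation_filter=exclude_bias_and_norm)
  if opt_state is None:
    opt_state = optimizer.init(params)
  updates, opt_state = optimizer.update(grads, opt_state, params)
  return optax.apply_updates(params, updates), opt_state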
| deepmind-research-master | byol/utils/optimizers.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet dataset with typical pre-processing."""
import enum
from typing import Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
Batch = Mapping[Text, np.ndarray]
class Split(enum.Enum):
"""Imagenet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {
'TRAIN': Split.TRAIN,
'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID,
'VALIDATION': Split.VALID,
'TEST': Split.TEST
}[name.upper()]
@property
def num_examples(self):
return {
Split.TRAIN_AND_VALID: 1281167,
Split.TRAIN: 1271167,
Split.VALID: 10000,
Split.TEST: 50000
}[self]
class PreprocessMode(enum.Enum):
"""Preprocessing modes for the dataset."""
PRETRAIN = 1 # Generates two augmented views (random crop + augmentations).
LINEAR_TRAIN = 2 # Generates a single random crop.
EVAL = 3 # Generates a single center crop.
def normalize_images(images: jnp.ndarray) -> jnp.ndarray:
"""Normalize the image using ImageNet statistics."""
mean_rgb = (0.485, 0.456, 0.406)
stddev_rgb = (0.229, 0.224, 0.225)
normed_images = images - jnp.array(mean_rgb).reshape((1, 1, 1, 3))
normed_images = normed_images / jnp.array(stddev_rgb).reshape((1, 1, 1, 3))
return normed_images
def load(split: Split,
*,
preprocess_mode: PreprocessMode,
batch_dims: Sequence[int],
transpose: bool = False,
allow_caching: bool = False) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset."""
start, end = _shard(split, jax.host_id(), jax.host_count())
total_batch_size = np.prod(batch_dims)
tfds_split = tfds.core.ReadInstruction(
_to_tfds_split(split), from_=start, to=end, unit='abs')
ds = tfds.load(
'imagenet2012:5.*.*',
split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 48
options.experimental_threading.max_intra_op_parallelism = 1
if preprocess_mode is not PreprocessMode.EVAL:
options.experimental_deterministic = False
if jax.host_count() > 1 and allow_caching:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
else:
if split.num_examples % total_batch_size != 0:
      raise ValueError(
          f'Test/valid set size {split.num_examples} must be divisible by '
          f'the total batch size {total_batch_size}')
ds = ds.with_options(options)
def preprocess_pretrain(example):
view1 = _preprocess_image(example['image'], mode=preprocess_mode)
view2 = _preprocess_image(example['image'], mode=preprocess_mode)
label = tf.cast(example['label'], tf.int32)
return {'view1': view1, 'view2': view2, 'labels': label}
def preprocess_linear_train(example):
image = _preprocess_image(example['image'], mode=preprocess_mode)
label = tf.cast(example['label'], tf.int32)
return {'images': image, 'labels': label}
def preprocess_eval(example):
image = _preprocess_image(example['image'], mode=preprocess_mode)
label = tf.cast(example['label'], tf.int32)
return {'images': image, 'labels': label}
if preprocess_mode is PreprocessMode.PRETRAIN:
ds = ds.map(
preprocess_pretrain, num_parallel_calls=tf.data.experimental.AUTOTUNE)
elif preprocess_mode is PreprocessMode.LINEAR_TRAIN:
ds = ds.map(
preprocess_linear_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
ds = ds.map(
preprocess_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def transpose_fn(batch):
# We use the double-transpose-trick to improve performance for TPUs. Note
# that this (typically) requires a matching HWCN->NHWC transpose in your
# model code. The compiler cannot make this optimization for us since our
# data pipeline and model are compiled separately.
batch = dict(**batch)
if preprocess_mode is PreprocessMode.PRETRAIN:
batch['view1'] = tf.transpose(batch['view1'], (1, 2, 3, 0))
batch['view2'] = tf.transpose(batch['view2'], (1, 2, 3, 0))
else:
batch['images'] = tf.transpose(batch['images'], (1, 2, 3, 0))
return batch
for i, batch_size in enumerate(reversed(batch_dims)):
ds = ds.batch(batch_size)
if i == 0 and transpose:
ds = ds.map(transpose_fn) # NHWC -> HWCN
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
yield from tfds.as_numpy(ds)
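# NOTE: Illustrative usage sketch only; it requires the `imagenet2012` TFDS
# dataset to be available and is not called anywhere in this module. It shows
# how the training code consumes `load`: the leading batch dimension is the
# local device count, so each batch can be fed directly to a pmapped step.
def _example_consume_pretrain_loader(per_device_batch_size=4):
  train_iter = load(
      Split.TRAIN_AND_VALID,
      preprocess_mode=PreprocessMode.PRETRAIN,
      transpose=False,
      batch_dims=[jax.local_device_count(), per_device_batch_size])
  batch = next(train_iter)
  # batch['view1'] and batch['view2'] have shape
  # [local_device_count, per_device_batch_size, 224, 224, 3].
  return batch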
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
  # NOTE: ImageNet did not release labels for the test split used in the
  # competition, so we use the TFDS VALIDATION split as our TEST split and
  # reserve 10k images from TRAIN for VALID.
if split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
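# NOTE: Illustrative sketch only. With 4 hosts reading the VALID split (10000
# examples), `_shard` assigns the contiguous ranges (0, 2500), (2500, 5000),
# (5000, 7500) and (7500, 10000). For Split.TRAIN the same ranges would be
# shifted by 10000, since the first 10000 TFDS TRAIN images are reserved as
# the VALID split.
def _example_shard_ranges(num_shards=4):
  return [_shard(Split.VALID, i, num_shards) for i in range(num_shards)]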
def _preprocess_image(
image_bytes: tf.Tensor,
mode: PreprocessMode,
) -> tf.Tensor:
"""Returns processed and resized images."""
if mode is PreprocessMode.PRETRAIN:
image = _decode_and_random_crop(image_bytes)
    # Random horizontal flipping is optionally done later, in
    # augmentations.postprocess.
elif mode is PreprocessMode.LINEAR_TRAIN:
image = _decode_and_random_crop(image_bytes)
image = tf.image.random_flip_left_right(image)
else:
image = _decode_and_center_crop(image_bytes)
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
assert image.dtype == tf.uint8
image = tf.image.resize(image, [224, 224], tf.image.ResizeMethod.BICUBIC)
image = tf.clip_by_value(image / 255., 0., 1.)
return image
def _decode_and_random_crop(image_bytes: tf.Tensor) -> tf.Tensor:
"""Make a random crop of 224."""
img_size = tf.image.extract_jpeg_shape(image_bytes)
area = tf.cast(img_size[1] * img_size[0], tf.float32)
target_area = tf.random.uniform([], 0.08, 1.0, dtype=tf.float32) * area
log_ratio = (tf.math.log(3 / 4), tf.math.log(4 / 3))
aspect_ratio = tf.math.exp(
tf.random.uniform([], *log_ratio, dtype=tf.float32))
w = tf.cast(tf.round(tf.sqrt(target_area * aspect_ratio)), tf.int32)
h = tf.cast(tf.round(tf.sqrt(target_area / aspect_ratio)), tf.int32)
w = tf.minimum(w, img_size[1])
h = tf.minimum(h, img_size[0])
offset_w = tf.random.uniform((),
minval=0,
maxval=img_size[1] - w + 1,
dtype=tf.int32)
offset_h = tf.random.uniform((),
minval=0,
maxval=img_size[0] - h + 1,
dtype=tf.int32)
crop_window = tf.stack([offset_h, offset_w, h, w])
image = tf.io.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def transpose_images(batch: Batch):
"""Transpose images for TPU training.."""
new_batch = dict(batch) # Avoid mutating in place.
if 'images' in batch:
new_batch['images'] = jnp.transpose(batch['images'], (3, 0, 1, 2))
else:
new_batch['view1'] = jnp.transpose(batch['view1'], (3, 0, 1, 2))
new_batch['view2'] = jnp.transpose(batch['view2'], (3, 0, 1, 2))
return new_batch
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
) -> tf.Tensor:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
padded_center_crop_size = tf.cast(
((224 / (224 + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([
offset_height, offset_width, padded_center_crop_size,
padded_center_crop_size
])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
| deepmind-research-master | byol/utils/dataset.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used in BYOL."""
from typing import Any, Mapping, Optional, Sequence, Text
import haiku as hk
import jax
import jax.numpy as jnp
class MLP(hk.Module):
"""One hidden layer perceptron, with normalization."""
def __init__(
self,
name: Text,
hidden_size: int,
output_size: int,
bn_config: Mapping[Text, Any],
):
super().__init__(name=name)
self._hidden_size = hidden_size
self._output_size = output_size
self._bn_config = bn_config
def __call__(self, inputs: jnp.ndarray, is_training: bool) -> jnp.ndarray:
out = hk.Linear(output_size=self._hidden_size, with_bias=True)(inputs)
out = hk.BatchNorm(**self._bn_config)(out, is_training=is_training)
out = jax.nn.relu(out)
out = hk.Linear(output_size=self._output_size, with_bias=False)(out)
return out
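# NOTE: Illustrative sketch only; the sizes and BatchNorm settings below are
# placeholder values, not BYOL's configuration. Haiku modules such as `MLP`
# must be built inside `hk.transform_with_state` (they carry BatchNorm state).
def _example_mlp_forward():
  def forward(x, is_training):
    mlp = MLP(name='example_mlp',
              hidden_size=128,
              output_size=32,
              bn_config={'create_scale': True, 'create_offset': True,
                         'decay_rate': 0.9, 'eps': 1e-5})
    return mlp(x, is_training=is_training)
  init_fn, apply_fn = hk.transform_with_state(forward)
  params, state = init_fn(jax.random.PRNGKey(0), jnp.zeros([8, 64]), True)
  out, state = apply_fn(params, state, None, jnp.zeros([8, 64]), True)
  return out.shape  # (8, 32)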
def check_length(length, value, name):
if len(value) != length:
    raise ValueError(
        f'`{name}` must be of length {length}, not {len(value)}')
class ResNetTorso(hk.Module):
"""ResNet model."""
def __init__(
self,
blocks_per_group: Sequence[int],
num_classes: Optional[int] = None,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
bottleneck: bool = True,
channels_per_group: Sequence[int] = (256, 512, 1024, 2048),
use_projection: Sequence[bool] = (True, True, True, True),
width_multiplier: int = 1,
name: Optional[str] = None,
):
"""Constructs a ResNet model.
Args:
blocks_per_group: A sequence of length 4 that indicates the number of
blocks created in each group.
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of three elements, `decay_rate`, `eps`, and
`cross_replica_axis`, to be passed on to the `BatchNorm` layers. By
default the `decay_rate` is `0.9` and `eps` is `1e-5`, and the axis is
`None`.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
bottleneck: Whether the block should bottleneck or not. Defaults to True.
channels_per_group: A sequence of length 4 that indicates the number
of channels used for each block in each group.
use_projection: A sequence of length 4 that indicates whether each
residual block should use projection.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(name=name)
self.resnet_v2 = resnet_v2
bn_config = dict(bn_config or {})
bn_config.setdefault('decay_rate', 0.9)
bn_config.setdefault('eps', 1e-5)
bn_config.setdefault('create_scale', True)
bn_config.setdefault('create_offset', True)
# Number of blocks in each group for ResNet.
check_length(4, blocks_per_group, 'blocks_per_group')
check_length(4, channels_per_group, 'channels_per_group')
self.initial_conv = hk.Conv2D(
output_channels=64 * width_multiplier,
kernel_shape=7,
stride=2,
with_bias=False,
padding='SAME',
name='initial_conv')
if not self.resnet_v2:
self.initial_batchnorm = hk.BatchNorm(name='initial_batchnorm',
**bn_config)
self.block_groups = []
strides = (1, 2, 2, 2)
for i in range(4):
self.block_groups.append(
hk.nets.ResNet.BlockGroup(
channels=width_multiplier * channels_per_group[i],
num_blocks=blocks_per_group[i],
stride=strides[i],
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=bottleneck,
use_projection=use_projection[i],
name='block_group_%d' % (i)))
if self.resnet_v2:
self.final_batchnorm = hk.BatchNorm(name='final_batchnorm', **bn_config)
self.logits = hk.Linear(num_classes, w_init=jnp.zeros, name='logits')
def __call__(self, inputs, is_training, test_local_stats=False):
out = inputs
out = self.initial_conv(out)
if not self.resnet_v2:
out = self.initial_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = hk.max_pool(out,
window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1),
padding='SAME')
for block_group in self.block_groups:
out = block_group(out, is_training, test_local_stats)
if self.resnet_v2:
out = self.final_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = jnp.mean(out, axis=[1, 2])
return out
class TinyResNet(ResNetTorso):
"""Tiny resnet for local runs and tests."""
def __init__(self,
num_classes: Optional[int] = None,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(blocks_per_group=(1, 1, 1, 1),
channels_per_group=(8, 8, 8, 8),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
width_multiplier=width_multiplier,
name=name)
class ResNet18(ResNetTorso):
"""ResNet18."""
def __init__(self,
num_classes: Optional[int] = None,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(blocks_per_group=(2, 2, 2, 2),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
channels_per_group=(64, 128, 256, 512),
width_multiplier=width_multiplier,
name=name)
class ResNet34(ResNetTorso):
"""ResNet34."""
def __init__(self,
num_classes: Optional[int],
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 4, 6, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
channels_per_group=(64, 128, 256, 512),
width_multiplier=width_multiplier,
name=name)
class ResNet50(ResNetTorso):
"""ResNet50."""
def __init__(self,
num_classes: Optional[int] = None,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 4, 6, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
width_multiplier=width_multiplier,
name=name)
class ResNet101(ResNetTorso):
"""ResNet101."""
def __init__(self,
num_classes: Optional[int],
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 4, 23, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
width_multiplier=width_multiplier,
name=name)
class ResNet152(ResNetTorso):
"""ResNet152."""
def __init__(self,
num_classes: Optional[int],
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 8, 36, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
width_multiplier=width_multiplier,
name=name)
class ResNet200(ResNetTorso):
"""ResNet200."""
def __init__(self,
num_classes: Optional[int],
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults
to False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(blocks_per_group=(3, 24, 36, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
width_multiplier=width_multiplier,
name=name)
| deepmind-research-master | byol/utils/networks.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
from typing import Optional, Text
from absl import logging
import jax
import jax.numpy as jnp
def topk_accuracy(
logits: jnp.ndarray,
labels: jnp.ndarray,
topk: int,
ignore_label_above: Optional[int] = None,
) -> jnp.ndarray:
"""Top-num_codes accuracy."""
assert len(labels.shape) == 1, 'topk expects 1d int labels.'
assert len(logits.shape) == 2, 'topk expects 2d logits.'
if ignore_label_above is not None:
logits = logits[labels < ignore_label_above, :]
labels = labels[labels < ignore_label_above]
prds = jnp.argsort(logits, axis=1)[:, ::-1]
prds = prds[:, :topk]
total = jnp.any(prds == jnp.tile(labels[:, jnp.newaxis], [1, topk]), axis=1)
return total
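# Illustrative usage sketch, added for exposition only; this helper is not part
# of the original DeepMind file and the logits/labels are made-up values. Only
# the first example's label lands in its top-2 logits, so the mean of the
# returned per-example hits is 0.5.
def _example_topk_accuracy():
  logits = jnp.array([[0.1, 0.7, 0.2],
                      [0.9, 0.06, 0.04]])
  labels = jnp.array([1, 2])
  hits = topk_accuracy(logits, labels, topk=2)
  return jnp.mean(hits)  # -> 0.5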
def softmax_cross_entropy(
logits: jnp.ndarray,
labels: jnp.ndarray,
reduction: Optional[Text] = 'mean',
) -> jnp.ndarray:
"""Computes softmax cross entropy given logits and one-hot class labels.
Args:
logits: Logit output values.
labels: Ground truth one-hot-encoded labels.
reduction: Type of reduction to apply to loss.
Returns:
    Loss value. If `reduction` is `none`, this has the same shape as `labels`
    but without the final (class) axis; otherwise, it is a scalar.
Raises:
ValueError: If the type of `reduction` is unsupported.
"""
loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
if reduction == 'sum':
return jnp.sum(loss)
elif reduction == 'mean':
return jnp.mean(loss)
elif reduction == 'none' or reduction is None:
return loss
else:
raise ValueError(f'Incorrect reduction mode {reduction}')
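# Illustrative sketch, not part of the original file: `labels` must be one-hot,
# so a typical call pairs this loss with `jax.nn.one_hot`. The values below are
# hypothetical examples.
def _example_softmax_cross_entropy():
  logits = jnp.array([[2.0, 0.5, -1.0]])
  labels = jax.nn.one_hot(jnp.array([0]), num_classes=3)
  return softmax_cross_entropy(logits, labels, reduction='mean')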
def l2_normalize(
x: jnp.ndarray,
axis: Optional[int] = None,
epsilon: float = 1e-12,
) -> jnp.ndarray:
"""l2 normalize a tensor on an axis with numerical stability."""
square_sum = jnp.sum(jnp.square(x), axis=axis, keepdims=True)
x_inv_norm = jax.lax.rsqrt(jnp.maximum(square_sum, epsilon))
return x * x_inv_norm
def l2_weight_regularizer(params):
"""Helper to do lasso on weights.
Args:
params: the entire param set.
Returns:
Scalar of the l2 norm of the weights.
"""
l2_norm = 0.
for mod_name, mod_params in params.items():
if 'norm' not in mod_name:
for param_k, param_v in mod_params.items():
        if 'b' not in param_k:  # Filter out biases.
l2_norm += jnp.sum(jnp.square(param_v))
else:
logging.warning('Excluding %s/%s from optimizer weight decay!',
mod_name, param_k)
else:
logging.warning('Excluding %s from optimizer weight decay!', mod_name)
return 0.5 * l2_norm
def regression_loss(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
"""Byol's regression loss. This is a simple cosine similarity."""
normed_x, normed_y = l2_normalize(x, axis=-1), l2_normalize(y, axis=-1)
return jnp.sum((normed_x - normed_y)**2, axis=-1)
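# Illustrative sketch, not part of the original file: because both inputs are
# l2-normalized, the loss equals 2 - 2 * cosine_similarity(x, y). For the
# orthogonal toy vectors below it evaluates to 2.
def _example_regression_loss():
  x = jnp.array([[1.0, 0.0]])
  y = jnp.array([[0.0, 1.0]])
  return regression_loss(x, y)  # -> [2.]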
def bcast_local_devices(value):
"""Broadcasts an object to all local devices."""
devices = jax.local_devices()
def _replicate(x):
"""Replicate an object on each device."""
x = jnp.array(x)
return jax.device_put_sharded(len(devices) * [x], devices)
return jax.tree_util.tree_map(_replicate, value)
def get_first(xs):
"""Gets values from the first device."""
return jax.tree_map(lambda x: x[0], xs)
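# Illustrative sketch, not part of the original file: `bcast_local_devices` and
# `get_first` are typically used as inverses around a pmapped update step,
# adding and then removing the leading device axis.
def _example_replicate_roundtrip():
  params = {'w': jnp.ones((2, 2))}
  replicated = bcast_local_devices(params)  # Leading axis = num local devices.
  return get_first(replicated)  # Back to a single, unreplicated copy.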
| deepmind-research-master | byol/utils/helpers.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data preprocessing and augmentation."""
import functools
from typing import Any, Mapping, Text
import jax
import jax.numpy as jnp
# typing
JaxBatch = Mapping[Text, jnp.ndarray]
ConfigDict = Mapping[Text, Any]
augment_config = dict(
view1=dict(
random_flip=True, # Random left/right flip
color_transform=dict(
apply_prob=1.0,
# Range of jittering
brightness=0.4,
contrast=0.4,
saturation=0.2,
hue=0.1,
# Probability of applying color jittering
color_jitter_prob=0.8,
# Probability of converting to grayscale
to_grayscale_prob=0.2,
# Shuffle the order of color transforms
shuffle=True),
gaussian_blur=dict(
apply_prob=1.0,
# Kernel size ~ image_size / blur_divider
blur_divider=10.,
# Kernel distribution
sigma_min=0.1,
sigma_max=2.0),
solarize=dict(apply_prob=0.0, threshold=0.5),
),
view2=dict(
random_flip=True,
color_transform=dict(
apply_prob=1.0,
brightness=0.4,
contrast=0.4,
saturation=0.2,
hue=0.1,
color_jitter_prob=0.8,
to_grayscale_prob=0.2,
shuffle=True),
gaussian_blur=dict(
apply_prob=0.1, blur_divider=10., sigma_min=0.1, sigma_max=2.0),
solarize=dict(apply_prob=0.2, threshold=0.5),
))
def postprocess(inputs: JaxBatch, rng: jnp.ndarray):
"""Apply the image augmentations to crops in inputs (view1 and view2)."""
def _postprocess_image(
images: jnp.ndarray,
rng: jnp.ndarray,
presets: ConfigDict,
  ) -> jnp.ndarray:
"""Applies augmentations in post-processing.
Args:
images: an NHWC tensor (with C=3), with float values in [0, 1].
rng: a single PRNGKey.
presets: a dict of presets for the augmentations.
Returns:
      A batch of augmented images with the same NHWC shape as `images`.
"""
flip_rng, color_rng, blur_rng, solarize_rng = jax.random.split(rng, 4)
out = images
if presets['random_flip']:
out = random_flip(out, flip_rng)
if presets['color_transform']['apply_prob'] > 0:
out = color_transform(out, color_rng, **presets['color_transform'])
if presets['gaussian_blur']['apply_prob'] > 0:
out = gaussian_blur(out, blur_rng, **presets['gaussian_blur'])
if presets['solarize']['apply_prob'] > 0:
out = solarize(out, solarize_rng, **presets['solarize'])
out = jnp.clip(out, 0., 1.)
return jax.lax.stop_gradient(out)
rng1, rng2 = jax.random.split(rng, num=2)
view1 = _postprocess_image(inputs['view1'], rng1, augment_config['view1'])
view2 = _postprocess_image(inputs['view2'], rng2, augment_config['view2'])
return dict(view1=view1, view2=view2, labels=inputs['labels'])
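# Illustrative sketch, not part of the original file: `postprocess` expects a
# batch dict with 'view1', 'view2' and 'labels' plus a single PRNGKey. The
# shapes below are hypothetical example values.
def _example_postprocess():
  rng = jax.random.PRNGKey(0)
  batch = dict(
      view1=jnp.zeros((2, 32, 32, 3)),
      view2=jnp.zeros((2, 32, 32, 3)),
      labels=jnp.zeros((2,), dtype=jnp.int32))
  return postprocess(batch, rng)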
def _maybe_apply(apply_fn, inputs, rng, apply_prob):
should_apply = jax.random.uniform(rng, shape=()) <= apply_prob
return jax.lax.cond(should_apply, inputs, apply_fn, inputs, lambda x: x)
def _depthwise_conv2d(inputs, kernel, strides, padding):
"""Computes a depthwise conv2d in Jax.
Args:
inputs: an NHWC tensor with N=1.
kernel: a [H", W", 1, C] tensor.
strides: a 2d tensor.
padding: "SAME" or "VALID".
Returns:
    The depthwise convolution of inputs with kernel, with shape [1, H, W, C].
"""
return jax.lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
feature_group_count=inputs.shape[-1],
dimension_numbers=('NHWC', 'HWIO', 'NHWC'))
def _gaussian_blur_single_image(image, kernel_size, padding, sigma):
"""Applies gaussian blur to a single image, given as NHWC with N=1."""
radius = int(kernel_size / 2)
kernel_size_ = 2 * radius + 1
x = jnp.arange(-radius, radius + 1).astype(jnp.float32)
blur_filter = jnp.exp(-x**2 / (2. * sigma**2))
blur_filter = blur_filter / jnp.sum(blur_filter)
blur_v = jnp.reshape(blur_filter, [kernel_size_, 1, 1, 1])
blur_h = jnp.reshape(blur_filter, [1, kernel_size_, 1, 1])
num_channels = image.shape[-1]
blur_h = jnp.tile(blur_h, [1, 1, 1, num_channels])
blur_v = jnp.tile(blur_v, [1, 1, 1, num_channels])
expand_batch_dim = len(image.shape) == 3
if expand_batch_dim:
image = image[jnp.newaxis, ...]
blurred = _depthwise_conv2d(image, blur_h, strides=[1, 1], padding=padding)
blurred = _depthwise_conv2d(blurred, blur_v, strides=[1, 1], padding=padding)
  if expand_batch_dim:
    blurred = jnp.squeeze(blurred, axis=0)
return blurred
def _random_gaussian_blur(image, rng, kernel_size, padding, sigma_min,
sigma_max, apply_prob):
"""Applies a random gaussian blur."""
apply_rng, transform_rng = jax.random.split(rng)
def _apply(image):
sigma_rng, = jax.random.split(transform_rng, 1)
sigma = jax.random.uniform(
sigma_rng,
shape=(),
minval=sigma_min,
maxval=sigma_max,
dtype=jnp.float32)
return _gaussian_blur_single_image(image, kernel_size, padding, sigma)
return _maybe_apply(_apply, image, apply_rng, apply_prob)
def rgb_to_hsv(r, g, b):
"""Converts R, G, B values to H, S, V values.
Reference TF implementation:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/adjust_saturation_op.cc
Only input values between 0 and 1 are guaranteed to work properly, but this
function complies with the TF implementation outside of this range.
Args:
r: A tensor representing the red color component as floats.
g: A tensor representing the green color component as floats.
b: A tensor representing the blue color component as floats.
Returns:
H, S, V values, each as tensors of shape [...] (same as the input without
the last dimension).
"""
vv = jnp.maximum(jnp.maximum(r, g), b)
range_ = vv - jnp.minimum(jnp.minimum(r, g), b)
sat = jnp.where(vv > 0, range_ / vv, 0.)
norm = jnp.where(range_ != 0, 1. / (6. * range_), 1e9)
hr = norm * (g - b)
hg = norm * (b - r) + 2. / 6.
hb = norm * (r - g) + 4. / 6.
hue = jnp.where(r == vv, hr, jnp.where(g == vv, hg, hb))
hue = hue * (range_ > 0)
hue = hue + (hue < 0)
return hue, sat, vv
def hsv_to_rgb(h, s, v):
"""Converts H, S, V values to an R, G, B tuple.
Reference TF implementation:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/adjust_saturation_op.cc
Only input values between 0 and 1 are guaranteed to work properly, but this
function complies with the TF implementation outside of this range.
Args:
h: A float tensor of arbitrary shape for the hue (0-1 values).
s: A float tensor of the same shape for the saturation (0-1 values).
v: A float tensor of the same shape for the value channel (0-1 values).
Returns:
An (r, g, b) tuple, each with the same dimension as the inputs.
"""
c = s * v
m = v - c
dh = (h % 1.) * 6.
fmodu = dh % 2.
x = c * (1 - jnp.abs(fmodu - 1))
hcat = jnp.floor(dh).astype(jnp.int32)
rr = jnp.where(
(hcat == 0) | (hcat == 5), c, jnp.where(
(hcat == 1) | (hcat == 4), x, 0)) + m
gg = jnp.where(
(hcat == 1) | (hcat == 2), c, jnp.where(
(hcat == 0) | (hcat == 3), x, 0)) + m
bb = jnp.where(
(hcat == 3) | (hcat == 4), c, jnp.where(
(hcat == 2) | (hcat == 5), x, 0)) + m
return rr, gg, bb
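# Illustrative sketch, not part of the original file: rgb -> hsv -> rgb is
# (approximately) the identity for values in [0, 1].
def _example_hsv_roundtrip():
  r, g, b = jnp.array(0.2), jnp.array(0.6), jnp.array(0.4)
  h, s, v = rgb_to_hsv(r, g, b)
  return hsv_to_rgb(h, s, v)  # -> approximately (0.2, 0.6, 0.4)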
def adjust_brightness(rgb_tuple, delta):
return jax.tree_map(lambda x: x + delta, rgb_tuple)
def adjust_contrast(image, factor):
def _adjust_contrast_channel(channel):
mean = jnp.mean(channel, axis=(-2, -1), keepdims=True)
return factor * (channel - mean) + mean
return jax.tree_map(_adjust_contrast_channel, image)
def adjust_saturation(h, s, v, factor):
return h, jnp.clip(s * factor, 0., 1.), v
def adjust_hue(h, s, v, delta):
  # Note: this method exactly matches TF's adjust_hue (combined with the hsv/rgb
# conversions) when running on GPU. When running on CPU, the results will be
# different if all RGB values for a pixel are outside of the [0, 1] range.
return (h + delta) % 1.0, s, v
def _random_brightness(rgb_tuple, rng, max_delta):
delta = jax.random.uniform(rng, shape=(), minval=-max_delta, maxval=max_delta)
return adjust_brightness(rgb_tuple, delta)
def _random_contrast(rgb_tuple, rng, max_delta):
factor = jax.random.uniform(
rng, shape=(), minval=1 - max_delta, maxval=1 + max_delta)
return adjust_contrast(rgb_tuple, factor)
def _random_saturation(rgb_tuple, rng, max_delta):
h, s, v = rgb_to_hsv(*rgb_tuple)
factor = jax.random.uniform(
rng, shape=(), minval=1 - max_delta, maxval=1 + max_delta)
return hsv_to_rgb(*adjust_saturation(h, s, v, factor))
def _random_hue(rgb_tuple, rng, max_delta):
h, s, v = rgb_to_hsv(*rgb_tuple)
delta = jax.random.uniform(rng, shape=(), minval=-max_delta, maxval=max_delta)
return hsv_to_rgb(*adjust_hue(h, s, v, delta))
def _to_grayscale(image):
rgb_weights = jnp.array([0.2989, 0.5870, 0.1140])
grayscale = jnp.tensordot(image, rgb_weights, axes=(-1, -1))[..., jnp.newaxis]
return jnp.tile(grayscale, (1, 1, 3)) # Back to 3 channels.
def _color_transform_single_image(image, rng, brightness, contrast, saturation,
hue, to_grayscale_prob, color_jitter_prob,
apply_prob, shuffle):
"""Applies color jittering to a single image."""
apply_rng, transform_rng = jax.random.split(rng)
perm_rng, b_rng, c_rng, s_rng, h_rng, cj_rng, gs_rng = jax.random.split(
transform_rng, 7)
# Whether the transform should be applied at all.
should_apply = jax.random.uniform(apply_rng, shape=()) <= apply_prob
# Whether to apply grayscale transform.
should_apply_gs = jax.random.uniform(gs_rng, shape=()) <= to_grayscale_prob
# Whether to apply color jittering.
should_apply_color = jax.random.uniform(cj_rng, shape=()) <= color_jitter_prob
# Decorator to conditionally apply fn based on an index.
def _make_cond(fn, idx):
def identity_fn(x, unused_rng, unused_param):
return x
def cond_fn(args, i):
def clip(args):
return jax.tree_map(lambda arg: jnp.clip(arg, 0., 1.), args)
out = jax.lax.cond(should_apply & should_apply_color & (i == idx), args,
lambda a: clip(fn(*a)), args,
lambda a: identity_fn(*a))
return jax.lax.stop_gradient(out)
return cond_fn
random_brightness_cond = _make_cond(_random_brightness, idx=0)
random_contrast_cond = _make_cond(_random_contrast, idx=1)
random_saturation_cond = _make_cond(_random_saturation, idx=2)
random_hue_cond = _make_cond(_random_hue, idx=3)
def _color_jitter(x):
rgb_tuple = tuple(jax.tree_map(jnp.squeeze, jnp.split(x, 3, axis=-1)))
if shuffle:
order = jax.random.permutation(perm_rng, jnp.arange(4, dtype=jnp.int32))
else:
order = range(4)
for idx in order:
if brightness > 0:
rgb_tuple = random_brightness_cond((rgb_tuple, b_rng, brightness), idx)
if contrast > 0:
rgb_tuple = random_contrast_cond((rgb_tuple, c_rng, contrast), idx)
if saturation > 0:
rgb_tuple = random_saturation_cond((rgb_tuple, s_rng, saturation), idx)
if hue > 0:
rgb_tuple = random_hue_cond((rgb_tuple, h_rng, hue), idx)
return jnp.stack(rgb_tuple, axis=-1)
out_apply = _color_jitter(image)
out_apply = jax.lax.cond(should_apply & should_apply_gs, out_apply,
_to_grayscale, out_apply, lambda x: x)
return jnp.clip(out_apply, 0., 1.)
def _random_flip_single_image(image, rng):
_, flip_rng = jax.random.split(rng)
should_flip_lr = jax.random.uniform(flip_rng, shape=()) <= 0.5
image = jax.lax.cond(should_flip_lr, image, jnp.fliplr, image, lambda x: x)
return image
def random_flip(images, rng):
rngs = jax.random.split(rng, images.shape[0])
return jax.vmap(_random_flip_single_image)(images, rngs)
def color_transform(images,
rng,
brightness=0.8,
contrast=0.8,
saturation=0.8,
hue=0.2,
color_jitter_prob=0.8,
to_grayscale_prob=0.2,
apply_prob=1.0,
shuffle=True):
"""Applies color jittering and/or grayscaling to a batch of images.
Args:
images: an NHWC tensor, with C=3.
rng: a single PRNGKey.
brightness: the range of jitter on brightness.
contrast: the range of jitter on contrast.
saturation: the range of jitter on saturation.
hue: the range of jitter on hue.
color_jitter_prob: the probability of applying color jittering.
to_grayscale_prob: the probability of converting the image to grayscale.
apply_prob: the probability of applying the transform to a batch element.
shuffle: whether to apply the transforms in a random order.
Returns:
A NHWC tensor of the transformed images.
"""
rngs = jax.random.split(rng, images.shape[0])
jitter_fn = functools.partial(
_color_transform_single_image,
brightness=brightness,
contrast=contrast,
saturation=saturation,
hue=hue,
color_jitter_prob=color_jitter_prob,
to_grayscale_prob=to_grayscale_prob,
apply_prob=apply_prob,
shuffle=shuffle)
return jax.vmap(jitter_fn)(images, rngs)
def gaussian_blur(images,
rng,
blur_divider=10.,
sigma_min=0.1,
sigma_max=2.0,
apply_prob=1.0):
"""Applies gaussian blur to a batch of images.
Args:
images: an NHWC tensor, with C=3.
rng: a single PRNGKey.
blur_divider: the blurring kernel will have size H / blur_divider.
sigma_min: the minimum value for sigma in the blurring kernel.
sigma_max: the maximum value for sigma in the blurring kernel.
apply_prob: the probability of applying the transform to a batch element.
Returns:
A NHWC tensor of the blurred images.
"""
rngs = jax.random.split(rng, images.shape[0])
kernel_size = images.shape[1] / blur_divider
blur_fn = functools.partial(
_random_gaussian_blur,
kernel_size=kernel_size,
padding='SAME',
sigma_min=sigma_min,
sigma_max=sigma_max,
apply_prob=apply_prob)
return jax.vmap(blur_fn)(images, rngs)
def _solarize_single_image(image, rng, threshold, apply_prob):
def _apply(image):
return jnp.where(image < threshold, image, 1. - image)
return _maybe_apply(_apply, image, rng, apply_prob)
def solarize(images, rng, threshold=0.5, apply_prob=1.0):
"""Applies solarization.
Args:
images: an NHWC tensor (with C=3).
rng: a single PRNGKey.
threshold: the solarization threshold.
apply_prob: the probability of applying the transform to a batch element.
Returns:
A NHWC tensor of the transformed images.
"""
rngs = jax.random.split(rng, images.shape[0])
solarize_fn = functools.partial(
_solarize_single_image, threshold=threshold, apply_prob=apply_prob)
return jax.vmap(solarize_fn)(images, rngs)
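# Illustrative sketch, not part of the original file: with apply_prob=1.0,
# pixels above the threshold are inverted (0.8 -> 0.2) while pixels below it
# are left unchanged (0.3 -> 0.3). The tiny image below is a made-up example.
def _example_solarize():
  images = jnp.array([[[[0.8, 0.3, 0.5]]]])  # NHWC with N=H=W=1 and C=3.
  rng = jax.random.PRNGKey(0)
  return solarize(images, rng, threshold=0.5, apply_prob=1.0)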
| deepmind-research-master | byol/utils/augmentations.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learning rate schedules."""
import jax.numpy as jnp
def target_ema(global_step: jnp.ndarray,
base_ema: float,
max_steps: int) -> jnp.ndarray:
decay = _cosine_decay(global_step, max_steps, 1.)
return 1. - (1. - base_ema) * decay
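# Illustrative sketch, not part of the original file: at step 0 the EMA
# coefficient equals `base_ema`; it then anneals towards 1 over `max_steps`
# following the cosine decay defined below. The numbers are example values.
def _example_target_ema():
  return target_ema(jnp.array(0), base_ema=0.996, max_steps=100_000)  # -> 0.996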
def learning_schedule(global_step: jnp.ndarray,
batch_size: int,
base_learning_rate: float,
total_steps: int,
warmup_steps: int) -> float:
"""Cosine learning rate scheduler."""
# Compute LR & Scaled LR
scaled_lr = base_learning_rate * batch_size / 256.
learning_rate = (
global_step.astype(jnp.float32) / int(warmup_steps) *
scaled_lr if warmup_steps > 0 else scaled_lr)
# Cosine schedule after warmup.
return jnp.where(
global_step < warmup_steps, learning_rate,
_cosine_decay(global_step - warmup_steps, total_steps - warmup_steps,
scaled_lr))
def _cosine_decay(global_step: jnp.ndarray,
max_steps: int,
initial_value: float) -> jnp.ndarray:
"""Simple implementation of cosine decay from TF1."""
global_step = jnp.minimum(global_step, max_steps)
cosine_decay_value = 0.5 * (1 + jnp.cos(jnp.pi * global_step / max_steps))
decayed_learning_rate = initial_value * cosine_decay_value
return decayed_learning_rate
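# Illustrative sketch, not part of the original file: with a batch size of 4096
# the base rate is scaled by 4096 / 256 = 16, warmed up linearly, and then
# decayed with a cosine. All numbers below are hypothetical example values.
def _example_learning_schedule():
  return learning_schedule(
      global_step=jnp.array(1000),
      batch_size=4096,
      base_learning_rate=0.2,
      total_steps=100_000,
      warmup_steps=5_000)  # -> 0.64, i.e. 1000/5000 of the scaled rate 3.2.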
| deepmind-research-master | byol/utils/schedules.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpoint saving and restoring utilities."""
import os
import time
from typing import Mapping, Text, Tuple, Union
from absl import logging
import dill
import jax
import jax.numpy as jnp
from byol.utils import helpers
class Checkpointer:
"""A checkpoint saving and loading class."""
def __init__(
self,
use_checkpointing: bool,
checkpoint_dir: Text,
save_checkpoint_interval: int,
filename: Text):
if (not use_checkpointing or
checkpoint_dir is None or
save_checkpoint_interval <= 0):
self._checkpoint_enabled = False
return
self._checkpoint_enabled = True
self._checkpoint_dir = checkpoint_dir
os.makedirs(self._checkpoint_dir, exist_ok=True)
self._filename = filename
self._checkpoint_path = os.path.join(self._checkpoint_dir, filename)
self._last_checkpoint_time = 0
self._checkpoint_every = save_checkpoint_interval
def maybe_save_checkpoint(
self,
experiment_state: Mapping[Text, jnp.ndarray],
step: int,
rng: jnp.ndarray,
is_final: bool):
"""Saves a checkpoint if enough time has passed since the previous one."""
current_time = time.time()
if (not self._checkpoint_enabled or
jax.host_id() != 0 or # Only checkpoint the first worker.
(not is_final and
current_time - self._last_checkpoint_time < self._checkpoint_every)):
return
checkpoint_data = dict(
experiment_state=jax.tree_map(
lambda x: jax.device_get(x[0]), experiment_state),
step=step,
rng=rng)
with open(self._checkpoint_path + '_tmp', 'wb') as checkpoint_file:
dill.dump(checkpoint_data, checkpoint_file, protocol=2)
try:
os.rename(self._checkpoint_path, self._checkpoint_path + '_old')
remove_old = True
except FileNotFoundError:
remove_old = False # No previous checkpoint to remove
os.rename(self._checkpoint_path + '_tmp', self._checkpoint_path)
if remove_old:
os.remove(self._checkpoint_path + '_old')
self._last_checkpoint_time = current_time
def maybe_load_checkpoint(
self) -> Union[Tuple[Mapping[Text, jnp.ndarray], int, jnp.ndarray], None]:
"""Loads a checkpoint if any is found."""
checkpoint_data = load_checkpoint(self._checkpoint_path)
if checkpoint_data is None:
logging.info('No existing checkpoint found at %s', self._checkpoint_path)
return None
step = checkpoint_data['step']
rng = checkpoint_data['rng']
experiment_state = jax.tree_map(
helpers.bcast_local_devices, checkpoint_data['experiment_state'])
del checkpoint_data
return experiment_state, step, rng
def load_checkpoint(checkpoint_path):
try:
with open(checkpoint_path, 'rb') as checkpoint_file:
checkpoint_data = dill.load(checkpoint_file)
logging.info('Loading checkpoint from %s, saved at step %d',
checkpoint_path, checkpoint_data['step'])
return checkpoint_data
except FileNotFoundError:
return None
| deepmind-research-master | byol/utils/checkpointing.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | byol/configs/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for BYOL experiment."""
from byol.utils import dataset
# Preset values for certain number of training epochs.
_LR_PRESETS = {40: 0.45, 100: 0.45, 300: 0.3, 1000: 0.2}
_WD_PRESETS = {40: 1e-6, 100: 1e-6, 300: 1e-6, 1000: 1.5e-6}
_EMA_PRESETS = {40: 0.97, 100: 0.99, 300: 0.99, 1000: 0.996}
def get_config(num_epochs: int, batch_size: int):
"""Return config object, containing all hyperparameters for training."""
train_images_per_epoch = dataset.Split.TRAIN_AND_VALID.num_examples
assert num_epochs in [40, 100, 300, 1000]
config = dict(
random_seed=0,
num_classes=1000,
batch_size=batch_size,
max_steps=num_epochs * train_images_per_epoch // batch_size,
enable_double_transpose=True,
base_target_ema=_EMA_PRESETS[num_epochs],
network_config=dict(
projector_hidden_size=4096,
projector_output_size=256,
predictor_hidden_size=4096,
encoder_class='ResNet50', # Should match a class in utils/networks.
encoder_config=dict(
resnet_v2=False,
width_multiplier=1),
bn_config={
'decay_rate': .9,
'eps': 1e-5,
# Accumulate batchnorm statistics across devices.
# This should be equal to the `axis_name` argument passed
# to jax.pmap.
'cross_replica_axis': 'i',
'create_scale': True,
'create_offset': True,
}),
optimizer_config=dict(
weight_decay=_WD_PRESETS[num_epochs],
eta=1e-3,
momentum=.9,
),
lr_schedule_config=dict(
base_learning_rate=_LR_PRESETS[num_epochs],
warmup_steps=10 * train_images_per_epoch // batch_size,
),
evaluation_config=dict(
subset='test',
batch_size=100,
),
checkpointing_config=dict(
use_checkpointing=True,
checkpoint_dir='/tmp/byol',
save_checkpoint_interval=300,
filename='pretrain.pkl'
),
)
return config
| deepmind-research-master | byol/configs/byol.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config file for evaluation experiment."""
from typing import Text
from byol.utils import dataset
def get_config(checkpoint_to_evaluate: Text, batch_size: int):
"""Return config object for training."""
train_images_per_epoch = dataset.Split.TRAIN_AND_VALID.num_examples
config = dict(
random_seed=0,
enable_double_transpose=True,
max_steps=80 * train_images_per_epoch // batch_size,
num_classes=1000,
batch_size=batch_size,
checkpoint_to_evaluate=checkpoint_to_evaluate,
# If True, allows training without loading a checkpoint.
allow_train_from_scratch=False,
# Whether the backbone should be frozen (linear evaluation) or
# trainable (fine-tuning).
freeze_backbone=True,
optimizer_config=dict(
momentum=0.9,
nesterov=True,
),
lr_schedule_config=dict(
base_learning_rate=0.2,
warmup_steps=0,
),
network_config=dict( # Should match the evaluated checkpoint
encoder_class='ResNet50', # Should match a class in utils/networks.
encoder_config=dict(
resnet_v2=False,
width_multiplier=1),
bn_decay_rate=0.9,
),
evaluation_config=dict(
subset='test',
batch_size=100,
),
checkpointing_config=dict(
use_checkpointing=True,
checkpoint_dir='/tmp/byol',
save_checkpoint_interval=300,
filename='linear-eval.pkl'
),
)
return config
| deepmind-research-master | byol/configs/eval.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a graph-based network to predict particle mobilities in glasses."""
import os
from absl import app
from absl import flags
from glassy_dynamics import train as train_using_tf
from glassy_dynamics import train_using_jax
FLAGS = flags.FLAGS
flags.DEFINE_string(
'data_directory',
'',
'Directory which contains the train and test datasets.')
flags.DEFINE_integer(
'time_index',
9,
'The time index of the target mobilities.')
flags.DEFINE_integer(
'max_files_to_load',
None,
'The maximum number of files to load from the train and test datasets.')
flags.DEFINE_string(
'checkpoint_path',
None,
'Path used to store a checkpoint of the best model.')
flags.DEFINE_boolean(
'use_jax',
False,
'Uses jax to train model.')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
train_file_pattern = os.path.join(FLAGS.data_directory, 'train/aggregated*')
test_file_pattern = os.path.join(FLAGS.data_directory, 'test/aggregated*')
train = train_using_jax if FLAGS.use_jax else train_using_tf
train.train_model(
train_file_pattern=train_file_pattern,
test_file_pattern=test_file_pattern,
max_files_to_load=FLAGS.max_files_to_load,
time_index=FLAGS.time_index,
checkpoint_path=FLAGS.checkpoint_path)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | glassy_dynamics/train_binary.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A graph neural network based model to predict particle mobilities.
The architecture and performance of this model is described in our publication:
"Unveiling the predictive power of static structure in glassy systems".
"""
import functools
from typing import Any, Dict, Text, Tuple, Optional
from graph_nets import graphs
from graph_nets import modules as gn_modules
from graph_nets import utils_tf
import sonnet as snt
import tensorflow.compat.v1 as tf
def make_graph_from_static_structure(
positions: tf.Tensor,
types: tf.Tensor,
box: tf.Tensor,
edge_threshold: float) -> graphs.GraphsTuple:
"""Returns graph representing the static structure of the glass.
Each particle is represented by a node in the graph. The particle type is
stored as a node feature.
Two particles at a distance less than the threshold are connected by an edge.
The relative distance vector is stored as an edge feature.
Args:
positions: particle positions with shape [n_particles, 3].
types: particle types with shape [n_particles].
    box: dimensions of the periodic box that contains the particles, with
      shape [3].
edge_threshold: particles at distance less than threshold are connected by
an edge.
"""
# Calculate pairwise relative distances between particles: shape [n, n, 3].
cross_positions = positions[tf.newaxis, :, :] - positions[:, tf.newaxis, :]
# Enforces periodic boundary conditions.
box_ = box[tf.newaxis, tf.newaxis, :]
cross_positions += tf.cast(cross_positions < -box_ / 2., tf.float32) * box_
cross_positions -= tf.cast(cross_positions > box_ / 2., tf.float32) * box_
# Calculates adjacency matrix in a sparse format (indices), based on the given
# distances and threshold.
distances = tf.norm(cross_positions, axis=-1)
indices = tf.where(distances < edge_threshold)
# Defines graph.
nodes = types[:, tf.newaxis]
senders = indices[:, 0]
receivers = indices[:, 1]
edges = tf.gather_nd(cross_positions, indices)
return graphs.GraphsTuple(
nodes=tf.cast(nodes, tf.float32),
n_node=tf.reshape(tf.shape(nodes)[0], [1]),
edges=tf.cast(edges, tf.float32),
n_edge=tf.reshape(tf.shape(edges)[0], [1]),
globals=tf.zeros((1, 1), dtype=tf.float32),
receivers=tf.cast(receivers, tf.int32),
senders=tf.cast(senders, tf.int32)
)
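# Illustrative sketch, not part of the original file: two particles at distance
# 1.5 in a 10 x 10 x 10 box. With a threshold of 2 this yields two self-edges
# and two directed edges between the particles. All values are made up.
def _example_make_graph():
  positions = tf.constant([[0., 0., 0.], [1.5, 0., 0.]], dtype=tf.float32)
  types = tf.constant([0, 1], dtype=tf.int32)
  box = tf.constant([10., 10., 10.], dtype=tf.float32)
  return make_graph_from_static_structure(positions, types, box,
                                          edge_threshold=2.0)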
def apply_random_rotation(graph: graphs.GraphsTuple) -> graphs.GraphsTuple:
"""Returns randomly rotated graph representation.
The rotation is an element of O(3) with rotation angles multiple of pi/2.
This function assumes that the relative particle distances are stored in
the edge features.
Args:
graph: The graphs tuple as defined in `graph_nets.graphs`.
"""
# Transposes edge features, so that the axes are in the first dimension.
  # Outputs a tensor of shape [3, n_edges].
xyz = tf.transpose(graph.edges)
# Random pi/2 rotation(s)
permutation = tf.random.shuffle(tf.constant([0, 1, 2], dtype=tf.int32))
xyz = tf.gather(xyz, permutation)
# Random reflections.
symmetry = tf.random_uniform([3], minval=0, maxval=2, dtype=tf.int32)
symmetry = 1 - 2 * tf.cast(tf.reshape(symmetry, [3, 1]), tf.float32)
xyz = xyz * symmetry
edges = tf.transpose(xyz)
return graph.replace(edges=edges)
class GraphBasedModel(snt.AbstractModule):
"""Graph based model which predicts particle mobilities from their positions.
This network encodes the nodes and edges of the input graph independently, and
then performs message-passing on this graph, updating its edges based on their
associated nodes, then updating the nodes based on the input nodes' features
and their associated updated edge features.
This update is repeated several times.
Afterwards the resulting node embeddings are decoded to predict the particle
mobility.
"""
def __init__(self,
n_recurrences: int,
mlp_sizes: Tuple[int],
mlp_kwargs: Optional[Dict[Text, Any]] = None,
name='Graph'):
"""Creates a new GraphBasedModel object.
Args:
n_recurrences: the number of message passing steps in the graph network.
mlp_sizes: the number of neurons in each layer of the MLP.
      mlp_kwargs: additional keyword arguments passed to the MLP.
name: the name of the Sonnet module.
"""
super(GraphBasedModel, self).__init__(name=name)
self._n_recurrences = n_recurrences
if mlp_kwargs is None:
mlp_kwargs = {}
model_fn = functools.partial(
snt.nets.MLP,
output_sizes=mlp_sizes,
activate_final=True,
**mlp_kwargs)
final_model_fn = functools.partial(
snt.nets.MLP,
output_sizes=mlp_sizes + (1,),
activate_final=False,
**mlp_kwargs)
with self._enter_variable_scope():
self._encoder = gn_modules.GraphIndependent(
node_model_fn=model_fn,
edge_model_fn=model_fn)
if self._n_recurrences > 0:
self._propagation_network = gn_modules.GraphNetwork(
node_model_fn=model_fn,
edge_model_fn=model_fn,
# We do not use globals, hence we just pass the identity function.
global_model_fn=lambda: lambda x: x,
reducer=tf.unsorted_segment_sum,
edge_block_opt=dict(use_globals=False),
node_block_opt=dict(use_globals=False),
global_block_opt=dict(use_globals=False))
self._decoder = gn_modules.GraphIndependent(
node_model_fn=final_model_fn,
edge_model_fn=model_fn)
def _build(self, graphs_tuple: graphs.GraphsTuple) -> tf.Tensor:
"""Connects the model into the tensorflow graph.
Args:
graphs_tuple: input graph tensor as defined in `graphs_tuple.graphs`.
Returns:
tensor with shape [n_particles] containing the predicted particle
mobilities.
"""
encoded = self._encoder(graphs_tuple)
outputs = encoded
for _ in range(self._n_recurrences):
# Adds skip connections.
inputs = utils_tf.concat([outputs, encoded], axis=-1)
outputs = self._propagation_network(inputs)
decoded = self._decoder(outputs)
return tf.squeeze(decoded.nodes, axis=-1)
| deepmind-research-master | glassy_dynamics/graph_model.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for train."""
import os
import numpy as np
import tensorflow.compat.v1 as tf
from glassy_dynamics import train
class TrainTest(tf.test.TestCase):
def test_get_targets(self):
initial_positions = np.array([[0, 0, 0], [1, 2, 3]])
trajectory_target_positions = [
np.array([[1, 0, 0], [1, 2, 4]]),
np.array([[0, 1, 0], [1, 0, 3]]),
np.array([[0, 0, 5], [1, 2, 3]]),
]
expected_targets = np.array([7.0 / 3.0, 1.0])
targets = train.get_targets(initial_positions, trajectory_target_positions)
np.testing.assert_almost_equal(expected_targets, targets)
def test_load_data(self):
file_pattern = os.path.join(os.path.dirname(__file__), 'testdata',
'test_small.pickle')
with self.subTest('ContentAndShapesAreAsExpected'):
data = train.load_data(file_pattern, 0)
self.assertEqual(len(data), 1)
element = data[0]
self.assertTupleEqual(element.positions.shape, (20, 3))
self.assertTupleEqual(element.box.shape, (3,))
self.assertTupleEqual(element.targets.shape, (20,))
self.assertTupleEqual(element.types.shape, (20,))
with self.subTest('TargetsGrowAsAFunctionOfTime'):
previous_mean_target = 0.0
      # Time index 9 corresponds to 1/e (~0.37) of the intermediate scattering
      # function, and therefore lies between time index 5 (0.4) and time index
      # 6 (0.3).
for time_index in [0, 1, 2, 3, 4, 5, 9, 6, 7, 8]:
data = train.load_data(file_pattern, time_index)[0]
current_mean_target = data.targets.mean()
self.assertGreater(current_mean_target, previous_mean_target)
previous_mean_target = current_mean_target
class TensorflowTrainTest(tf.test.TestCase):
def test_get_loss_op(self):
"""Tests the correct calculation of the loss operations."""
prediction = tf.constant([0.0, 1.0, 2.0, 1.0, 2.0], dtype=tf.float32)
target = tf.constant([1.0, 25.0, 0.0, 4.0, 2.0], dtype=tf.float32)
types = tf.constant([0, 1, 0, 0, 0], dtype=tf.int32)
loss_ops = train.get_loss_ops(prediction, target, types)
loss = self.evaluate(loss_ops)
self.assertAlmostEqual(loss.l1_loss, 1.5)
self.assertAlmostEqual(loss.l2_loss, 14.0 / 4.0)
self.assertAlmostEqual(loss.correlation, -0.15289416)
def test_get_minimize_op(self):
"""Tests the minimize operation by minimizing a single variable."""
var = tf.Variable([1.0], name='test')
loss = var**2
minimize = train.get_minimize_op(loss, 1e-1)
with self.session():
tf.global_variables_initializer().run()
for _ in range(100):
minimize.run()
value = var.eval()
self.assertLess(abs(value[0]), 0.01)
def test_train_model(self):
"""Tests if we can overfit to a small test dataset."""
file_pattern = os.path.join(os.path.dirname(__file__), 'testdata',
'test_small.pickle')
best_correlation_value = train.train_model(
train_file_pattern=file_pattern,
test_file_pattern=file_pattern,
n_epochs=1000,
augment_data_using_rotations=False,
learning_rate=1e-4,
n_recurrences=2,
edge_threshold=5,
mlp_sizes=(32, 32),
measurement_store_interval=1000)
# The test dataset contains only a single sample with 20 particles.
# Therefore we expect the model to be able to memorize the targets perfectly
# if the model works correctly.
self.assertGreater(best_correlation_value, 0.99)
def test_apply_model(self):
"""Tests if we can apply a model to a small test dataset."""
checkpoint_path = os.path.join(os.path.dirname(__file__), 'checkpoints',
't044_s09.ckpt')
file_pattern = os.path.join(os.path.dirname(__file__), 'testdata',
'test_large.pickle')
predictions = train.apply_model(checkpoint_path=checkpoint_path,
file_pattern=file_pattern,
time_index=0)
data = train.load_data(file_pattern, 0)
targets = data[0].targets
correlation_value = np.corrcoef(predictions[0], targets)[0, 1]
self.assertGreater(correlation_value, 0.5)
if __name__ == '__main__':
tf.test.main()
| deepmind-research-master | glassy_dynamics/train_test.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for graph_model."""
import itertools
from absl.testing import parameterized
from graph_nets import graphs
import numpy as np
import tensorflow.compat.v1 as tf
from glassy_dynamics import graph_model
class GraphModelTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
"""Initializes a small tractable test (particle) system."""
super(GraphModelTest, self).setUp()
# Fixes random seed to ensure deterministic outputs.
tf.random.set_random_seed(1234)
# In this test we use a small tractable set of particles covering all corner
# cases:
# a) eight particles with different types,
# b) periodic box is not cubic,
# c) three disjoint cluster of particles separated by a threshold > 2,
# d) first two clusters overlap with the periodic boundary,
# e) first cluster is not fully connected,
# f) second cluster is fully connected,
# g) and third cluster is a single isolated particle.
#
# The formatting of the code below separates the three clusters by
# adding linebreaks after each cluster.
self._positions = np.array(
[[0.0, 0.0, 0.0], [2.5, 0.0, 0.0], [0.0, 1.5, 0.0], [0.0, 0.0, 9.0],
[0.0, 5.0, 0.0], [0.0, 5.0, 1.0], [3.0, 5.0, 0.0],
[2.0, 3.0, 3.0]])
self._types = np.array([0.0, 0.0, 1.0, 0.0,
0.0, 1.0, 0.0,
0.0])
self._box = np.array([4.0, 10.0, 10.0])
# Creates the corresponding graph elements, assuming a threshold of 2 and
# the conventions described in `graph_nets.graphs`.
self._edge_threshold = 2
self._nodes = np.array(
[[0.0], [0.0], [1.0], [0.0],
[0.0], [1.0], [0.0],
[0.0]])
self._edges = np.array(
[[0.0, 0.0, 0.0], [-1.5, 0.0, 0.0], [0.0, 1.5, 0.0], [0.0, 0.0, -1.0],
[1.5, 0.0, 0.0], [0.0, 0.0, 0.0], [1.5, 0.0, -1.0],
[0.0, -1.5, 0.0], [0.0, 0.0, 0.0], [0.0, -1.5, -1.0],
[0.0, 0.0, 1.0], [-1.5, 0.0, 1.0], [0.0, 1.5, 1.0], [0.0, 0.0, 0.0],
[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0], [0.0, 0.0, 0.0], [-1.0, 0.0, -1.0],
[1.0, 0.0, 0.0], [1.0, 0.0, 1.0], [0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
self._receivers = np.array(
[0, 1, 2, 3, 0, 1, 3, 0, 2, 3, 0, 1, 2, 3,
4, 5, 6, 4, 5, 6, 4, 5, 6,
7])
self._senders = np.array(
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3,
4, 4, 4, 5, 5, 5, 6, 6, 6,
7])
def _get_graphs_tuple(self):
"""Returns a GraphsTuple containing a graph based on the test system."""
return graphs.GraphsTuple(
nodes=tf.constant(self._nodes, dtype=tf.float32),
edges=tf.constant(self._edges, dtype=tf.float32),
globals=tf.constant(np.array([[0.0]]), dtype=tf.float32),
receivers=tf.constant(self._receivers, dtype=tf.int32),
senders=tf.constant(self._senders, dtype=tf.int32),
n_node=tf.constant([len(self._nodes)], dtype=tf.int32),
n_edge=tf.constant([len(self._edges)], dtype=tf.int32))
def test_make_graph_from_static_structure(self):
graphs_tuple_op = graph_model.make_graph_from_static_structure(
tf.constant(self._positions, dtype=tf.float32),
tf.constant(self._types, dtype=tf.int32),
tf.constant(self._box, dtype=tf.float32),
self._edge_threshold)
graphs_tuple = self.evaluate(graphs_tuple_op)
self.assertLen(self._nodes, graphs_tuple.n_node)
self.assertLen(self._edges, graphs_tuple.n_edge)
np.testing.assert_almost_equal(graphs_tuple.nodes, self._nodes)
np.testing.assert_equal(graphs_tuple.senders, self._senders)
np.testing.assert_equal(graphs_tuple.receivers, self._receivers)
np.testing.assert_almost_equal(graphs_tuple.globals, np.array([[0.0]]))
np.testing.assert_almost_equal(graphs_tuple.edges, self._edges)
def _is_equal_up_to_rotation(self, x, y):
for axes in itertools.permutations([0, 1, 2]):
for mirrors in itertools.product([1, -1], repeat=3):
if np.allclose(x, y[:, axes] * mirrors):
return True
return False
def test_apply_random_rotation(self):
graphs_tuple = self._get_graphs_tuple()
rotated_graphs_tuple_op = graph_model.apply_random_rotation(graphs_tuple)
rotated_graphs_tuple = self.evaluate(rotated_graphs_tuple_op)
np.testing.assert_almost_equal(rotated_graphs_tuple.nodes, self._nodes)
np.testing.assert_almost_equal(rotated_graphs_tuple.senders, self._senders)
np.testing.assert_almost_equal(
rotated_graphs_tuple.receivers, self._receivers)
np.testing.assert_almost_equal(
rotated_graphs_tuple.globals, np.array([[0.0]]))
self.assertTrue(self._is_equal_up_to_rotation(rotated_graphs_tuple.edges,
self._edges))
@parameterized.named_parameters(('no_propagation', 0, (30,)),
('multi_propagation', 5, (15,)),
('multi_layer', 1, (20, 30)))
def test_GraphModel(self, n_recurrences, mlp_sizes):
graphs_tuple = self._get_graphs_tuple()
output_op = graph_model.GraphBasedModel(n_recurrences=n_recurrences,
mlp_sizes=mlp_sizes)(graphs_tuple)
self.assertListEqual(output_op.shape.as_list(), [len(self._types)])
# Tests if the model runs without crashing.
with self.session():
tf.global_variables_initializer().run()
output_op.eval()
if __name__ == '__main__':
tf.test.main()
| deepmind-research-master | glassy_dynamics/graph_model_test.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training pipeline for the prediction of particle mobilities in glasses."""
import collections
import enum
import pickle
from typing import Any, Dict, List, Optional, Text, Tuple, Sequence
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from glassy_dynamics import graph_model
tf.enable_resource_variables()
LossCollection = collections.namedtuple('LossCollection',
'l1_loss, l2_loss, correlation')
GlassSimulationData = collections.namedtuple('GlassSimulationData',
'positions, targets, types, box')
class ParticleType(enum.IntEnum):
"""The simulation contains two particle types, identified as type A and B.
The dataset encodes the particle type in an integer.
- 0 corresponds to particle type A.
- 1 corresponds to particle type B.
"""
A = 0
B = 1
def get_targets(
initial_positions: np.ndarray,
trajectory_target_positions: Sequence[np.ndarray]) -> np.ndarray:
"""Returns the averaged particle mobilities from the sampled trajectories.
Args:
initial_positions: the initial positions of the particles with shape
[n_particles, 3].
trajectory_target_positions: the absolute positions of the particles at the
target time for all sampled trajectories, each with shape
[n_particles, 3].
"""
targets = np.mean([np.linalg.norm(t - initial_positions, axis=-1)
for t in trajectory_target_positions], axis=0)
return targets.astype(np.float32)
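# Illustrative sketch, not part of the original file: a particle's mobility is
# its displacement from the initial position, averaged over the sampled
# trajectories (here two hypothetical trajectories with displacements 1 and 3).
def _example_get_targets():
  initial = np.array([[0., 0., 0.]])
  trajectories = [np.array([[1., 0., 0.]]), np.array([[0., 3., 0.]])]
  return get_targets(initial, trajectories)  # -> array([2.], dtype=float32)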
def load_data(
file_pattern: Text,
time_index: int,
max_files_to_load: Optional[int] = None) -> List[GlassSimulationData]:
"""Returns a dictionary containing the training or test dataset.
The dictionary contains:
`positions`: `np.ndarray` containing the particle positions with shape
[n_particles, 3].
`targets`: `np.ndarray` containing particle mobilities with shape
[n_particles].
`types`: `np.ndarray` containing the particle types with shape with shape
[n_particles].
`box`: `np.ndarray` containing the dimensions of the periodic box with shape
[3].
Args:
file_pattern: pattern matching the files with the simulation data.
time_index: the time index of the targets.
max_files_to_load: the maximum number of files to load.
"""
filenames = tf.io.gfile.glob(file_pattern)
if max_files_to_load:
filenames = filenames[:max_files_to_load]
static_structures = []
for filename in filenames:
with tf.io.gfile.GFile(filename, 'rb') as f:
data = pickle.load(f)
static_structures.append(GlassSimulationData(
positions=data['positions'].astype(np.float32),
targets=get_targets(
data['positions'], data['trajectory_target_positions'][time_index]),
types=data['types'].astype(np.int32),
box=data['box'].astype(np.float32)))
return static_structures
def get_loss_ops(
prediction: tf.Tensor,
target: tf.Tensor,
types: tf.Tensor) -> LossCollection:
"""Returns L1/L2 loss and correlation for type A particles.
Args:
prediction: tensor with shape [n_particles] containing the predicted
particle mobilities.
target: tensor with shape [n_particles] containing the true particle
mobilities.
types: tensor with shape [n_particles] containing the particle types.
"""
# Considers only type A particles.
mask = tf.equal(types, ParticleType.A)
prediction = tf.boolean_mask(prediction, mask)
target = tf.boolean_mask(target, mask)
return LossCollection(
l1_loss=tf.reduce_mean(tf.abs(prediction - target)),
l2_loss=tf.reduce_mean((prediction - target)**2),
correlation=tf.squeeze(tfp.stats.correlation(
prediction[:, tf.newaxis], target[:, tf.newaxis])))
def get_minimize_op(
loss: tf.Tensor,
learning_rate: float,
grad_clip: Optional[float] = None) -> tf.Tensor:
"""Returns minimization operation.
Args:
loss: the loss tensor which is minimized.
learning_rate: the learning rate used by the optimizer.
grad_clip: all gradients are clipped to the given value if not None or 0.
"""
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(loss)
if grad_clip:
grads, _ = tf.clip_by_global_norm([g for g, _ in grads_and_vars], grad_clip)
grads_and_vars = [(g, pair[1]) for g, pair in zip(grads, grads_and_vars)]
minimize = optimizer.apply_gradients(grads_and_vars)
return minimize
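# Illustrative sketch, not part of the original file: minimizing a toy
# quadratic with gradient clipping enabled, similar in spirit to the unit test
# in glassy_dynamics/train_test.py. The variable name is a made-up example.
def _example_get_minimize_op():
  var = tf.Variable([5.0], name='example_var')
  loss = tf.reduce_sum(var ** 2)
  return get_minimize_op(loss, learning_rate=1e-1, grad_clip=1.0)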
def _log_stats_and_return_mean_correlation(
label: Text,
stats: Sequence[LossCollection]) -> float:
"""Logs performance statistics and returns mean correlation.
Args:
label: label printed before the combined statistics e.g. train or test.
stats: statistics calculated for each batch in a dataset.
Returns:
mean correlation
"""
for key in LossCollection._fields:
values = [getattr(s, key) for s in stats]
mean = np.mean(values)
std = np.std(values)
logging.info('%s: %s: %.4f +/- %.4f', label, key, mean, std)
return np.mean([s.correlation for s in stats])
def train_model(train_file_pattern: Text,
test_file_pattern: Text,
max_files_to_load: Optional[int] = None,
n_epochs: int = 1000,
time_index: int = 9,
augment_data_using_rotations: bool = True,
learning_rate: float = 1e-4,
grad_clip: Optional[float] = 1.0,
n_recurrences: int = 7,
mlp_sizes: Tuple[int] = (64, 64),
mlp_kwargs: Optional[Dict[Text, Any]] = None,
edge_threshold: float = 2.0,
measurement_store_interval: int = 1000,
checkpoint_path: Optional[Text] = None) -> float: # pytype: disable=annotation-type-mismatch
"""Trains GraphModel using tensorflow.
Args:
train_file_pattern: pattern matching the files with the training data.
test_file_pattern: pattern matching the files with the test data.
max_files_to_load: the maximum number of train and test files to load.
If None, all files will be loaded.
n_epochs: the number of passes through the training dataset (epochs).
time_index: the time index (0-9) of the target mobilities.
    augment_data_using_rotations: data is augmented using random rotations.
learning_rate: the learning rate used by the optimizer.
grad_clip: all gradients are clipped to the given value.
n_recurrences: the number of message passing steps in the graphnet.
mlp_sizes: the number of neurons in each layer of the MLP.
    mlp_kwargs: additional keyword arguments passed to the MLP.
edge_threshold: particles at distance less than threshold are connected by
an edge.
measurement_store_interval: number of steps between storing objective values
(loss and correlation).
checkpoint_path: path used to store the checkpoint with the highest
correlation on the test set.
Returns:
Correlation on the test dataset of best model encountered during training.
"""
if mlp_kwargs is None:
mlp_kwargs = dict(initializers=dict(w=tf.variance_scaling_initializer(1.0),
b=tf.variance_scaling_initializer(0.1)))
# Loads train and test dataset.
dataset_kwargs = dict(
time_index=time_index,
max_files_to_load=max_files_to_load)
training_data = load_data(train_file_pattern, **dataset_kwargs)
test_data = load_data(test_file_pattern, **dataset_kwargs)
# Defines wrapper functions, which can directly be passed to the
# tf.data.Dataset.map function.
def _make_graph_from_static_structure(static_structure):
"""Converts static structure to graph, targets and types."""
return (graph_model.make_graph_from_static_structure(
static_structure.positions,
static_structure.types,
static_structure.box,
edge_threshold),
static_structure.targets,
static_structure.types)
def _apply_random_rotation(graph, targets, types):
"""Applies random rotations to the graph and forwards targets and types."""
return graph_model.apply_random_rotation(graph), targets, types
# Defines data-pipeline based on tf.data.Dataset following the official
# guideline: https://www.tensorflow.org/guide/datasets#consuming_numpy_arrays.
# We use initializable iterators to avoid embedding the training and test data
# directly into the graph.
  # Instead we feed the data to the iterators during their initialization,
  # before the main training loop.
placeholders = GlassSimulationData._make(
tf.placeholder(s.dtype, (None,) + s.shape) for s in training_data[0])
dataset = tf.data.Dataset.from_tensor_slices(placeholders)
dataset = dataset.map(_make_graph_from_static_structure)
dataset = dataset.cache()
dataset = dataset.shuffle(400)
# Augments data. This has to be done after calling dataset.cache!
if augment_data_using_rotations:
dataset = dataset.map(_apply_random_rotation)
dataset = dataset.repeat()
train_iterator = dataset.make_initializable_iterator()
dataset = tf.data.Dataset.from_tensor_slices(placeholders)
dataset = dataset.map(_make_graph_from_static_structure)
dataset = dataset.cache()
dataset = dataset.repeat()
test_iterator = dataset.make_initializable_iterator()
# Creates tensorflow graph.
# Note: We decouple the training and test datasets from the input pipeline
# by creating a new iterator from a string-handle placeholder with the same
# output types and shapes as the training dataset.
dataset_handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
dataset_handle, train_iterator.output_types, train_iterator.output_shapes)
graph, targets, types = iterator.get_next()
model = graph_model.GraphBasedModel(
n_recurrences, mlp_sizes, mlp_kwargs)
prediction = model(graph)
# Defines loss and minimization operations.
loss_ops = get_loss_ops(prediction, targets, types)
minimize_op = get_minimize_op(loss_ops.l2_loss, learning_rate, grad_clip)
best_so_far = -1
train_stats = []
test_stats = []
saver = tf.train.Saver()
with tf.train.SingularMonitoredSession() as session:
# Initializes train and test iterators with the training and test datasets.
# The obtained training and test string-handles can be passed to the
# dataset_handle placeholder to select the dataset.
train_handle = session.run(train_iterator.string_handle())
test_handle = session.run(test_iterator.string_handle())
feed_dict = {p: [x[i] for x in training_data]
for i, p in enumerate(placeholders)}
session.run(train_iterator.initializer, feed_dict=feed_dict)
feed_dict = {p: [x[i] for x in test_data]
for i, p in enumerate(placeholders)}
session.run(test_iterator.initializer, feed_dict=feed_dict)
    # Trains model using stochastic gradient descent on the training dataset.
n_training_steps = len(training_data) * n_epochs
for i in range(n_training_steps):
feed_dict = {dataset_handle: train_handle}
train_loss, _ = session.run((loss_ops, minimize_op), feed_dict=feed_dict)
train_stats.append(train_loss)
if (i+1) % measurement_store_interval == 0:
# Evaluates model on test dataset.
for _ in range(len(test_data)):
feed_dict = {dataset_handle: test_handle}
test_stats.append(session.run(loss_ops, feed_dict=feed_dict))
# Outputs performance statistics on training and test dataset.
_log_stats_and_return_mean_correlation('Train', train_stats)
correlation = _log_stats_and_return_mean_correlation('Test', test_stats)
train_stats = []
test_stats = []
# Updates best model based on the observed correlation on the test
# dataset.
if correlation > best_so_far:
best_so_far = correlation
if checkpoint_path:
saver.save(session.raw_session(), checkpoint_path)
return best_so_far
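# Example invocation (file patterns and checkpoint path are hypothetical):
#   best_correlation = train_model(
#       train_file_pattern='train/aggregated*',
#       test_file_pattern='test/aggregated*',
#       n_epochs=10,
#       checkpoint_path='/tmp/glassy_dynamics_best.ckpt')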
def apply_model(checkpoint_path: Text,
file_pattern: Text,
max_files_to_load: Optional[int] = None,
time_index: int = 9) -> List[np.ndarray]:
"""Applies trained GraphModel using tensorflow.
Args:
checkpoint_path: path from which the model is loaded.
file_pattern: pattern matching the files with the data.
max_files_to_load: the maximum number of files to load.
If None, all files will be loaded.
time_index: the time index (0-9) of the target mobilities.
Returns:
Predictions of the model for all files.
"""
dataset_kwargs = dict(
time_index=time_index,
max_files_to_load=max_files_to_load)
data = load_data(file_pattern, **dataset_kwargs)
tf.reset_default_graph()
saver = tf.train.import_meta_graph(checkpoint_path + '.meta')
graph = tf.get_default_graph()
placeholders = GlassSimulationData(
positions=graph.get_tensor_by_name('Placeholder:0'),
targets=graph.get_tensor_by_name('Placeholder_1:0'),
types=graph.get_tensor_by_name('Placeholder_2:0'),
box=graph.get_tensor_by_name('Placeholder_3:0'))
prediction_tensor = graph.get_tensor_by_name('Graph_1/Squeeze:0')
correlation_tensor = graph.get_tensor_by_name('Squeeze:0')
dataset_handle = graph.get_tensor_by_name('Placeholder_4:0')
  test_initializer = graph.get_operation_by_name('MakeIterator_1')
test_string_handle = graph.get_tensor_by_name('IteratorToStringHandle_1:0')
with tf.Session() as session:
saver.restore(session, checkpoint_path)
handle = session.run(test_string_handle)
feed_dict = {p: [x[i] for x in data] for i, p in enumerate(placeholders)}
    session.run(test_initializer, feed_dict=feed_dict)
predictions = []
correlations = []
for _ in range(len(data)):
p, c = session.run((prediction_tensor, correlation_tensor),
feed_dict={dataset_handle: handle})
predictions.append(p)
correlations.append(c)
logging.info('Correlation: %.4f +/- %.4f',
np.mean(correlations),
np.std(correlations))
return predictions
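# Example invocation (the file pattern is hypothetical; the checkpoint path
# matches the default used by apply_binary.py):
#   predictions = apply_model(
#       checkpoint_path='checkpoints/t044_s09.ckpt',
#       file_pattern='test/aggregated*',
#       time_index=9)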
| deepmind-research-master | glassy_dynamics/train.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Applies a graph-based network to predict particle mobilities in glasses."""
import os
from absl import app
from absl import flags
from glassy_dynamics import train
FLAGS = flags.FLAGS
flags.DEFINE_string(
'data_directory',
'',
'Directory which contains the train or test datasets.')
flags.DEFINE_integer(
'time_index',
9,
'The time index of the target mobilities.')
flags.DEFINE_integer(
'max_files_to_load',
None,
'The maximum number of files to load.')
flags.DEFINE_string(
'checkpoint_path',
'checkpoints/t044_s09.ckpt',
'Path used to load the model.')
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
file_pattern = os.path.join(FLAGS.data_directory, 'aggregated*')
train.apply_model(
checkpoint_path=FLAGS.checkpoint_path,
file_pattern=file_pattern,
max_files_to_load=FLAGS.max_files_to_load,
time_index=FLAGS.time_index)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | glassy_dynamics/apply_binary.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training pipeline for the prediction of particle mobilities in glasses."""
import enum
import functools
import logging
import pickle
import random
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import optax
# Only used for file operations.
# You can use glob.glob and python's open function to replace the tf usage below
# on most platforms.
import tensorflow.compat.v1 as tf
class ParticleType(enum.IntEnum):
"""The simulation contains two particle types, identified as type A and B.
The dataset encodes the particle type in an integer.
- 0 corresponds to particle type A.
- 1 corresponds to particle type B.
"""
A = 0
B = 1
def make_graph_from_static_structure(positions, types, box, edge_threshold):
"""Returns graph representing the static structure of the glass.
Each particle is represented by a node in the graph. The particle type is
stored as a node feature.
Two particles at a distance less than the threshold are connected by an edge.
The relative distance vector is stored as an edge feature.
Args:
positions: particle positions with shape [n_particles, 3].
types: particle types with shape [n_particles].
box: dimensions of the cubic box that contains the particles with shape [3].
edge_threshold: particles at distance less than threshold are connected by
an edge.
"""
# Calculate pairwise relative distances between particles: shape [n, n, 3].
cross_positions = positions[None, :, :] - positions[:, None, :]
# Enforces periodic boundary conditions.
box_ = box[None, None, :]
cross_positions += (cross_positions < -box_ / 2.).astype(np.float32) * box_
cross_positions -= (cross_positions > box_ / 2.).astype(np.float32) * box_
# Calculates adjacency matrix in a sparse format (indices), based on the given
# distances and threshold.
distances = np.linalg.norm(cross_positions, axis=-1)
indices = np.where(distances < edge_threshold)
# Defines graph.
nodes = types[:, None]
senders = indices[0]
receivers = indices[1]
edges = cross_positions[indices]
return jraph.pad_with_graphs(jraph.GraphsTuple(
nodes=nodes.astype(np.float32),
n_node=np.reshape(nodes.shape[0], [1]),
edges=edges.astype(np.float32),
n_edge=np.reshape(edges.shape[0], [1]),
globals=np.zeros((1, 1), dtype=np.float32),
receivers=receivers.astype(np.int32),
senders=senders.astype(np.int32)
), n_node=4097, n_edge=200000)
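# Worked example of the periodic-boundary wrapping above (hypothetical
# one-dimensional values): in a box of length 10.0, particles at 0.5 and 9.5
# have a raw displacement of 9.0, which exceeds half the box length and is
# therefore wrapped to -1.0, the minimum-image displacement.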
def get_targets(initial_positions, trajectory_target_positions):
"""Returns the averaged particle mobilities from the sampled trajectories.
Args:
initial_positions: the initial positions of the particles with shape
[n_particles, 3].
trajectory_target_positions: the absolute positions of the particles at the
target time for all sampled trajectories, each with shape
[n_particles, 3].
"""
targets = np.mean([np.linalg.norm(t - initial_positions, axis=-1)
for t in trajectory_target_positions], axis=0)
return targets.astype(np.float32)
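# Worked example (hypothetical values): a particle that starts at the origin
# and is displaced by a distance of 1.0 in one sampled trajectory and 3.0 in
# another gets a target mobility of (1.0 + 3.0) / 2 = 2.0.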
def load_data(file_pattern, time_index, max_files_to_load=None):
"""Returns a graphs and targets of the training or test dataset.
Args:
file_pattern: pattern matching the files with the simulation data.
time_index: the time index of the targets.
max_files_to_load: the maximum number of files to load.
"""
filenames = tf.io.gfile.glob(file_pattern)
if max_files_to_load:
filenames = filenames[:max_files_to_load]
graphs_and_targets = []
for filename in filenames:
with tf.io.gfile.GFile(filename, 'rb') as f:
data = pickle.load(f)
mask = (data['types'] == ParticleType.A).astype(np.int32)
    # Masks the dummy node introduced by graph padding.
mask = np.concatenate([mask, np.zeros((1,), dtype=np.int32)], axis=-1)
targets = get_targets(
data['positions'], data['trajectory_target_positions'][time_index])
targets = np.concatenate(
[targets, np.zeros((1,), dtype=np.float32)], axis=-1)
graphs_and_targets.append(
(make_graph_from_static_structure(
data['positions'].astype(np.float32),
data['types'].astype(np.int32),
data['box'].astype(np.float32),
edge_threshold=2.0),
targets,
mask))
return graphs_and_targets
def apply_random_rotation(graph):
"""Returns randomly rotated graph representation.
  The rotation is an element of O(3) whose rotation angles are multiples of pi/2.
This function assumes that the relative particle distances are stored in
the edge features.
Args:
graph: The graphs tuple as defined in `graph_nets.graphs`.
"""
# Transposes edge features, so that the axes are in the first dimension.
  # Outputs a tensor of shape [3, n_edges].
xyz = np.transpose(graph.edges)
# Random pi/2 rotation(s)
permutation = np.array([0, 1, 2], dtype=np.int32)
np.random.shuffle(permutation)
xyz = xyz[permutation]
# Random reflections.
symmetry = np.random.randint(0, 2, [3])
symmetry = 1 - 2 * np.reshape(symmetry, [3, 1]).astype(np.float32)
xyz = xyz * symmetry
edges = np.transpose(xyz)
return graph._replace(edges=edges)
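# Illustrative note (hypothetical edge vector): the sampled operation is one of
# 3! * 2**3 = 48 signed axis permutations, so an edge feature [1., 2., 3.] may,
# for instance, become [-2., 1., 3.] (first two axes swapped, first axis
# reflected).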
def network_definition(graph):
"""Defines a graph neural network.
Args:
    graph: GraphsTuple that the network processes.
Returns:
Decoded nodes.
"""
model_fn = functools.partial(
hk.nets.MLP,
w_init=hk.initializers.VarianceScaling(1.0),
b_init=hk.initializers.VarianceScaling(1.0))
mlp_sizes = (64, 64)
num_message_passing_steps = 7
node_encoder = model_fn(output_sizes=mlp_sizes, activate_final=True)
edge_encoder = model_fn(output_sizes=mlp_sizes, activate_final=True)
node_decoder = model_fn(output_sizes=mlp_sizes + (1,), activate_final=False)
node_encoding = node_encoder(graph.nodes)
edge_encoding = edge_encoder(graph.edges)
graph = graph._replace(nodes=node_encoding, edges=edge_encoding)
update_edge_fn = jraph.concatenated_args(
model_fn(output_sizes=mlp_sizes, activate_final=True))
update_node_fn = jraph.concatenated_args(
model_fn(output_sizes=mlp_sizes, activate_final=True))
gn = jraph.InteractionNetwork(
update_edge_fn=update_edge_fn,
update_node_fn=update_node_fn,
include_sent_messages_in_node_update=True)
for _ in range(num_message_passing_steps):
graph = graph._replace(
nodes=jnp.concatenate([graph.nodes, node_encoding], axis=-1),
edges=jnp.concatenate([graph.edges, edge_encoding], axis=-1))
graph = gn(graph)
return jnp.squeeze(node_decoder(graph.nodes), axis=-1)
def train_model(train_file_pattern,
test_file_pattern,
max_files_to_load=None,
n_epochs=1000,
time_index=9,
learning_rate=1e-4,
grad_clip=1.0,
measurement_store_interval=1000,
checkpoint_path=None):
"""Trains GraphModel using tensorflow.
Args:
train_file_pattern: pattern matching the files with the training data.
test_file_pattern: pattern matching the files with the test data.
max_files_to_load: the maximum number of train and test files to load.
If None, all files will be loaded.
n_epochs: the number of passes through the training dataset (epochs).
time_index: the time index (0-9) of the target mobilities.
learning_rate: the learning rate used by the optimizer.
    grad_clip: all gradients are clipped to the given global norm.
measurement_store_interval: number of steps between storing objective values
(loss and correlation).
checkpoint_path: ignored by this implementation.
"""
if checkpoint_path:
logging.warning('The checkpoint_path argument is ignored.')
random.seed(42)
np.random.seed(42)
# Loads train and test dataset.
dataset_kwargs = dict(
time_index=time_index,
max_files_to_load=max_files_to_load)
logging.info('Load training data')
training_data = load_data(train_file_pattern, **dataset_kwargs)
logging.info('Load test data')
test_data = load_data(test_file_pattern, **dataset_kwargs)
logging.info('Finished loading data')
network = hk.without_apply_rng(hk.transform(network_definition))
params = network.init(jax.random.PRNGKey(42), training_data[0][0])
opt_init, opt_update = optax.chain(
optax.clip_by_global_norm(grad_clip),
optax.scale_by_adam(0.9, 0.999, 1e-8),
optax.scale(-learning_rate))
opt_state = opt_init(params)
network_apply = jax.jit(network.apply)
@jax.jit
def loss_fn(params, graph, targets, mask):
decoded_nodes = network_apply(params, graph) * mask
return (jnp.sum((decoded_nodes - targets)**2 * mask) /
jnp.sum(mask))
@jax.jit
def update(params, opt_state, graph, targets, mask):
loss, grads = jax.value_and_grad(loss_fn)(params, graph, targets, mask)
updates, opt_state = opt_update(grads, opt_state)
return optax.apply_updates(params, updates), opt_state, loss
train_stats = []
i = 0
logging.info('Start training')
for epoch in range(n_epochs):
logging.info('Start epoch %r', epoch)
random.shuffle(training_data)
for graph, targets, mask in training_data:
graph = apply_random_rotation(graph)
params, opt_state, loss = update(params, opt_state, graph, targets, mask)
train_stats.append(loss)
if (i+1) % measurement_store_interval == 0:
logging.info('Start evaluation run')
test_stats = []
for test_graph, test_targets, test_mask in test_data:
predictions = network_apply(params, test_graph)
test_stats.append(np.corrcoef(
predictions[test_mask == 1], test_targets[test_mask == 1])[0, 1])
logging.info('Train loss %r', np.mean(train_stats))
logging.info('Test correlation %r', np.mean(test_stats))
train_stats = []
i += 1
| deepmind-research-master | glassy_dynamics/train_using_jax.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to compute human-normalized Atari scores.
The data used in this module is human and random performance data on Atari-57.
It comprises evaluation scores (undiscounted returns), each averaged
over at least 3 episode runs, on each of the 57 Atari games. Each episode begins
with the environment already stepped with a uniform random number (between 1 and
30 inclusive) of noop actions.
The two agents are:
* 'random' (agent choosing its actions uniformly randomly on each step)
* 'human' (professional human game tester)
Scores are obtained by averaging returns over the episodes played by each agent,
with episode length capped to 108,000 frames (i.e. timeout after 30 minutes).
The term 'human-normalized' here means a linear per-game transformation of
a game score in such a way that 0 corresponds to random performance and 1
corresponds to human performance.
"""
import math
# Game: score-tuple dictionary. Each score tuple contains
# 0: score random (float) and 1: score human (float).
_ATARI_DATA = {
'alien': (227.8, 7127.7),
'amidar': (5.8, 1719.5),
'assault': (222.4, 742.0),
'asterix': (210.0, 8503.3),
'asteroids': (719.1, 47388.7),
'atlantis': (12850.0, 29028.1),
'bank_heist': (14.2, 753.1),
'battle_zone': (2360.0, 37187.5),
'beam_rider': (363.9, 16926.5),
'berzerk': (123.7, 2630.4),
'bowling': (23.1, 160.7),
'boxing': (0.1, 12.1),
'breakout': (1.7, 30.5),
'centipede': (2090.9, 12017.0),
'chopper_command': (811.0, 7387.8),
'crazy_climber': (10780.5, 35829.4),
'defender': (2874.5, 18688.9),
'demon_attack': (152.1, 1971.0),
'double_dunk': (-18.6, -16.4),
'enduro': (0.0, 860.5),
'fishing_derby': (-91.7, -38.7),
'freeway': (0.0, 29.6),
'frostbite': (65.2, 4334.7),
'gopher': (257.6, 2412.5),
'gravitar': (173.0, 3351.4),
'hero': (1027.0, 30826.4),
'ice_hockey': (-11.2, 0.9),
'jamesbond': (29.0, 302.8),
'kangaroo': (52.0, 3035.0),
'krull': (1598.0, 2665.5),
'kung_fu_master': (258.5, 22736.3),
'montezuma_revenge': (0.0, 4753.3),
'ms_pacman': (307.3, 6951.6),
'name_this_game': (2292.3, 8049.0),
'phoenix': (761.4, 7242.6),
'pitfall': (-229.4, 6463.7),
'pong': (-20.7, 14.6),
'private_eye': (24.9, 69571.3),
'qbert': (163.9, 13455.0),
'riverraid': (1338.5, 17118.0),
'road_runner': (11.5, 7845.0),
'robotank': (2.2, 11.9),
'seaquest': (68.4, 42054.7),
'skiing': (-17098.1, -4336.9),
'solaris': (1236.3, 12326.7),
'space_invaders': (148.0, 1668.7),
'star_gunner': (664.0, 10250.0),
'surround': (-10.0, 6.5),
'tennis': (-23.8, -8.3),
'time_pilot': (3568.0, 5229.2),
'tutankham': (11.4, 167.6),
'up_n_down': (533.4, 11693.2),
'venture': (0.0, 1187.5),
# Note the random agent score on Video Pinball is sometimes greater than the
# human score under other evaluation methods.
'video_pinball': (16256.9, 17667.9),
'wizard_of_wor': (563.5, 4756.5),
'yars_revenge': (3092.9, 54576.9),
'zaxxon': (32.5, 9173.3),
}
_RANDOM_COL = 0
_HUMAN_COL = 1
ATARI_GAMES = tuple(sorted(_ATARI_DATA.keys()))
def get_human_normalized_score(game: str, raw_score: float) -> float:
"""Converts game score to human-normalized score."""
game_scores = _ATARI_DATA.get(game, (math.nan, math.nan))
random, human = game_scores[_RANDOM_COL], game_scores[_HUMAN_COL]
return (raw_score - random) / (human - random)
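# Worked example: for 'pong' the (random, human) scores are (-20.7, 14.6), so a
# raw score of 0.0 maps to (0.0 - (-20.7)) / (14.6 - (-20.7)) ~= 0.59.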
| deepmind-research-master | tandem_dqn/atari_data.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agent network components and implementation."""
import typing
from typing import Any, Callable, Tuple, Union
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
Network = hk.Transformed
Params = hk.Params
NetworkFn = Callable[..., Any]
class QNetworkOutputs(typing.NamedTuple):
q_values: jnp.ndarray
class QRNetworkOutputs(typing.NamedTuple):
q_values: jnp.ndarray
q_dist: jnp.ndarray
NUM_QUANTILES = 201
def _dqn_default_initializer(
num_input_units: int) -> hk.initializers.Initializer:
"""Default initialization scheme inherited from past implementations of DQN.
This scheme was historically used to initialize all weights and biases
in convolutional and linear layers of DQN-type agents' networks.
It initializes each weight as an independent uniform sample from [`-c`, `c`],
where `c = 1 / np.sqrt(num_input_units)`, and `num_input_units` is the number
of input units affecting a single output unit in the given layer, i.e. the
total number of inputs in the case of linear (dense) layers, and
`num_input_channels * kernel_width * kernel_height` in the case of
convolutional layers.
Args:
num_input_units: number of input units to a single output unit of the layer.
Returns:
Haiku weight initializer.
"""
max_val = np.sqrt(1 / num_input_units)
return hk.initializers.RandomUniform(-max_val, max_val)
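# Worked example (layer sizes are hypothetical): a linear layer with 512 input
# units uses c = 1 / sqrt(512) ~= 0.044, i.e. weights drawn uniformly from
# [-0.044, 0.044]; a conv layer with 4 input channels and an 8x8 kernel uses
# c = 1 / sqrt(4 * 8 * 8) = 0.0625.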
def make_quantiles():
"""Quantiles for QR-DQN."""
return (jnp.arange(0, NUM_QUANTILES) + 0.5) / float(NUM_QUANTILES)
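# Worked example: with NUM_QUANTILES = 201 the returned quantile midpoints are
# (0.5 / 201, 1.5 / 201, ..., 200.5 / 201) ~= (0.0025, 0.0075, ..., 0.9975).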
def conv(
num_features: int,
kernel_shape: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]],
name=None,
) -> NetworkFn:
"""Convolutional layer with DQN's legacy weight initialization scheme."""
def net_fn(inputs):
"""Function representing conv layer with DQN's legacy initialization."""
num_input_units = inputs.shape[-1] * kernel_shape[0] * kernel_shape[1]
initializer = _dqn_default_initializer(num_input_units)
layer = hk.Conv2D(
num_features,
kernel_shape=kernel_shape,
stride=stride,
w_init=initializer,
b_init=initializer,
padding='VALID',
name=name)
return layer(inputs)
return net_fn
def linear(num_outputs: int, with_bias=True, name=None) -> NetworkFn:
"""Linear layer with DQN's legacy weight initialization scheme."""
def net_fn(inputs):
"""Function representing linear layer with DQN's legacy initialization."""
initializer = _dqn_default_initializer(inputs.shape[-1])
layer = hk.Linear(
num_outputs,
with_bias=with_bias,
w_init=initializer,
b_init=initializer,
name=name)
return layer(inputs)
return net_fn
def linear_with_shared_bias(num_outputs: int, name=None) -> NetworkFn:
"""Linear layer with single shared bias instead of one bias per output."""
def layer_fn(inputs):
"""Function representing a linear layer with single shared bias."""
initializer = _dqn_default_initializer(inputs.shape[-1])
bias_free_linear = hk.Linear(
num_outputs, with_bias=False, w_init=initializer, name=name)
linear_output = bias_free_linear(inputs)
bias = hk.get_parameter('b', [1], inputs.dtype, init=initializer)
bias = jnp.broadcast_to(bias, linear_output.shape)
return linear_output + bias
return layer_fn
def dqn_torso() -> NetworkFn:
"""DQN convolutional torso.
Includes scaling from [`0`, `255`] (`uint8`) to [`0`, `1`] (`float32`)`.
Returns:
Network function that `haiku.transform` can be called on.
"""
def net_fn(inputs):
"""Function representing convolutional torso for a DQN Q-network."""
network = hk.Sequential([
lambda x: x.astype(jnp.float32) / 255.,
conv(32, kernel_shape=(8, 8), stride=(4, 4), name='conv1'),
jax.nn.relu,
conv(64, kernel_shape=(4, 4), stride=(2, 2), name='conv2'),
jax.nn.relu,
conv(64, kernel_shape=(3, 3), stride=(1, 1), name='conv3'),
jax.nn.relu,
hk.Flatten(),
])
return network(inputs)
return net_fn
def dqn_value_head(num_actions: int, shared_bias: bool = False) -> NetworkFn:
"""Regular DQN Q-value head with single hidden layer."""
last_layer = linear_with_shared_bias if shared_bias else linear
def net_fn(inputs):
"""Function representing value head for a DQN Q-network."""
network = hk.Sequential([
linear(512, name='linear1'),
jax.nn.relu,
last_layer(num_actions, name='output'),
])
return network(inputs)
return net_fn
def qr_atari_network(num_actions: int, quantiles: jnp.ndarray) -> NetworkFn:
"""QR-DQN network, expects `uint8` input."""
chex.assert_rank(quantiles, 1)
num_quantiles = len(quantiles)
def net_fn(inputs):
"""Function representing QR-DQN Q-network."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_quantiles * num_actions),
])
network_output = network(inputs)
q_dist = jnp.reshape(network_output, (-1, num_quantiles, num_actions))
q_values = jnp.mean(q_dist, axis=1)
q_values = jax.lax.stop_gradient(q_values)
return QRNetworkOutputs(q_dist=q_dist, q_values=q_values)
return net_fn
def double_dqn_atari_network(num_actions: int) -> NetworkFn:
"""DQN network with shared bias in final layer, expects `uint8` input."""
def net_fn(inputs):
"""Function representing DQN Q-network with shared bias output layer."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_actions, shared_bias=True),
])
return QNetworkOutputs(q_values=network(inputs))
return net_fn
def make_network(network_type: str, num_actions: int) -> Network:
"""Constructs network."""
if network_type == 'double_q':
network_fn = double_dqn_atari_network(num_actions)
elif network_type == 'qr':
quantiles = make_quantiles()
network_fn = qr_atari_network(num_actions, quantiles)
else:
raise ValueError('Unknown network "{}"'.format(network_type))
return hk.transform(network_fn)
| deepmind-research-master | tandem_dqn/networks.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Replay components for DQN-type agents."""
import collections
import typing
from typing import Any, Callable, Generic, Iterable, List, Mapping, Optional, Sequence, Text, Tuple, TypeVar
import dm_env
import numpy as np
import snappy
from tandem_dqn import parts
CompressedArray = Tuple[bytes, Tuple, np.dtype]
# Generic replay structure: Any flat named tuple.
ReplayStructure = TypeVar('ReplayStructure', bound=Tuple[Any, ...])
class Transition(typing.NamedTuple):
s_tm1: Optional[np.ndarray]
a_tm1: Optional[parts.Action]
r_t: Optional[float]
discount_t: Optional[float]
s_t: Optional[np.ndarray]
a_t: Optional[parts.Action] = None
mc_return_tm1: Optional[float] = None
class TransitionReplay(Generic[ReplayStructure]):
"""Uniform replay, with circular buffer storage for flat named tuples."""
def __init__(self,
capacity: int,
structure: ReplayStructure,
random_state: np.random.RandomState,
encoder: Optional[Callable[[ReplayStructure], Any]] = None,
decoder: Optional[Callable[[Any], ReplayStructure]] = None):
self._capacity = capacity
self._structure = structure
self._random_state = random_state
self._encoder = encoder or (lambda s: s)
self._decoder = decoder or (lambda s: s)
self._storage = [None] * capacity
self._num_added = 0
def add(self, item: ReplayStructure) -> None:
"""Adds single item to replay."""
self._storage[self._num_added % self._capacity] = self._encoder(item)
self._num_added += 1
def get(self, indices: Sequence[int]) -> List[ReplayStructure]:
"""Retrieves items by indices."""
return [self._decoder(self._storage[i]) for i in indices]
def sample(self, size: int) -> ReplayStructure:
"""Samples batch of items from replay uniformly, with replacement."""
indices = self._random_state.choice(self.size, size=size, replace=True)
samples = self.get(indices)
transposed = zip(*samples)
stacked = [np.stack(xs, axis=0) for xs in transposed]
return type(self._structure)(*stacked) # pytype: disable=not-callable
@property
def size(self) -> int:
"""Number of items currently contained in replay."""
return min(self._num_added, self._capacity)
@property
def capacity(self) -> int:
"""Total capacity of replay (max number of items stored at any one time)."""
return self._capacity
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves replay state as a dictionary (e.g. for serialization)."""
return {
'storage': self._storage,
'num_added': self._num_added,
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets replay state from a (potentially de-serialized) dictionary."""
self._storage = state['storage']
self._num_added = state['num_added']
class TransitionAccumulatorWithMCReturn:
"""Accumulates timesteps to transitions with MC returns."""
def __init__(self):
self._transitions = collections.deque()
self.reset()
def step(self, timestep_t: dm_env.TimeStep,
a_t: parts.Action) -> Iterable[Transition]:
"""Accumulates timestep and resulting action, maybe yields transitions."""
if timestep_t.first():
self.reset()
# There are no transitions on the first timestep.
if self._timestep_tm1 is None:
assert self._a_tm1 is None
if not timestep_t.first():
raise ValueError('Expected FIRST timestep, got %s.' % str(timestep_t))
self._timestep_tm1 = timestep_t
self._a_tm1 = a_t
return # Empty iterable.
self._transitions.append(
Transition(
s_tm1=self._timestep_tm1.observation,
a_tm1=self._a_tm1,
r_t=timestep_t.reward,
discount_t=timestep_t.discount,
s_t=timestep_t.observation,
a_t=a_t,
mc_return_tm1=None,
))
self._timestep_tm1 = timestep_t
self._a_tm1 = a_t
if timestep_t.last():
# Annotate all episode transitions with their MC returns.
mc_return = 0
mc_transitions = []
while self._transitions:
transition = self._transitions.pop()
mc_return = transition.discount_t * mc_return + transition.r_t
mc_transitions.append(transition._replace(mc_return_tm1=mc_return))
for transition in reversed(mc_transitions):
yield transition
else:
# Wait for episode end before yielding anything.
return
def reset(self) -> None:
"""Resets the accumulator. Following timestep is expected to be FIRST."""
self._transitions.clear()
self._timestep_tm1 = None
self._a_tm1 = None
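# Worked example of the backward MC-return annotation above (hypothetical
# three-step episode): for per-transition (r_t, discount_t) pairs
# (1.0, 0.99), (0.0, 0.99) and (2.0, 0.0), the yielded mc_return_tm1 values are
# 1.0 + 0.99 * (0.0 + 0.99 * 2.0) = 2.9602, then 0.0 + 0.99 * 2.0 = 1.98, and
# finally 2.0 for the terminal transition.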
def compress_array(array: np.ndarray) -> CompressedArray:
"""Compresses a numpy array with snappy."""
return snappy.compress(array), array.shape, array.dtype
def uncompress_array(compressed: CompressedArray) -> np.ndarray:
"""Uncompresses a numpy array with snappy given its shape and dtype."""
compressed_array, shape, dtype = compressed
byte_string = snappy.uncompress(compressed_array)
return np.frombuffer(byte_string, dtype=dtype).reshape(shape)
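# Round-trip sketch (illustrative only):
#   original = np.arange(12, dtype=np.uint8).reshape(3, 4)
#   restored = uncompress_array(compress_array(original))
#   np.array_equal(original, restored)  # -> True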
| deepmind-research-master | tandem_dqn/replay.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tandem DQN agent class."""
import typing
from typing import Any, Callable, Mapping, Set, Text
from absl import logging
import dm_env
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from tandem_dqn import losses
from tandem_dqn import parts
from tandem_dqn import processors
from tandem_dqn import replay as replay_lib
class TandemTuple(typing.NamedTuple):
active: Any
passive: Any
def tandem_map(fn: Callable[..., Any], *args):
return TandemTuple(
active=fn(*[a.active for a in args]),
passive=fn(*[a.passive for a in args]))
def replace_module_params(source, target, modules):
"""Replace selected module params in target by corresponding source values."""
source, _ = hk.data_structures.partition(
lambda module, name, value: module in modules,
source)
return hk.data_structures.merge(target, source)
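# Illustrative note (module names are hypothetical): with
# modules={'sequential/conv1'}, only parameters whose Haiku module name is
# 'sequential/conv1' are copied from `source` into `target`; all other entries
# of `target` are returned unchanged.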
class TandemDqn(parts.Agent):
"""Tandem DQN agent."""
def __init__(
self,
preprocessor: processors.Processor,
sample_network_input: jnp.ndarray,
network: TandemTuple,
optimizer: TandemTuple,
loss: TandemTuple,
transition_accumulator: Any,
replay: replay_lib.TransitionReplay,
batch_size: int,
exploration_epsilon: Callable[[int], float],
min_replay_capacity_fraction: float,
learn_period: int,
target_network_update_period: int,
tied_layers: Set[str],
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._replay = replay
self._transition_accumulator = transition_accumulator
self._batch_size = batch_size
self._exploration_epsilon = exploration_epsilon
self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
self._learn_period = learn_period
self._target_network_update_period = target_network_update_period
# Initialize network parameters and optimizer.
self._rng_key, network_rng_key_active, network_rng_key_passive = (
jax.random.split(rng_key, 3))
active_params = network.active.init(
network_rng_key_active, sample_network_input[None, ...])
passive_params = network.passive.init(
network_rng_key_passive, sample_network_input[None, ...])
self._online_params = TandemTuple(
active=active_params, passive=passive_params)
self._target_params = self._online_params
self._opt_state = tandem_map(
lambda optim, params: optim.init(params),
optimizer, self._online_params)
# Other agent state: last action, frame count, etc.
self._action = None
self._frame_t = -1 # Current frame index.
# Stats.
stats = [
'loss_active',
'loss_passive',
'frac_diff_argmax',
'mc_error_active',
'mc_error_passive',
'mc_error_abs_active',
'mc_error_abs_passive',
]
self._statistics = {k: np.nan for k in stats}
# Define jitted loss, update, and policy functions here instead of as
# class methods, to emphasize that these are meant to be pure functions
# and should not access the agent object's state via `self`.
def network_outputs(rng_key, online_params, target_params, transitions):
"""Compute all potentially needed outputs of active and passive net."""
_, *apply_keys = jax.random.split(rng_key, 4)
outputs_tm1 = tandem_map(
lambda net, param: net.apply(param, apply_keys[0], transitions.s_tm1),
network, online_params)
outputs_t = tandem_map(
lambda net, param: net.apply(param, apply_keys[1], transitions.s_t),
network, online_params)
outputs_target_t = tandem_map(
lambda net, param: net.apply(param, apply_keys[2], transitions.s_t),
network, target_params)
return outputs_tm1, outputs_t, outputs_target_t
# Helper functions to define active and passive losses.
# Active and passive losses are allowed to depend on all active and passive
# outputs, but stop-gradient is used to prevent gradients from flowing
# from active loss to passive network params and vice versa.
def sg_active(x):
return TandemTuple(
active=jax.lax.stop_gradient(x.active), passive=x.passive)
def sg_passive(x):
return TandemTuple(
active=x.active, passive=jax.lax.stop_gradient(x.passive))
def compute_loss(online_params, target_params, transitions, rng_key):
rng_key, apply_key = jax.random.split(rng_key)
outputs_tm1, outputs_t, outputs_target_t = network_outputs(
apply_key, online_params, target_params, transitions)
_, loss_key_active, loss_key_passive = jax.random.split(rng_key, 3)
loss_active = loss.active(
sg_passive(outputs_tm1), sg_passive(outputs_t), outputs_target_t,
transitions, loss_key_active)
loss_passive = loss.passive(
sg_active(outputs_tm1), sg_active(outputs_t), outputs_target_t,
transitions, loss_key_passive)
# Logging stuff.
a_tm1 = transitions.a_tm1
mc_return_tm1 = transitions.mc_return_tm1
q_values = TandemTuple(
active=outputs_tm1.active.q_values,
passive=outputs_tm1.passive.q_values)
mc_error = jax.tree_map(
lambda q: losses.batch_mc_learning(q, a_tm1, mc_return_tm1),
q_values)
mc_error_abs = jax.tree_map(jnp.abs, mc_error)
q_argmax = jax.tree_map(lambda q: jnp.argmax(q, axis=-1), q_values)
argmax_diff = jnp.not_equal(q_argmax.active, q_argmax.passive)
batch_mean = lambda x: jnp.mean(x, axis=0)
logs = {
'loss_active': loss_active,
'loss_passive': loss_passive
}
logs.update(jax.tree_map(batch_mean, {
'frac_diff_argmax': argmax_diff,
'mc_error_active': mc_error.active,
'mc_error_passive': mc_error.passive,
'mc_error_abs_active': mc_error_abs.active,
'mc_error_abs_passive': mc_error_abs.passive,
}))
return loss_active + loss_passive, logs
def optim_update(optim, online_params, d_loss_d_params, opt_state):
updates, new_opt_state = optim.update(d_loss_d_params, opt_state)
new_online_params = optax.apply_updates(online_params, updates)
return new_opt_state, new_online_params
def compute_loss_grad(rng_key, online_params, target_params, transitions):
rng_key, grad_key = jax.random.split(rng_key)
(_, logs), d_loss_d_params = jax.value_and_grad(
compute_loss, has_aux=True)(
online_params, target_params, transitions, grad_key)
return rng_key, logs, d_loss_d_params
def update_active(rng_key, opt_state, online_params, target_params,
transitions):
"""Applies learning update for active network only."""
rng_key, logs, d_loss_d_params = compute_loss_grad(
rng_key, online_params, target_params, transitions)
new_opt_state_active, new_online_params_active = optim_update(
optimizer.active, online_params.active, d_loss_d_params.active,
opt_state.active)
new_opt_state = opt_state._replace(
active=new_opt_state_active)
new_online_params = online_params._replace(
active=new_online_params_active)
return rng_key, new_opt_state, new_online_params, logs
self._update_active = jax.jit(update_active)
def update_passive(rng_key, opt_state, online_params, target_params,
transitions):
"""Applies learning update for passive network only."""
rng_key, logs, d_loss_d_params = compute_loss_grad(
rng_key, online_params, target_params, transitions)
new_opt_state_passive, new_online_params_passive = optim_update(
optimizer.passive, online_params.passive, d_loss_d_params.passive,
opt_state.passive)
new_opt_state = opt_state._replace(
passive=new_opt_state_passive)
new_online_params = online_params._replace(
passive=new_online_params_passive)
return rng_key, new_opt_state, new_online_params, logs
self._update_passive = jax.jit(update_passive)
def update_active_passive(rng_key, opt_state, online_params,
target_params, transitions):
"""Applies learning update for both active & passive networks."""
rng_key, logs, d_loss_d_params = compute_loss_grad(
rng_key, online_params, target_params, transitions)
new_opt_state_active, new_online_params_active = optim_update(
optimizer.active, online_params.active, d_loss_d_params.active,
opt_state.active)
new_opt_state_passive, new_online_params_passive = optim_update(
optimizer.passive, online_params.passive, d_loss_d_params.passive,
opt_state.passive)
new_opt_state = TandemTuple(active=new_opt_state_active,
passive=new_opt_state_passive)
new_online_params = TandemTuple(active=new_online_params_active,
passive=new_online_params_passive)
return rng_key, new_opt_state, new_online_params, logs
self._update_active_passive = jax.jit(update_active_passive)
self._update = None # set_training_mode needs to be called to set this.
def sync_tied_layers(online_params):
"""Set tied layer params of passive to respective values of active."""
new_online_params_passive = replace_module_params(
source=online_params.active, target=online_params.passive,
modules=tied_layers)
return online_params._replace(passive=new_online_params_passive)
self._sync_tied_layers = jax.jit(sync_tied_layers)
def select_action(rng_key, network_params, s_t, exploration_epsilon):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.active.apply(network_params, apply_key,
s_t[None, ...]).q_values[0]
a_t = rlax.epsilon_greedy().sample(policy_key, q_t, exploration_epsilon)
return rng_key, a_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given timestep and potentially learns."""
self._frame_t += 1
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
action = self._action
else:
action = self._action = self._act(timestep)
for transition in self._transition_accumulator.step(timestep, action):
self._replay.add(transition)
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
if self._frame_t % self._target_network_update_period == 0:
self._target_params = self._online_params
return action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
self._transition_accumulator.reset()
processors.reset(self._preprocessor)
self._action = None
def _act(self, timestep) -> parts.Action:
"""Selects action given timestep, according to epsilon-greedy policy."""
s_t = timestep.observation
network_params = self._online_params.active
self._rng_key, a_t = self._select_action(
self._rng_key, network_params, s_t, self.exploration_epsilon)
return parts.Action(jax.device_get(a_t))
def _learn(self) -> None:
"""Samples a batch of transitions from replay and learns from it."""
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions = self._replay.sample(self._batch_size)
self._rng_key, self._opt_state, self._online_params, logs = self._update(
self._rng_key,
self._opt_state,
self._online_params,
self._target_params,
transitions,
)
self._online_params = self._sync_tied_layers(self._online_params)
self._statistics.update(jax.device_get(logs))
def set_training_mode(self, mode: str):
"""Sets training mode to one of 'active', 'passive', or 'active_passive'."""
if mode == 'active':
self._update = self._update_active
elif mode == 'passive':
self._update = self._update_passive
elif mode == 'active_passive':
self._update = self._update_active_passive
@property
def online_params(self) -> TandemTuple:
"""Returns current parameters of Q-network."""
return self._online_params
@property
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
# Check for DeviceArrays in values as this can be very slow.
assert all(not isinstance(x, jax.Array) for x in self._statistics.values())
return self._statistics
@property
def exploration_epsilon(self) -> float:
"""Returns epsilon value currently used by (eps-greedy) behavior policy."""
return self._exploration_epsilon(self._frame_t)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
state = {
'rng_key': self._rng_key,
'frame_t': self._frame_t,
'opt_state_active': self._opt_state.active,
'online_params_active': self._online_params.active,
'target_params_active': self._target_params.active,
'opt_state_passive': self._opt_state.passive,
'online_params_passive': self._online_params.passive,
'target_params_passive': self._target_params.passive,
'replay': self._replay.get_state(),
}
return state
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self._frame_t = state['frame_t']
self._opt_state = TandemTuple(
active=jax.device_put(state['opt_state_active']),
passive=jax.device_put(state['opt_state_passive']))
self._online_params = TandemTuple(
active=jax.device_put(state['online_params_active']),
passive=jax.device_put(state['online_params_passive']))
self._target_params = TandemTuple(
active=jax.device_put(state['target_params_active']),
passive=jax.device_put(state['target_params_passive']))
self._replay.set_state(state['replay'])
| deepmind-research-master | tandem_dqn/agent.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Components for DQN."""
import abc
import collections
import csv
import os
import timeit
from typing import Any, Iterable, Mapping, Optional, Text, Tuple, Union
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
import rlax
from tandem_dqn import networks
from tandem_dqn import processors
Action = int
Network = networks.Network
NetworkParams = networks.Params
PRNGKey = jnp.ndarray # A size 2 array.
class Agent(abc.ABC):
"""Agent interface."""
@abc.abstractmethod
def step(self, timestep: dm_env.TimeStep) -> Action:
"""Selects action given timestep and potentially learns."""
@abc.abstractmethod
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
@abc.abstractmethod
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
@abc.abstractmethod
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
@property
@abc.abstractmethod
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
def run_loop(
agent: Agent,
environment: dm_env.Environment,
max_steps_per_episode: int = 0,
yield_before_reset: bool = False,
) -> Iterable[Tuple[dm_env.Environment, Optional[dm_env.TimeStep], Agent,
Optional[Action]]]:
"""Repeatedly alternates step calls on environment and agent.
At time `t`, `t + 1` environment timesteps and `t + 1` agent steps have been
seen in the current episode. `t` resets to `0` for the next episode.
Args:
agent: Agent to be run, has methods `step(timestep)` and `reset()`.
environment: Environment to run, has methods `step(action)` and `reset()`.
max_steps_per_episode: If positive, when time t reaches this value within an
episode, the episode is truncated.
yield_before_reset: Whether to additionally yield `(environment, None,
agent, None)` before the agent and environment is reset at the start of
each episode.
Yields:
Tuple `(environment, timestep_t, agent, a_t)` where
`a_t = agent.step(timestep_t)`.
"""
while True: # For each episode.
if yield_before_reset:
yield environment, None, agent, None,
t = 0
agent.reset()
timestep_t = environment.reset() # timestep_0.
while True: # For each step in the current episode.
a_t = agent.step(timestep_t)
yield environment, timestep_t, agent, a_t
# Update t after one environment step and agent step and relabel.
t += 1
a_tm1 = a_t
timestep_t = environment.step(a_tm1)
if max_steps_per_episode > 0 and t >= max_steps_per_episode:
assert t == max_steps_per_episode
timestep_t = timestep_t._replace(step_type=dm_env.StepType.LAST)
if timestep_t.last():
unused_a_t = agent.step(timestep_t) # Extra agent step, action ignored.
yield environment, timestep_t, agent, None
break
def generate_statistics(
trackers: Iterable[Any],
timestep_action_sequence: Iterable[Tuple[dm_env.Environment,
Optional[dm_env.TimeStep], Agent,
Optional[Action]]]
) -> Mapping[Text, Any]:
"""Generates statistics from a sequence of timestep and actions."""
# Only reset at the start, not between episodes.
for tracker in trackers:
tracker.reset()
for environment, timestep_t, agent, a_t in timestep_action_sequence:
for tracker in trackers:
tracker.step(environment, timestep_t, agent, a_t)
# Merge all statistics dictionaries into one.
statistics_dicts = (tracker.get() for tracker in trackers)
return dict(collections.ChainMap(*statistics_dicts))
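# Illustrative composition of the two helpers above (objects are hypothetical;
# itertools.islice is used because run_loop is an endless generator):
#   loop = run_loop(agent, environment, max_steps_per_episode=108000)
#   stats = generate_statistics(make_default_trackers(agent),
#                               itertools.islice(loop, 10000))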
class EpisodeTracker:
"""Tracks episode return and other statistics."""
def __init__(self):
self._num_steps_since_reset = None
self._num_steps_over_episodes = None
self._episode_returns = None
self._current_episode_rewards = None
self._current_episode_step = None
def step(
self,
environment: Optional[dm_env.Environment],
timestep_t: dm_env.TimeStep,
agent: Optional[Agent],
a_t: Optional[Action],
) -> None:
"""Accumulates statistics from timestep."""
del (environment, agent, a_t)
if timestep_t.first():
if self._current_episode_rewards:
raise ValueError('Current episode reward list should be empty.')
if self._current_episode_step != 0:
raise ValueError('Current episode step should be zero.')
else:
# First reward is invalid, all other rewards are appended.
self._current_episode_rewards.append(timestep_t.reward)
self._num_steps_since_reset += 1
self._current_episode_step += 1
if timestep_t.last():
self._episode_returns.append(sum(self._current_episode_rewards))
self._current_episode_rewards = []
self._num_steps_over_episodes += self._current_episode_step
self._current_episode_step = 0
def reset(self) -> None:
"""Resets all gathered statistics, not to be called between episodes."""
self._num_steps_since_reset = 0
self._num_steps_over_episodes = 0
self._episode_returns = []
self._current_episode_step = 0
self._current_episode_rewards = []
def get(self) -> Mapping[Text, Union[int, float, None]]:
"""Aggregates statistics and returns as a dictionary.
Here the convention is `episode_return` is set to `current_episode_return`
if a full episode has not been encountered. Otherwise it is set to
`mean_episode_return` which is the mean return of complete episodes only. If
no steps have been taken at all, `episode_return` is set to `NaN`.
Returns:
A dictionary of aggregated statistics.
"""
if self._episode_returns:
mean_episode_return = np.array(self._episode_returns).mean()
current_episode_return = sum(self._current_episode_rewards)
episode_return = mean_episode_return
else:
mean_episode_return = np.nan
if self._num_steps_since_reset > 0:
current_episode_return = sum(self._current_episode_rewards)
else:
current_episode_return = np.nan
episode_return = current_episode_return
return {
'mean_episode_return': mean_episode_return,
'current_episode_return': current_episode_return,
'episode_return': episode_return,
'num_episodes': len(self._episode_returns),
'num_steps_over_episodes': self._num_steps_over_episodes,
'current_episode_step': self._current_episode_step,
'num_steps_since_reset': self._num_steps_since_reset,
}
class StepRateTracker:
"""Tracks step rate, number of steps taken and duration since last reset."""
def __init__(self):
self._num_steps_since_reset = None
self._start = None
def step(
self,
environment: Optional[dm_env.Environment],
timestep_t: Optional[dm_env.TimeStep],
agent: Optional[Agent],
a_t: Optional[Action],
) -> None:
del (environment, timestep_t, agent, a_t)
self._num_steps_since_reset += 1
def reset(self) -> None:
self._num_steps_since_reset = 0
self._start = timeit.default_timer()
def get(self) -> Mapping[Text, float]:
duration = timeit.default_timer() - self._start
if self._num_steps_since_reset > 0:
step_rate = self._num_steps_since_reset / duration
else:
step_rate = np.nan
return {
'step_rate': step_rate,
'num_steps': self._num_steps_since_reset,
'duration': duration,
}
class UnbiasedExponentialWeightedAverageAgentTracker:
"""'Unbiased Constant-Step-Size Trick' from the Sutton and Barto RL book."""
def __init__(self, step_size: float, initial_agent: Agent):
self._initial_statistics = dict(initial_agent.statistics)
self._step_size = step_size
self.trace = 0.
self._statistics = dict(self._initial_statistics)
def step(
self,
environment: Optional[dm_env.Environment],
timestep_t: Optional[dm_env.TimeStep],
agent: Agent,
a_t: Optional[Action],
) -> None:
"""Accumulates agent statistics."""
del (environment, timestep_t, a_t)
self.trace = (1 - self._step_size) * self.trace + self._step_size
final_step_size = self._step_size / self.trace
assert 0 <= final_step_size <= 1
if final_step_size == 1:
      # Since self._initial_statistics is likely to contain NaNs and
      # 0 * NaN == NaN, just replace self._statistics on the first step.
self._statistics = dict(agent.statistics)
else:
self._statistics = jax.tree_map(
lambda s, x: (1 - final_step_size) * s + final_step_size * x,
self._statistics, agent.statistics)
def reset(self) -> None:
"""Resets statistics and internal state."""
self.trace = 0.
# get() may be called before step() so ensure statistics are initialized.
self._statistics = dict(self._initial_statistics)
def get(self) -> Mapping[Text, float]:
"""Returns current accumulated statistics."""
return self._statistics
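# Worked example of the unbiased step-size trick (step_size = 1e-3): after the
# first step trace = 1e-3 and final_step_size = 1.0, so the initial statistics
# are simply replaced; after the second step trace = (1 - 1e-3) * 1e-3 + 1e-3
# and final_step_size ~= 0.50, and over time it approaches the nominal 1e-3.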
def make_default_trackers(initial_agent: Agent):
return [
EpisodeTracker(),
StepRateTracker(),
UnbiasedExponentialWeightedAverageAgentTracker(
step_size=1e-3, initial_agent=initial_agent),
]
class EpsilonGreedyActor(Agent):
"""Agent that acts with a given set of Q-network parameters and epsilon.
Network parameters are set on the actor. The actor can be serialized,
ensuring determinism of execution (e.g. when checkpointing).
"""
def __init__(
self,
preprocessor: processors.Processor,
network: Network,
exploration_epsilon: float,
rng_key: PRNGKey,
):
self._preprocessor = preprocessor
self._rng_key = rng_key
self._action = None
self.network_params = None # Nest of arrays (haiku.Params), set externally.
def select_action(rng_key, network_params, s_t):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
a_t = rlax.epsilon_greedy().sample(policy_key, q_t, exploration_epsilon)
return rng_key, a_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> Action:
"""Selects action given a timestep."""
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
return self._action
s_t = timestep.observation
self._rng_key, a_t = self._select_action(self._rng_key, self.network_params,
s_t)
self._action = Action(jax.device_get(a_t))
return self._action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
processors.reset(self._preprocessor)
self._action = None
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
# State contains network params to make agent easy to run from a checkpoint.
return {
'rng_key': self._rng_key,
'network_params': self.network_params,
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self.network_params = state['network_params']
@property
def statistics(self) -> Mapping[Text, float]:
return {}
class LinearSchedule:
"""Linear schedule, used for exploration epsilon in DQN agents."""
def __init__(self,
begin_value,
end_value,
begin_t,
end_t=None,
decay_steps=None):
if (end_t is None) == (decay_steps is None):
raise ValueError('Exactly one of end_t, decay_steps must be provided.')
self._decay_steps = decay_steps if end_t is None else end_t - begin_t
self._begin_t = begin_t
self._begin_value = begin_value
self._end_value = end_value
def __call__(self, t):
"""Implements a linear transition from a begin to an end value."""
frac = min(max(t - self._begin_t, 0), self._decay_steps) / self._decay_steps
return (1 - frac) * self._begin_value + frac * self._end_value
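# Worked example (hypothetical schedule): LinearSchedule(begin_value=1.0,
# end_value=0.1, begin_t=0, decay_steps=100) returns 1.0 at t=0, 0.55 at t=50,
# and 0.1 for any t >= 100.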
class NullWriter:
"""A placeholder logging object that does nothing."""
def write(self, *args, **kwargs) -> None:
pass
def close(self) -> None:
pass
class CsvWriter:
"""A logging object writing to a CSV file.
  Each `write()` takes an `OrderedDict`, creating one column in the CSV file for
each dictionary key on the first call. Successive calls to `write()` must
contain the same dictionary keys.
"""
def __init__(self, fname: Text):
"""Initializes a `CsvWriter`.
Args:
fname: File name (path) for file to be written to.
"""
    dirname = os.path.dirname(fname)
    if dirname and not os.path.exists(dirname):
      os.makedirs(dirname)
self._fname = fname
self._header_written = False
self._fieldnames = None
def write(self, values: collections.OrderedDict) -> None:
"""Appends given values as new row to CSV file."""
if self._fieldnames is None:
self._fieldnames = values.keys()
# Open a file in 'append' mode, so we can continue logging safely to the
# same file after e.g. restarting from a checkpoint.
with open(self._fname, 'a') as file:
      # Always create the writer with the same fieldnames so that a consistency
      # check is performed automatically on each write.
writer = csv.DictWriter(file, fieldnames=self._fieldnames)
# Write a header if this is the very first write.
if not self._header_written:
writer.writeheader()
self._header_written = True
writer.writerow(values)
def close(self) -> None:
"""Closes the `CsvWriter`."""
pass
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves `CsvWriter` state as a `dict` (e.g. for serialization)."""
return {
'header_written': self._header_written,
'fieldnames': self._fieldnames
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets `CsvWriter` state from a (potentially de-serialized) dictionary."""
self._header_written = state['header_written']
self._fieldnames = state['fieldnames']
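# Illustrative usage of CsvWriter (the path below is hypothetical): each call
# to write() appends one row; the keys of the first OrderedDict fix the columns.
def _example_csv_writer_usage(fname='/tmp/dqn_logs/results.csv'):
  writer = CsvWriter(fname)
  writer.write(collections.OrderedDict([('frame', 1000), ('eval_return', 12.5)]))
  writer.write(collections.OrderedDict([('frame', 2000), ('eval_return', 20.0)]))
  writer.close()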
class NullCheckpoint:
"""A placeholder checkpointing object that does nothing.
Can be used as a substitute for an actual checkpointing object when
checkpointing is disabled.
"""
def __init__(self):
self.state = AttributeDict()
def save(self) -> None:
pass
def can_be_restored(self) -> bool:
return False
def restore(self) -> None:
pass
class AttributeDict(dict):
"""A `dict` that supports getting, setting, deleting keys via attributes."""
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
del self[key]
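# Illustrative usage: AttributeDict behaves like a plain dict but also exposes
# keys as attributes, e.g. for the checkpoint state above.
#   state = AttributeDict()
#   state.iteration = 0     # Same as state['iteration'] = 0.
#   state['iteration'] += 1
#   state.iteration         # -> 1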
| deepmind-research-master | tandem_dqn/parts.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses for TandemDQN."""
from typing import Any, Callable
import chex
import jax
import jax.numpy as jnp
import rlax
from tandem_dqn import networks
# Batch variants of double_q_learning and SARSA.
batch_double_q_learning = jax.vmap(rlax.double_q_learning)
batch_sarsa_learning = jax.vmap(rlax.sarsa)
# Batch variant of quantile_q_learning with fixed tau input across batch.
batch_quantile_q_learning = jax.vmap(
rlax.quantile_q_learning, in_axes=(0, None, 0, 0, 0, 0, 0, None))
def _mc_learning(
q_tm1: chex.Array,
a_tm1: chex.Numeric,
mc_return_tm1: chex.Array,
) -> chex.Numeric:
"""Calculates the MC return error."""
chex.assert_rank([q_tm1, a_tm1], [1, 0])
chex.assert_type([q_tm1, a_tm1], [float, int])
return mc_return_tm1 - q_tm1[a_tm1]
# Batch variant of MC learning.
batch_mc_learning = jax.vmap(_mc_learning)
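# Worked example (hypothetical numbers): the per-transition MC error is
# mc_return - Q(s, a); the vmapped variant applies it independently to each
# batch element.
def _example_batch_mc_learning():
  q_tm1 = jnp.array([[1.0, 2.0], [0.0, 3.0]])  # Shape [B, A].
  a_tm1 = jnp.array([0, 1])                    # Shape [B].
  mc_return_tm1 = jnp.array([1.5, 2.0])        # Shape [B].
  return batch_mc_learning(q_tm1, a_tm1, mc_return_tm1)  # -> [0.5, -1.0]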
def _qr_loss(q_tm1, q_t, q_target_t, transitions, rng_key):
"""Calculates QR-Learning loss from network outputs and transitions."""
del q_t, rng_key # Unused.
# Compute Q value distributions.
huber_param = 1.
quantiles = networks.make_quantiles()
losses = batch_quantile_q_learning(
q_tm1.q_dist,
quantiles,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
q_target_t.q_dist, # No double Q-learning here.
q_target_t.q_dist,
huber_param,
)
loss = jnp.mean(losses)
return loss
def _sarsa_loss(q_tm1, q_t, transitions, rng_key):
"""Calculates SARSA loss from network outputs and transitions."""
del rng_key # Unused.
grad_error_bound = 1. / 32
td_errors = batch_sarsa_learning(
q_tm1.q_values,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
q_t.q_values,
transitions.a_t
)
td_errors = rlax.clip_gradient(td_errors, -grad_error_bound, grad_error_bound)
losses = rlax.l2_loss(td_errors)
loss = jnp.mean(losses)
return loss
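# Illustrative sketch (not part of the original code): rlax.clip_gradient is an
# identity in the forward pass and clips only the backward gradient, so the
# gradient of l2_loss(clip_gradient(td)) w.r.t. td is clip(td, -1/32, 1/32),
# analogous to the error clipping used in DQN.
def _example_clipped_td_gradient(td_error=0.5):
  bound = 1. / 32
  grad_fn = jax.grad(
      lambda td: rlax.l2_loss(rlax.clip_gradient(td, -bound, bound)))
  return grad_fn(td_error)  # -> 1/32 for td_error = 0.5; 0.01 for td_error = 0.01.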
def _mc_loss(q_tm1, transitions, rng_key):
"""Calculates Monte-Carlo return loss, i.e. regression towards MC return."""
del rng_key # Unused.
errors = batch_mc_learning(q_tm1.q_values, transitions.a_tm1,
transitions.mc_return_tm1)
loss = jnp.mean(rlax.l2_loss(errors))
return loss
def _double_q_loss(q_tm1, q_t, q_target_t, transitions, rng_key):
"""Calculates Double Q-Learning loss from network outputs and transitions."""
del rng_key # Unused.
grad_error_bound = 1. / 32
td_errors = batch_double_q_learning(
q_tm1.q_values,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
q_target_t.q_values,
q_t.q_values,
)
td_errors = rlax.clip_gradient(td_errors, -grad_error_bound, grad_error_bound)
losses = rlax.l2_loss(td_errors)
loss = jnp.mean(losses)
return loss
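# Worked example (hypothetical numbers) of a single double Q-learning step: the
# selector network's argmax at s_t chooses the action, the value network
# evaluates it, giving td = r_t + discount_t * q_t[argmax selector] - q_tm1[a].
def _example_double_q_learning_td():
  td_error = rlax.double_q_learning(
      jnp.array([1.0, 2.0]),   # q_tm1: Q-values at s_tm1.
      jnp.asarray(0),          # a_tm1: action taken at s_tm1.
      jnp.asarray(0.5),        # r_t: reward.
      jnp.asarray(0.9),        # discount_t.
      jnp.array([2.0, 1.0]),   # q_t value estimates (target network).
      jnp.array([0.0, 3.0]))   # q_t selector (online network); argmax is 1.
  return td_error  # 0.5 + 0.9 * 1.0 - 1.0 = 0.4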
def _q_regression_loss(q_tm1, q_tm1_target):
"""Loss for regression of all action values towards targets."""
errors = q_tm1.q_values - jax.lax.stop_gradient(q_tm1_target.q_values)
loss = jnp.mean(rlax.l2_loss(errors))
return loss
def make_loss_fn(loss_type: str, active: bool) -> Callable[..., Any]:
"""Create active or passive loss function of given type."""
if active:
primary = lambda x: x.active
secondary = lambda x: x.passive
else:
primary = lambda x: x.passive
secondary = lambda x: x.active
def sarsa_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key):
"""SARSA loss using own networks."""
del q_t # Unused.
return _sarsa_loss(primary(q_tm1), primary(q_target_t), transitions,
rng_key)
def mc_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key):
"""MonteCarlo loss."""
del q_t, q_target_t
return _mc_loss(primary(q_tm1), transitions, rng_key)
def double_q_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key):
"""Regular DoubleQ loss using own networks."""
return _double_q_loss(primary(q_tm1), primary(q_t), primary(q_target_t),
transitions, rng_key)
def double_q_loss_v_fn(q_tm1, q_t, q_target_t, transitions, rng_key):
"""DoubleQ loss using other network's (target) value function."""
return _double_q_loss(primary(q_tm1), primary(q_t), secondary(q_target_t),
transitions, rng_key)
def double_q_loss_p_fn(q_tm1, q_t, q_target_t, transitions, rng_key):
"""DoubleQ loss using other network's (online) argmax policy."""
return _double_q_loss(primary(q_tm1), secondary(q_t), primary(q_target_t),
transitions, rng_key)
def double_q_loss_pv_fn(q_tm1, q_t, q_target_t, transitions, rng_key):
"""DoubleQ loss using other network's argmax policy & target value fn."""
return _double_q_loss(primary(q_tm1), secondary(q_t), secondary(q_target_t),
transitions, rng_key)
# Pure regression.
def q_regression_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key):
"""Pure regression of q_tm1(self) towards q_tm1(other)."""
del q_t, q_target_t, transitions, rng_key # Unused.
return _q_regression_loss(primary(q_tm1), secondary(q_tm1))
# QR loss.
def qr_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key):
"""QR-Q loss using own networks."""
return _qr_loss(primary(q_tm1), primary(q_t), primary(q_target_t),
transitions, rng_key)
if loss_type == 'double_q':
return double_q_loss_fn
elif loss_type == 'sarsa':
return sarsa_loss_fn
elif loss_type == 'mc_return':
return mc_loss_fn
elif loss_type == 'double_q_v':
return double_q_loss_v_fn
elif loss_type == 'double_q_p':
return double_q_loss_p_fn
elif loss_type == 'double_q_pv':
return double_q_loss_pv_fn
elif loss_type == 'q_regression':
return q_regression_loss_fn
elif loss_type == 'qr':
return qr_loss_fn
else:
raise ValueError('Unknown loss "{}"'.format(loss_type))
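# Illustrative usage (sketch): make_loss_fn returns a function with signature
# (q_tm1, q_t, q_target_t, transitions, rng_key) that operates on the chosen
# side of the tandem pair, e.g.:
#   active_loss_fn = make_loss_fn('double_q', active=True)
#   passive_loss_fn = make_loss_fn('double_q', active=False)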
| deepmind-research-master | tandem_dqn/losses.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dm_env environment wrapper around Gym Atari configured to be like Xitari.
Gym Atari is built on the Arcade Learning Environment (ALE), whereas Xitari is
an old fork of the ALE.
"""
# pylint: disable=g-bad-import-order
from typing import Optional, Tuple
import atari_py # pylint: disable=unused-import for gym to load Atari games.
import dm_env
from dm_env import specs
import gym
import numpy as np
from tandem_dqn import atari_data
_GYM_ID_SUFFIX = '-xitari-v1'
_SA_SUFFIX = '-sa'
def _game_id(game, sticky_actions):
return game + (_SA_SUFFIX if sticky_actions else '') + _GYM_ID_SUFFIX
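# Illustrative examples (game names follow atari_data.ATARI_GAMES, e.g. 'pong'):
#   _game_id('pong', sticky_actions=False) -> 'pong-xitari-v1'
#   _game_id('pong', sticky_actions=True)  -> 'pong-sa-xitari-v1'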
def _register_atari_environments():
"""Registers Atari environments in Gym to be as similar to Xitari as possible.
  The main difference from PongNoFrameskip-v4, etc. is that max_episode_steps is
  unset and only the usual 57 Atari games are registered.
Additionally, sticky-actions variants of the environments are registered
with an '-sa' suffix.
"""
for sticky_actions in [False, True]:
for game in atari_data.ATARI_GAMES:
repeat_action_probability = 0.25 if sticky_actions else 0.0
gym.envs.registration.register(
id=_game_id(game, sticky_actions),
entry_point='gym.envs.atari:AtariEnv',
kwargs={ # Explicitly set all known arguments.
'game': game,
'mode': None, # Not necessarily the same as 0.
'difficulty': None, # Not necessarily the same as 0.
'obs_type': 'image',
'frameskip': 1, # Get every frame.
'repeat_action_probability': repeat_action_probability,
'full_action_space': False,
},
max_episode_steps=None, # No time limit, handled in run loop.
nondeterministic=False, # Xitari is deterministic.
)
_register_atari_environments()
class GymAtari(dm_env.Environment):
"""Gym Atari with a `dm_env.Environment` interface."""
def __init__(self, game, sticky_actions, seed):
self._gym_env = gym.make(_game_id(game, sticky_actions))
self._gym_env.seed(seed)
self._start_of_episode = True
def reset(self) -> dm_env.TimeStep:
"""Resets the environment and starts a new episode."""
observation = self._gym_env.reset()
lives = np.int32(self._gym_env.ale.lives())
timestep = dm_env.restart((observation, lives))
self._start_of_episode = False
return timestep
def step(self, action: np.int32) -> dm_env.TimeStep:
"""Updates the environment given an action and returns a timestep."""
# If the previous timestep was LAST then we call reset() on the Gym
# environment, otherwise step(). Although Gym environments allow you to step
# through episode boundaries (similar to dm_env) they emit a warning.
if self._start_of_episode:
step_type = dm_env.StepType.FIRST
observation = self._gym_env.reset()
discount = None
reward = None
done = False
else:
observation, reward, done, info = self._gym_env.step(action)
if done:
assert 'TimeLimit.truncated' not in info, 'Should never truncate.'
step_type = dm_env.StepType.LAST
discount = 0.
else:
step_type = dm_env.StepType.MID
discount = 1.
lives = np.int32(self._gym_env.ale.lives())
timestep = dm_env.TimeStep(
step_type=step_type,
observation=(observation, lives),
reward=reward,
discount=discount,
)
self._start_of_episode = done
return timestep
def observation_spec(self) -> Tuple[specs.Array, specs.Array]:
space = self._gym_env.observation_space
return (specs.Array(shape=space.shape, dtype=space.dtype, name='rgb'),
specs.Array(shape=(), dtype=np.int32, name='lives'))
def action_spec(self) -> specs.DiscreteArray:
space = self._gym_env.action_space
return specs.DiscreteArray(
num_values=space.n, dtype=np.int32, name='action')
def close(self):
self._gym_env.close()
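# Minimal usage sketch (assumes the corresponding ROM is available through
# atari_py): runs one random-action episode and returns its undiscounted return.
def _example_random_episode(game='pong', seed=1):
  env = GymAtari(game, sticky_actions=True, seed=seed)
  rng = np.random.RandomState(seed)
  num_actions = env.action_spec().num_values
  timestep = env.reset()
  episode_return = 0.
  while not timestep.last():
    timestep = env.step(np.int32(rng.randint(num_actions)))
    episode_return += timestep.reward
  env.close()
  return episode_return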
class RandomNoopsEnvironmentWrapper(dm_env.Environment):
"""Adds a random number of noop actions at the beginning of each episode."""
def __init__(self,
environment: dm_env.Environment,
max_noop_steps: int,
min_noop_steps: int = 0,
noop_action: int = 0,
seed: Optional[int] = None):
"""Initializes the random noops environment wrapper."""
self._environment = environment
if max_noop_steps < min_noop_steps:
      raise ValueError(
          'max_noop_steps must be greater than or equal to min_noop_steps')
self._min_noop_steps = min_noop_steps
self._max_noop_steps = max_noop_steps
self._noop_action = noop_action
self._rng = np.random.RandomState(seed)
def reset(self):
"""Begins new episode.
This method resets the wrapped environment and applies a random number
of noop actions before returning the last resulting observation
as the first episode timestep. Intermediate timesteps emitted by the inner
environment (including all rewards and discounts) are discarded.
Returns:
First episode timestep corresponding to the timestep after a random number
of noop actions are applied to the inner environment.
Raises:
RuntimeError: if an episode end occurs while the inner environment
is being stepped through with the noop action.
"""
return self._apply_random_noops(initial_timestep=self._environment.reset())
def step(self, action):
"""Steps environment given action.
If beginning a new episode then random noops are applied as in `reset()`.
Args:
action: action to pass to environment conforming to action spec.
Returns:
`Timestep` from the inner environment unless beginning a new episode, in
which case this is the timestep after a random number of noop actions
are applied to the inner environment.
"""
timestep = self._environment.step(action)
if timestep.first():
return self._apply_random_noops(initial_timestep=timestep)
else:
return timestep
def _apply_random_noops(self, initial_timestep):
assert initial_timestep.first()
num_steps = self._rng.randint(self._min_noop_steps,
self._max_noop_steps + 1)
timestep = initial_timestep
for _ in range(num_steps):
timestep = self._environment.step(self._noop_action)
if timestep.last():
raise RuntimeError('Episode ended while applying %s noop actions.' %
num_steps)
# We make sure to return a FIRST timestep, i.e. discard rewards & discounts.
return dm_env.restart(timestep.observation)
## All methods except for reset and step redirect to the underlying env.
def observation_spec(self):
return self._environment.observation_spec()
def action_spec(self):
return self._environment.action_spec()
def reward_spec(self):
return self._environment.reward_spec()
def discount_spec(self):
return self._environment.discount_spec()
def close(self):
return self._environment.close()
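# Minimal usage sketch (hypothetical parameters, in the style of common DQN
# evaluation settings): start every episode with 1 to 30 random noop actions.
#   env = RandomNoopsEnvironmentWrapper(
#       GymAtari('pong', sticky_actions=True, seed=1),
#       min_noop_steps=1, max_noop_steps=30, noop_action=0, seed=1)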
| deepmind-research-master | tandem_dqn/gym_atari.py |