python_code (stringlengths 0-780k) | repo_name (stringlengths 7-38) | file_path (stringlengths 5-103)
---|---|---|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| brave-main | brave/models/brave/__init__.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet, for the audio backbone."""
from typing import Any, Optional, Sequence, Type, Union
import haiku as hk
import jax
import jax.numpy as jnp
class BottleneckBlock(hk.Module):
"""Implements a bottleneck residual block (ResNet50 and ResNet101)."""
def __init__(self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
normalize_fn: Optional[Any] = None,
name: Optional[str] = None):
super(BottleneckBlock, self).__init__(name=name)
self._channels = channels
self._stride = stride
self._use_projection = use_projection
if normalize_fn is None:
# Use a placeholder function that simply returns None, so fewer checks for a
# normalize_fn are needed later on.
normalize_fn = lambda name: None
self._normalize_fn = normalize_fn
self._norm_0 = self._normalize_fn
if self._use_projection:
self._proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')
self._conv_0 = hk.Conv2D(
output_channels=channels // 4,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')
self._norm_1 = self._normalize_fn
self._conv_1 = hk.Conv2D(
output_channels=channels // 4,
kernel_shape=3,
stride=stride,
with_bias=False,
padding='SAME',
name='conv_1')
# NOTE: Some implementations of ResNet50 v2 suggest initializing gamma/scale
# here to zeros.
self._norm_2 = self._normalize_fn
self._conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_2')
def __call__(self, inputs, is_training):
net = inputs
shortcut = inputs
for i, (conv_i, norm_i) in enumerate(
((self._conv_0, self._norm_0), (self._conv_1, self._norm_1),
(self._conv_2, self._norm_2))):
if norm_i is not None:
net = norm_i(net, is_training=is_training)
net = jax.nn.relu(net)
if i == 0 and self._use_projection:
shortcut = self._proj_conv(net)
# Now do the convs.
net = conv_i(net)
return net + shortcut
class BasicBlock(hk.Module):
"""Implements a basic residual block (ResNet18 and ResNet34)."""
# pylint:disable=g-bare-generic
def __init__(self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
normalize_fn: Optional[Any] = None,
name: Optional[str] = None):
super(BasicBlock, self).__init__(name=name)
self._channels = channels
self._stride = stride
self._use_projection = use_projection
if normalize_fn is None:
# Use a placeholder function that simply returns None, so fewer checks for a
# normalize_fn are needed later on.
normalize_fn = lambda name: None
self._normalize_fn = normalize_fn
self._norm_0 = self._normalize_fn
if self._use_projection:
self._proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')
self._conv_0 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')
self._norm_1 = self._normalize_fn
self._conv_1 = hk.Conv2D(
output_channels=channels,
kernel_shape=3,
stride=stride,
with_bias=False,
padding='SAME',
name='conv_1')
def __call__(self, inputs, is_training):
net = inputs
shortcut = inputs
for i, (conv_i, norm_i) in enumerate(
((self._conv_0, self._norm_0), (self._conv_1, self._norm_1))):
if norm_i is not None:
net = norm_i(net, is_training=is_training)
net = jax.nn.relu(net)
if i == 0 and self._use_projection:
shortcut = self._proj_conv(net)
net = conv_i(net)
return net + shortcut
class ResNetUnit(hk.Module):
"""Unit (group of blocks) for ResNet."""
def __init__(self,
channels: int,
num_blocks: int,
stride: Union[int, Sequence[int]],
block_module: Type[BottleneckBlock],
normalize_fn: Optional[Any] = None,
name: Optional[str] = None,
remat: bool = False):
super(ResNetUnit, self).__init__(name=name)
self._channels = channels
self._num_blocks = num_blocks
self._stride = stride
self._normalize_fn = normalize_fn
self._block_module = block_module
self._remat = remat
def __call__(self, inputs, is_training):
input_channels = inputs.shape[-1]
self._blocks = []
for id_block in range(self._num_blocks):
use_projection = id_block == 0 and self._channels != input_channels
self._blocks.append(
self._block_module(
channels=self._channels,
stride=self._stride if id_block == 0 else 1,
use_projection=use_projection,
normalize_fn=self._normalize_fn,
name='block_%d' % id_block))
net = inputs
for block in self._blocks:
if self._remat:
# Note: we can ignore cell-var-from-loop because the lambda is evaluated
# inside every iteration of the loop. This is needed to work around the
# way variables are passed to jax.remat.
net = hk.remat(lambda x: block(x, is_training=is_training))(net) # pylint: disable=cell-var-from-loop
else:
net = block(net, is_training=is_training)
return net
class ResNetV2(hk.Module):
"""ResNetV2 model."""
VALID_ENDPOINTS = (
'resnet_stem',
'resnet_unit_0',
'resnet_unit_1',
'resnet_unit_2',
'resnet_unit_3',
'last_conv',
'virtex_embd',
'Embeddings',
'Classifier',
)
def __init__(self,
depth=50,
num_classes: Optional[int] = 1000,
add_time_dim: bool = False,
width_mult: int = 1,
normalize_fn: Optional[Any] = None,
name: Optional[str] = None,
remat: bool = False):
"""Creates ResNetV2 Haiku module.
Args:
depth: depth of the desired ResNet (18, 34, 50, 101, 152 or 200).
num_classes: (int) Number of outputs in the final layer. If None, no
classification head is added and the output embedding is returned.
add_time_dim: Add a temporal dimension in layer before last_conv.
width_mult: multiplier for channel width.
normalize_fn: normalization function.
name: Name of the module.
remat: Whether to rematerialize intermediate activations (saves memory).
"""
super(ResNetV2, self).__init__(name=name)
self._normalize_fn = normalize_fn
self._num_classes = num_classes
self._add_time_dim = add_time_dim
self._width_mult = width_mult
self._strides = [1, 2, 2, 2]
num_blocks = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3],
}
if depth not in num_blocks:
raise ValueError(
f'`depth` should be in {list(num_blocks.keys())} ({depth} given).')
self._num_blocks = num_blocks[depth]
if depth >= 50:
self._block_module = BottleneckBlock
self._channels = [256, 512, 1024, 2048]
else:
self._block_module = BasicBlock
self._channels = [64, 128, 256, 512]
self._initial_conv = hk.Conv2D(
output_channels=64 * self._width_mult,
kernel_shape=7,
stride=2,
with_bias=False,
padding='SAME',
name='initial_conv')
if remat:
self._initial_conv = hk.remat(self._initial_conv)
self._block_groups = []
for i in range(4):
self._block_groups.append(
ResNetUnit(
channels=self._channels[i] * self._width_mult,
num_blocks=self._num_blocks[i],
block_module=self._block_module,
stride=self._strides[i],
normalize_fn=self._normalize_fn,
name='block_group_%d' % i,
remat=remat))
if num_classes is not None:
self._logits = hk.Linear(
output_size=num_classes, w_init=jnp.zeros, name='logits')
def __call__(self, inputs, is_training, final_endpoint='Embeddings'):
self._final_endpoint = final_endpoint
is_fpn = final_endpoint.startswith('bifpn')
if final_endpoint not in self.VALID_ENDPOINTS and not is_fpn:
raise ValueError(f'Unknown final endpoint {final_endpoint}')
end_point = 'resnet_stem'
net = self._initial_conv(inputs)
net = hk.max_pool(
net, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')
if self._final_endpoint == end_point:
if self._add_time_dim:
net = jnp.expand_dims(net, axis=1)
return net
outputs = []
for block_id, block_group in enumerate(self._block_groups):
end_point = f'resnet_unit_{block_id}'
net = block_group(net, is_training=is_training)
if self._final_endpoint == end_point:
if self._add_time_dim:
net = jnp.expand_dims(net, axis=1)
return net
outputs.append(net)
if self._normalize_fn is not None:
net = self._normalize_fn(net, is_training=is_training)
net = jax.nn.relu(net)
end_point = 'last_conv'
if self._final_endpoint == end_point:
if self._add_time_dim:
net = jnp.expand_dims(net, axis=1)
return net
# The actual representation
net = jnp.mean(net, axis=[1, 2])
end_point = 'Embeddings'
if self._final_endpoint == end_point:
return net
if self._num_classes is None:
raise ValueError('num_classes must be set to return classifier logits.')
assert self._final_endpoint == 'Classifier'
return self._logits(net)
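# ----------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original DeepMind file).
# A minimal example of wrapping the ResNetV2 module above with Haiku. The
# LayerNorm-based normalize_fn, input shape and batch size are assumptions
# chosen to keep the example stateless; a BatchNorm-style normalize_fn would
# require hk.transform_with_state instead.
if __name__ == '__main__':

  def _layer_norm_fn(x, is_training):
    # Stateless normalization so plain hk.transform suffices.
    del is_training
    return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)

  def _forward(images):
    net = ResNetV2(depth=50, num_classes=None, normalize_fn=_layer_norm_fn)
    return net(images, is_training=True)

  model = hk.transform(_forward)
  rng = jax.random.PRNGKey(0)
  dummy_images = jnp.zeros((2, 224, 224, 3))  # (batch, height, width, channels)
  params = model.init(rng, dummy_images)
  embeddings = model.apply(params, rng, dummy_images)  # shape (2, 2048)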
| brave-main | brave/models/brave/resnet.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement trainable Haiku modules for Brave."""
from typing import Optional, Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from brave.datasets import datasets
from brave.models.brave import resnet
from brave.models.brave import tsm_resnet
DEFAULT_EMBEDDING_DIMS = 2048
DEFAULT_AUDIO_BACKBONE_DEPTH = 50
class ProjectAndPredict(hk.Module):
def __init__(self, output_dims: int, name: Optional[str] = None):
super(ProjectAndPredict, self).__init__(name=name)
self._output_dims = output_dims
def __call__(self, feats: chex.Array, is_training: bool) -> Tuple[chex.Array, chex.Array]:
z = Projector(self._output_dims)(feats, is_training)
h = Predictor(self._output_dims, name='predictor')(z, is_training)
return z, h
class AudioEmbedding(hk.Module):
"""Compute an audio embedding for spectrogram audio."""
def __init__(self, name: Optional[str] = None):
super(AudioEmbedding, self).__init__(name=name)
def __call__(self, view: datasets.View, is_training: bool) -> chex.Array:
assert view.audio is not None
net = resnet.ResNetV2(
depth=DEFAULT_AUDIO_BACKBONE_DEPTH,
normalize_fn=_default_normalize_fn,
num_classes=None)
audio = jnp.expand_dims(view.audio, axis=-1)
result = net(audio, is_training=is_training)
chex.assert_shape(result, (None, DEFAULT_EMBEDDING_DIMS))
return result
class VideoEmbedding(hk.Module):
"""Given a view, compute an embedding."""
def __init__(self, width_multiplier: int, name: Optional[str] = None):
super(VideoEmbedding, self).__init__(name=name)
self.width_multiplier = width_multiplier
def __call__(self, view: datasets.View, is_training: bool) -> chex.Array:
assert view.video is not None
chex.assert_shape(view.video, (None, None, None, None, 3)) # B, T, H, W, C
net = tsm_resnet.TSMResNetV2(
normalize_fn=_default_normalize_fn, width_mult=self.width_multiplier)
feats = net(view.video, is_training=is_training)
expected_output_dims = self.width_multiplier * DEFAULT_EMBEDDING_DIMS
chex.assert_shape(feats, (None, expected_output_dims))
return feats
class Projector(hk.Module):
"""Project backbone features into representation space."""
def __init__(self, output_dims: int, name: Optional[str] = None):
super(Projector, self).__init__(name=name)
self.output_dims = output_dims
def __call__(self, x: chex.Array, is_training: bool) -> chex.Array:
x = hk.Linear(4096)(x)
x = _default_normalize_fn(x, is_training)
x = jax.nn.relu(x)
x = hk.Linear(self.output_dims)(x)
x = _default_normalize_fn(x, is_training)
return x
class Predictor(hk.Module):
"""Take projected vector and predict the projected space of another view."""
def __init__(self, output_dims: int, name: Optional[str] = None):
super(Predictor, self).__init__(name=name)
self.output_dims = output_dims
def __call__(self, x: chex.Array, is_training: bool) -> chex.Array:
"""Project a projected z to predict another projected z."""
x = hk.Linear(4096)(x)
x = _default_normalize_fn(x, is_training)
x = jax.nn.relu(x)
return hk.Linear(self.output_dims)(x)
def _default_normalize_fn(x: chex.Array, is_training: bool):
return hk.BatchNorm(
create_scale=True,
create_offset=True,
decay_rate=0.9,
cross_replica_axis='i',
)(x, is_training)
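# ----------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original DeepMind file).
# A minimal example of running ProjectAndPredict above. Because
# _default_normalize_fn uses hk.BatchNorm with cross_replica_axis='i', the
# transformed function must run inside a pmap whose axis name is 'i'. The
# output_dims value and per-device batch size below are illustrative
# assumptions.
if __name__ == '__main__':

  def _forward(feats: chex.Array, is_training: bool):
    return ProjectAndPredict(output_dims=128)(feats, is_training)

  model = hk.transform_with_state(_forward)
  num_devices = jax.local_device_count()
  feats = jnp.zeros((num_devices, 4, DEFAULT_EMBEDDING_DIMS))
  rngs = jnp.stack([jax.random.PRNGKey(0)] * num_devices)

  # is_training is a Python bool used in control flow, so it is marked static.
  init_fn = jax.pmap(model.init, axis_name='i', static_broadcasted_argnums=2)
  params, state = init_fn(rngs, feats, True)
  apply_fn = jax.pmap(model.apply, axis_name='i', static_broadcasted_argnums=4)
  (z, h), state = apply_fn(params, state, rngs, feats, True)
  # z and h both have shape (num_devices, 4, 128).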
| brave-main | brave/models/brave/modules.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement the standard evaluation procedure for video embeddings."""
from typing import Sequence
import chex
import tensorflow as tf
import tensorflow_datasets as tfds
from brave.datasets import augmentations
from brave.datasets import datasets
from brave.datasets import media_sequences
from brave.evaluate import eval_datasets
from brave.evaluate import evaluate
DEFAULT_EVAL_BATCH_SIZE = 1
DEFAULT_EVAL_NUM_TRAIN_EPOCHS = 10
DEFAULT_TRAIN_MIN_CROP_WINDOW_AREA = 0.3
DEFAULT_TRAIN_MAX_CROP_WINDOW_AREA = 1.0
DEFAULT_TRAIN_MIN_CROP_WINDOW_ASPECT_RATIO = 0.5
DEFAULT_TRAIN_MAX_CROP_WINDOW_ASPECT_RATIO = 2.0
DEFAULT_TEST_INITIAL_RESIZE = 256
DEFAULT_TEST_NUM_TEMPORAL_CROPS = 10
DEFAULT_TEST_NUM_SPATIAL_CROPS = 3
@chex.dataclass
class VideoConfig:
num_frames: int
image_size: int
video_step: int
def evaluate_video_embedding(
train_dataset_shards: Sequence[str],
test_dataset_shards: Sequence[str],
embedding_fn: evaluate.EmbeddingFn,
config: VideoConfig,
svm_regularization: float,
batch_size: int = DEFAULT_EVAL_BATCH_SIZE,
shard_reader: media_sequences.ShardReaderFn = media_sequences
.tf_record_shard_reader,
) -> evaluate.EvaluationResults:
"""Standardized evaluation for embeddings."""
train_ds = eval_datasets.random_sampling_dataset(
train_dataset_shards,
image_size=config.image_size,
num_video_frames=config.num_frames,
video_step=config.video_step,
min_crop_window_area=DEFAULT_TRAIN_MIN_CROP_WINDOW_AREA,
max_crop_window_area=DEFAULT_TRAIN_MAX_CROP_WINDOW_AREA,
min_crop_window_aspect_ratio=DEFAULT_TRAIN_MIN_CROP_WINDOW_ASPECT_RATIO,
max_crop_window_aspect_ratio=DEFAULT_TRAIN_MAX_CROP_WINDOW_ASPECT_RATIO,
shuffle=True,
shard_reader=shard_reader)
train_ds = train_ds.map(_transform_train, num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.repeat(DEFAULT_EVAL_NUM_TRAIN_EPOCHS)
train_ds = train_ds.batch(batch_size)
train_ds = tfds.as_numpy(train_ds)
test_ds = eval_datasets.multiple_crop_dataset(
test_dataset_shards,
num_temporal_crops=DEFAULT_TEST_NUM_TEMPORAL_CROPS,
num_spatial_crops=DEFAULT_TEST_NUM_SPATIAL_CROPS,
num_video_frames=config.num_frames,
video_step=config.video_step,
initial_resize=DEFAULT_TEST_INITIAL_RESIZE,
center_crop_size=config.image_size,
shuffle=False,
shard_reader=shard_reader)
test_ds = test_ds.map(_transform_test, num_parallel_calls=tf.data.AUTOTUNE)
test_ds = test_ds.batch(batch_size)
test_ds = tfds.as_numpy(test_ds)
group_size = DEFAULT_TEST_NUM_TEMPORAL_CROPS * DEFAULT_TEST_NUM_SPATIAL_CROPS
return evaluate.linear_svm_classifier(
train_ds,
test_ds,
embedding_fn,
test_predictions_group_size=group_size,
svm_regularization=svm_regularization)
def _transform_train(batch: datasets.MiniBatch) -> datasets.MiniBatch:
"""Transform the train set."""
def augment(view):
view = augmentations.normalize_video(view)
view = augmentations.random_horizontal_flip_video(view)
view = augmentations.random_color_augment_video(
view, prob_color_augment=0.8, prob_color_drop=0.2)
return view
return datasets.MiniBatch(views={
view_name: augment(view) for view_name, view in batch.views.items()
})
def _transform_test(batch: datasets.MiniBatch) -> datasets.MiniBatch:
"""Transform the test set."""
return datasets.MiniBatch(
views={
view_name: augmentations.normalize_video(view)
for view_name, view in batch.views.items()
})
| brave-main | brave/evaluate/evaluate_video_embedding.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for evaluate."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from brave.datasets import datasets
from brave.evaluate import evaluate
class EvaluateTest(parameterized.TestCase):
def test_evaluate_with_linear_svm_classifier(self):
train_dataset = _fake_dataset()
def embedding_fn(view):
batch_size = view.video.shape[0]
return np.zeros([batch_size, 10])
test_dataset = train_dataset
result = evaluate.linear_svm_classifier(train_dataset, test_dataset,
embedding_fn)
self.assertAlmostEqual(result.test.top_one_accuracy, 0.33333333333)
self.assertAlmostEqual(result.test.top_five_accuracy, 1.0)
def test_compute_accuracy_metrics(self):
labels = np.array([[0], [2], [1], [0]])
predictions = np.array([
[1.00, 0.00, 0.00, 0.00, 0.00, 0.00], # top-1 correct
[0.10, 0.10, 0.80, 0.00, 0.00, 0.00], # top-1 correct
[0.01, 0.02, 0.03, 0.04, 0.05, 0.85], # top-5 correct
[0.01, 0.02, 0.03, 0.04, 0.05, 0.85], # top-6 correct
])
results = evaluate._compute_accuracy_metrics(labels, predictions)
# Two out of four are top-1 correct.
self.assertEqual(results.top_one_accuracy, 2.0 / 4.0)
self.assertEqual(results.top_five_accuracy, 3.0 / 4.0)
def test_average_test_predictions_by_group(self):
predictions = np.array([
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0], # clip 1 example 1
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0], # clip 2 example 1
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0], # clip 1 example 2
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0], # clip 2 example 2
])
labels = np.array([[0], [0], [1], [1]])
avg_predictions, new_labels = evaluate._average_test_predictions_by_group(
2, predictions, labels)
expected_predictions = np.array([
[0.5, 0.5, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
])
expected_labels = np.array([[0], [1]])
np.testing.assert_almost_equal(expected_predictions, avg_predictions)
np.testing.assert_almost_equal(expected_labels, new_labels)
with self.assertRaises(ValueError):
# 4 is not divisible by 3.
evaluate._average_test_predictions_by_group(3, predictions, labels)
def _fake_dataset():
return [
datasets.MiniBatch(
views={
'default':
datasets.View(
video=np.zeros([3, 2, 4, 4, 3]),
audio=None,
labels=np.array([[0], [1], [2]]),
)
},)
]
if __name__ == '__main__':
absltest.main()
| brave-main | brave/evaluate/evaluate_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| brave-main | brave/evaluate/__init__.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Package providing functions for evaluation."""
from typing import Callable, Iterable, NamedTuple, Tuple
from absl import logging
import chex
import numpy as np
import sklearn
from sklearn import preprocessing
import sklearn.svm
from brave.datasets import datasets
DEFAULT_BATCH_SIZE = 1
DEFAULT_LOG_INTERVAL = 100
DEFAULT_REGULARIZATION_PARAMETER = 1.0
EmbeddingFn = Callable[[datasets.View], chex.Array]
@chex.dataclass
class ClassificationResults:
"""Classification evaluation results.
Attributes:
top_one_accuracy: How often the most confident prediction is correct, in the
range [0.0, 1.0].
top_five_accuracy: How often the correct result is in the top five
predictions in the range [0.0, 1.0].
"""
top_one_accuracy: float
top_five_accuracy: float
class EvaluationResults(NamedTuple):
test: ClassificationResults
train: ClassificationResults
def linear_svm_classifier(
train_dataset: Iterable[datasets.MiniBatch],
test_dataset: Iterable[datasets.MiniBatch],
embedding_fn: EmbeddingFn,
test_predictions_group_size: int = 1,
svm_regularization: float = DEFAULT_REGULARIZATION_PARAMETER,
) -> EvaluationResults:
"""Evaluate the given embedding function for a classification dataset.
Args:
train_dataset: The dataset to fit the linear model (must contain a view
called 'default').
test_dataset: The dataset to test the linear model accuracy with (must
contain a view called 'default').
embedding_fn: The embedding maps the 'default' view in the dataset batches
to an embedding, which is used as the space for the classifier.
test_predictions_group_size: In some evaluation regimes, we break up the
test data into many clips. For example, taking 10 temporal crops and 3
spatial crops will result in each sample in the test set being split
into 30 separate examples. When evaluating, we then wish to regroup
these together and then average the prediction in order to compute the
correct metrics. This requires that the dataset be in the correct order,
so that adjacent samples in the test dataset belong to the same group.
As a basic check we ensure that the labels for each of the test samples
in a group have the same label.
svm_regularization: The regularization constant to use in the SVM. Please
see the accompanying paper for more information on selecting this value
correctly.
Returns:
The accuracy achieved by the model / embedding combination
"""
logging.info('Computing train embeddings.')
train_embeddings, train_labels = _compute_embeddings(train_dataset,
embedding_fn)
logging.info('Computed %d train embeddings.', train_embeddings.shape[0])
logging.info('Computing test embeddings.')
test_embeddings, test_labels = _compute_embeddings(test_dataset, embedding_fn)
logging.info('Computed %d test embeddings.', test_embeddings.shape[0])
logging.info('Learning a rescaler.')
scaler = preprocessing.StandardScaler().fit(train_embeddings)
logging.info('Rescaling features.')
train_embeddings = scaler.transform(train_embeddings)
test_embeddings = scaler.transform(test_embeddings)
logging.info('Fitting an SVM with regularization %f.', svm_regularization)
classifier = sklearn.svm.LinearSVC(C=svm_regularization)
classifier.fit(train_embeddings, train_labels)
logging.info('Computing predictions.')
train_predictions = classifier.decision_function(train_embeddings)
test_predictions = classifier.decision_function(test_embeddings)
logging.info('Average over groups of size: %d.', test_predictions_group_size)
test_predictions, test_labels = _average_test_predictions_by_group(
test_predictions_group_size, test_predictions, test_labels)
logging.info('Computing metrics.')
return EvaluationResults(
test=_compute_accuracy_metrics(test_labels, test_predictions),
train=_compute_accuracy_metrics(train_labels, train_predictions),
)
def _compute_embeddings(
dataset: Iterable[datasets.MiniBatch],
embedding_fn: EmbeddingFn) -> Tuple[chex.Array, chex.Array]:
"""Compute embeddings and labels for the given embedding function."""
embeddings, labels = [], []
for i, batch in enumerate(dataset):
if i % DEFAULT_LOG_INTERVAL == 0:
logging.info('Completed %d embedding batches.', i)
if 'default' not in batch.views:
raise ValueError(
f'View named `default` not found, but is required. Got {batch.views.keys()}.'
)
view = batch.views['default']
if view.labels is None:
raise ValueError('Labels must be present for evaluation runs.')
embeddings.append(embedding_fn(view))
labels.append(view.labels)
return np.concatenate(embeddings, axis=0), np.concatenate(labels, axis=0)
def _compute_accuracy_metrics(labels: chex.Array,
predictions: chex.Array) -> ClassificationResults:
"""Compute accuracy metrics."""
sorted_predictions = np.argsort(predictions, axis=1)
assert len(labels.shape) == len(sorted_predictions.shape) == 2
top1_predictions = sorted_predictions[:, -1:]
top5_predictions = sorted_predictions[:, -5:]
return ClassificationResults(
top_one_accuracy=np.mean(top1_predictions == labels),
top_five_accuracy=np.mean(np.max(top5_predictions == labels, 1)),
)
def _average_test_predictions_by_group(
group_size: int, predictions: chex.Array,
labels: chex.Array) -> Tuple[chex.Array, chex.Array]:
"""Average contiguous predictions together."""
if predictions.shape[0] % group_size != 0:
raise ValueError('Predictions must be divisible by group size.')
predictions = predictions.reshape((-1, group_size) +
tuple(predictions.shape[1:]))
labels = labels.reshape((-1, group_size) + tuple(labels.shape[1:]))
averaged_predictions = predictions.mean(axis=1)
# The labels in each group should be identical; an easy way to check this
# is that the min and max agree.
labels_min = labels.min(axis=1)
labels_max = labels.max(axis=1)
np.testing.assert_equal(labels_min, labels_max)
return averaged_predictions, labels_min
| brave-main | brave/evaluate/evaluate.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement datasets specifically for use in evaluation."""
from typing import Dict, Sequence
import tensorflow as tf
from brave.datasets import datasets as brave_datasets
from brave.datasets import media_sequences
from brave.datasets import sampling
from brave.datasets import time_sampling
from brave.datasets import video_sampling
def random_sampling_dataset(
shards: Sequence[str],
*,
num_video_frames: int,
video_step: int,
image_size: int,
min_crop_window_area: float,
max_crop_window_area: float,
min_crop_window_aspect_ratio: float,
max_crop_window_aspect_ratio: float,
shuffle: bool = False,
shard_reader: media_sequences.ShardReaderFn = media_sequences
.tf_record_shard_reader
) -> tf.data.Dataset:
"""A dataset that uses random cropping.
For each clip in the underlying data, we return a single cropped sample,
sampled at random. We uniformly sample a start time in the video, and randomly
sample a crop window, which is then resized to `image_size` X `image_size`.
Args:
shards: The paths containing the dataset shards to read.
num_video_frames: The required number of frames in the resulting videos.
video_step: The gap between the frames sampled into the output video.
image_size: Returned videos will have resolution `image_size` X
`image_size`.
min_crop_window_area: The minimum area of the source clip that the random
crop must occupy.
max_crop_window_area: The maximum area of the source clip that the random
crop may occupy.
min_crop_window_aspect_ratio: The minimum aspect ratio that the sampled crop
window is allowed to have (before resizing).
max_crop_window_aspect_ratio: The maximum aspect ratio that the sampled crop
window is allowed to have (before resizing).
shuffle: Whether or not to shuffle the resulting dataset.
shard_reader: The reader taking shard paths and returning a dataset over
encoded records.
Returns:
A tensorflow dataset with no batch dimensions, containing a single view
named 'default' with the results of the random sampling.
"""
def sampler(sequence):
return _random_sampler(
sequence, num_video_frames=num_video_frames, video_step=video_step)
def decoder(sequences):
return _random_cropping_decoder(
sequences,
image_size=image_size,
min_crop_window_area=min_crop_window_area,
max_crop_window_area=max_crop_window_area,
min_crop_window_aspect_ratio=min_crop_window_aspect_ratio,
max_crop_window_aspect_ratio=max_crop_window_aspect_ratio,
)
return brave_datasets.multi_view_dataset(
shards,
features=[
media_sequences.FeatureKind.VIDEO, media_sequences.FeatureKind.LABELS
],
view_sampler=sampler,
view_decoder=decoder,
shuffle=shuffle,
shard_reader=shard_reader)
def multiple_crop_dataset(
shards: Sequence[str],
*,
num_temporal_crops: int,
num_spatial_crops: int,
num_video_frames: int,
video_step: int,
initial_resize: int,
center_crop_size: int,
shuffle: bool,
shard_reader: media_sequences.ShardReaderFn = media_sequences
.tf_record_shard_reader
) -> tf.data.Dataset:
"""A dataset giving many deterministic crops batched together for each clip.
Args:
shards: The sharded paths to the source data to load.
num_temporal_crops: The number of temporal crops to perform. If this is one,
then the crop starts at the beginning of the sequence. If it is two, the
first crop starts at the beginning, and the second is as close to the end
as possible. For values greater than two, the crops are evenly spaced
between those two endpoints.
num_spatial_crops: Crop the video horizontally in this number of crops. The
sampling logic is the same as the temporal sampling.
num_video_frames: The number of video frames in each returned clip.
video_step: The gap between video frames as sampled from the source data.
initial_resize: When reading raw data, the videos are first resized so that
their shortest edge matches this value.
center_crop_size: After the initial resize, crops of this size are sampled
from the center of the video (note that when there are multiple spatial
crops, these are sampled according to the logic given above).
shuffle: Whether or not to shuffle the resulting data.
shard_reader: The reader taking shards and returning a tf.data.Dataset over
serialized records.
Returns:
A dataset where, for each clip in the source data, `num_spatial_crops` X
`num_temporal_crops` individual samples are returned, each containing
exactly one clip.
"""
def sampler(sequence):
return {'default': sequence}
def decoder(sequences):
return _multi_crop_decoder(
sequences,
num_temporal_crops=num_temporal_crops,
num_spatial_crops=num_spatial_crops,
num_video_frames=num_video_frames,
video_step=video_step,
initial_resize=initial_resize,
center_crop_size=center_crop_size)
ds = brave_datasets.multi_view_dataset(
shards,
features=[
media_sequences.FeatureKind.VIDEO, media_sequences.FeatureKind.LABELS
],
view_sampler=sampler,
view_decoder=decoder,
shuffle=shuffle,
shard_reader=shard_reader)
# The decoder above adds a batch dimension for each of the multiple crops.
# For consistency with the other datasets, we now remove it.
ds = ds.unbatch()
return ds
def _random_sampler(
sequence: media_sequences.EncodedSequence, num_video_frames: int,
video_step: int) -> Dict[str, media_sequences.EncodedSequence]:
"""Random sample the given number of frames.
Args:
sequence: The sequence to sample from.
num_video_frames: The number of frames to sample.
video_step: The gap between frames as sampled from the sequence.
Returns:
A single sequence encoded as 'default'.
"""
min_frames_required = (num_video_frames - 1) * video_step + 1
sequence = media_sequences.extend_sequence(sequence, min_frames_required)
result = time_sampling.random_sample_sequence_using_video(
num_video_frames=num_video_frames,
video_frame_step=video_step,
sequence=sequence)
return {'default': result.sequence}
def _random_cropping_decoder(
sequences: Dict[str, media_sequences.EncodedSequence],
*,
image_size: int,
min_crop_window_area: float,
max_crop_window_area: float,
min_crop_window_aspect_ratio: float,
max_crop_window_aspect_ratio: float,
) -> Dict[str, brave_datasets.View]:
"""Randomly crop from an underlying sequence."""
result = {}
for view_name, sequence in sequences.items():
image_shape = tf.image.extract_jpeg_shape(sequence.jpeg_encoded_images[0])
crop_window = video_sampling.random_sample_crop_window(
image_shape,
min_area=min_crop_window_area,
max_area=max_crop_window_area,
min_aspect_ratio=min_crop_window_aspect_ratio,
max_aspect_ratio=max_crop_window_aspect_ratio)
video = video_sampling.decode_crop_resize_images(
sequence.jpeg_encoded_images,
crop_window,
image_size=(image_size, image_size))
result[view_name] = brave_datasets.View(
video=video, labels=sequence.labels, audio=None)
return result
def _multi_crop_decoder(
sequences: Dict[str, media_sequences.EncodedSequence],
num_temporal_crops: int, num_spatial_crops: int, num_video_frames: int,
video_step: int, initial_resize: int,
center_crop_size: int) -> Dict[str, brave_datasets.View]:
"""Sample a sequence multiple times, spatially and temporally."""
result = {}
for view_name, sequence in sequences.items():
result[view_name] = _multi_crop_view_decoder(sequence, num_temporal_crops,
num_spatial_crops,
num_video_frames, video_step,
initial_resize,
center_crop_size)
return result
def _multi_crop_view_decoder(sequence, num_temporal_crops, num_spatial_crops,
num_video_frames, video_step, initial_resize,
center_crop_size) -> brave_datasets.View:
"""Extract multiple temporal and spatial crops from a sequence.
Args:
sequence: The sequence to sample from.
num_temporal_crops: The number of temporal crops to take.
num_spatial_crops: The number of spatial crops to take. These crops are
currently always taken horizontally.
num_video_frames: The number of video frames each resulting sample will
contain.
video_step: The step between the video frames in the resulting sequence.
initial_resize: When decoding the video, the frames will first be resized so
that their shortest edge has this size.
center_crop_size: When decoding the videos, the videos are first resized by
`initial_resize`. We then split the video horizontally into
`num_spatial_crops` crops, each crop having width `center_crop_size`. The
height for each of the crops is always sampled from the center of the
(resized) video to a matching size of `center_crop_size`.
Returns:
A view containing videos of shape (N, T, H, W, 3), where N =
`num_spatial_crops` X `num_temporal_crops`.
T is `num_video_frames`, H = W = `center_crop_size`.
"""
min_frames_required = (num_video_frames - 1) * video_step + 1
sequence = media_sequences.extend_sequence(sequence, min_frames_required)
sequences = _extract_temporal_crops(sequence, num_temporal_crops,
num_video_frames, video_step)
videos = []
for subsequence in sequences:
video = tf.map_fn(
tf.io.decode_jpeg,
subsequence.jpeg_encoded_images,
fn_output_signature=tf.uint8)
video = video_sampling.resize_min(video, initial_resize)
video_height = tf.shape(video)[-3]
video_width = tf.shape(video)[-2]
horizontal_indices = sampling.compute_linearly_spaced_sample_indices(
video_width, num_spatial_crops, center_crop_size, step=1)
vertical_indices = sampling.compute_linearly_spaced_sample_indices(
video_height, num_spatial_crops, center_crop_size, step=1)
for vidx, hidx in zip(vertical_indices, horizontal_indices):
v_start = vidx.start_index
v_end = vidx.start_index + center_crop_size
h_start = hidx.start_index
h_end = hidx.start_index + center_crop_size
video_sample = video[:, v_start:v_end, h_start:h_end, :]
video_sample = tf.cast(video_sample, tf.float32)
videos.append(video_sample)
return brave_datasets.View(
video=tf.stack(videos, axis=0),
audio=None,
labels=tf.tile(sequence.labels[tf.newaxis], (len(videos), 1)))
def _extract_temporal_crops(
sequence, num_temporal_crops, num_video_frames,
video_frame_step) -> Sequence[media_sequences.EncodedSequence]:
sequence_length = tf.shape(sequence.jpeg_encoded_images)[0]
temporal_crop_indices = sampling.compute_linearly_spaced_sample_indices(
sequence_length, num_temporal_crops, num_video_frames, video_frame_step)
return [
time_sampling.get_subsequence_by_video_indices(sequence, indices)
for indices in temporal_crop_indices
]
| brave-main | brave/evaluate/eval_datasets.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for evaluate video embedding."""
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from brave.datasets import datasets
from brave.datasets import fixtures
from brave.evaluate import evaluate_video_embedding
class EvaluateVideoEmbeddingTest(parameterized.TestCase):
def test_evaluate_embedding(self):
with tempfile.TemporaryDirectory() as fixture_dir:
shards = fixtures.write_tf_record_dataset_fixture(fixture_dir)
rng = jax.random.PRNGKey(0)
def fake_embedding(view, is_training):
del is_training
video = view.video
chex.assert_rank(video, 5) # B, T, H, W, C
flat_video = jnp.reshape(video, (video.shape[0], -1))
feats = flat_video[..., :16]
chex.assert_shape(feats, (None, 16))
return hk.Linear(2048)(feats)
fake_embedding_fn = hk.transform(fake_embedding)
view = datasets.View(
labels=None,
audio=None,
video=np.zeros((1, 2, 8, 8, 3)),
)
params = fake_embedding_fn.init(rng, view, True)
def embedding_fn(view):
return fake_embedding_fn.apply(params, rng, view, False)
train_shards = shards
test_shards = shards
config = evaluate_video_embedding.VideoConfig(
num_frames=2,
image_size=8,
video_step=1,
)
results = evaluate_video_embedding.evaluate_video_embedding(
train_shards,
test_shards,
embedding_fn,
config,
svm_regularization=1.0)
self.assertLessEqual(results.test.top_one_accuracy, 1.0)
self.assertGreaterEqual(results.test.top_one_accuracy, 0.0)
if __name__ == '__main__':
absltest.main()
| brave-main | brave/evaluate/evaluate_video_embedding_test.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for eval datasets."""
import tempfile
from absl.testing import absltest
from brave.datasets import fixtures
from brave.evaluate import eval_datasets
class DatasetsTest(absltest.TestCase):
def test_multiview_sampling_dataset(self):
with tempfile.TemporaryDirectory() as fixture_dir:
shards = fixtures.write_tf_record_dataset_fixture(fixture_dir)
ds = eval_datasets.multiple_crop_dataset(
shards,
num_temporal_crops=3,
num_spatial_crops=2,
num_video_frames=4,
video_step=2,
initial_resize=224,
center_crop_size=128,
shuffle=False)
seen_batches = 0
for batch in ds:
self.assertEqual(batch.views['default'].video.shape, (4, 128, 128, 3))
self.assertEqual(batch.views['default'].labels.shape, (1,))
seen_batches += 1
self.assertEqual(seen_batches, 18)
def test_random_sampling_dataset(self):
with tempfile.TemporaryDirectory() as fixture_dir:
shards = fixtures.write_tf_record_dataset_fixture(fixture_dir)
ds = eval_datasets.random_sampling_dataset(
shards,
num_video_frames=3,
video_step=2,
image_size=128,
min_crop_window_area=0.4,
max_crop_window_area=0.6,
min_crop_window_aspect_ratio=0.3,
max_crop_window_aspect_ratio=0.6)
for batch in ds:
self.assertEqual(batch.views['default'].video.shape, (3, 128, 128, 3))
if __name__ == '__main__':
absltest.main()
| brave-main | brave/evaluate/eval_datasets_test.py |
""" This script generates docs in markdown format for randomkit.* """
import numpy
import re
def getDocStrings(funcNames, exceptions):
for funcName in funcNames:
func = getattr(numpy.random, funcName, None)
if not func:
print("Could not find numpy docstring for %s" % (funcName,))
continue
if funcName[0] == '_':
continue
if funcName in exceptions:
continue
docLines = func.__doc__.strip().split("\n")
funcSig = re.sub("=[^,)]+", "", docLines[0])
funcSig = re.sub(",?\s*size", "", funcSig)
funcSig = re.sub("\(", "([output], ", funcSig)
funcSig = "randomkit." + funcSig
doc = "\n".join(x.strip() for x in docLines[1:])
doc = re.sub(">>>", " $", doc)
doc = re.sub("\.\.\. ", " ", doc)
doc = re.sub("#", "", doc)
doc = re.sub("\]_", "]", doc)
doc = re.sub('.+ : .+',"* \g<0>", doc)
doc = re.sub('\.\. \[(\d+?)\]', "\g<1>.", doc)
doc = re.sub('(.+?)\n-+', "####\g<1>", doc)
doc = re.sub('`(.+)\n<(.+)>`_', "1. \g<1>, \g<2>", doc)
doc = re.sub(':math:`(.+?)`', "\\\\\(\g<1>\\\\\)", doc)
doc = re.sub('\.\. math::(.+?)\n\n', "$$\g<1>$$\n\n", doc, flags=re.S)
doc = re.sub(' \$ (.+)\n([^ ])', ' $ \g<1>\n\n\g<2>', doc)
doc = re.sub('^([^ \n].+?)\n \$', '\g<1>\n\n $', doc, flags=re.M)
doc += "\n"
yield funcName, funcSig, doc
def writeMDdoc(funcNames, funcInfo, introFile, docFile):
with open(introFile, 'r') as f:
introduction = f.read()
with open(docFile, 'w') as f:
f.write(introduction+"\n")
f.write("#List of distributions\n")
for name, sig, doc in funcInfo:
f.write("##"+name+"\n")
f.write(sig+"\n")
f.write(doc+"\n")
print("Generated doc for " + name)
if __name__ == "__main__":
introFile = "doc/intro.md"
docFile = "README.md"
funcNames = dir(numpy.random)
excluded = ['RandomState', 'seed', 'set_state', 'get_state', 'choice', 'rand',
'randn', 'Tester', 'operator','warnings', 'info','test','bench', 'permutation',
'np', 'absolute_import', 'division', 'mtrand', 'print_function',
'random_integers', 'ranf', 'sample', 'shuffle']
funcInfo = list(getDocStrings(funcNames, excluded))
writeMDdoc(funcNames, funcInfo, introFile, docFile) | torch-randomkit-master | luasrc/generateMdDocs.py |
""" This script tries to generate docs and call tests for randomkit.* """
import scipy
import re
docFile = "doc/randomkit.html"
testFile = "tests/testCalls.lua"
exclude = ['ffi', '_check1DParams']
randomkitFuncsPath = '/Users/daniel.horgan/randomkit_funcs'
def funcTest(name, sig, doc):
match = re.match(r"(.*)\((.*)\)", sig)
func = match.group(1)
args = [x for x in match.group(2).split(",") if x.strip()]
numArgs = len(args)
yield """function myTests.test_%s()""" % (name,)
# Call with scalar args, and no result tensor
testArgs = ["0.5"] * (numArgs - 1)
if name == 'zipf':
testArgs = ["1.5"] * (numArgs - 1)
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with scalar args and a result tensor
testArgs = ["torch.Tensor(10)"] + testArgs
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with 1D tensor args and no result tensor
testArgs = ["torch.Tensor(10):fill(0.5)"] * (numArgs - 1)
if name == 'zipf':
testArgs = ["torch.Tensor(10):fill(1.5)"] * (numArgs - 1)
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with 1D tensor args and a 1D result tensor
testArgs = ["torch.Tensor(10)"] + testArgs
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with 2D tensor args and no result tensor
testArgs = ["torch.Tensor(3, 4):fill(0.5)"] * (numArgs - 1)
if name == 'zipf':
testArgs = ["torch.Tensor(3, 4):fill(1.5)"] * (numArgs - 1)
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with 2D tensor args and a 2D result tensor
testArgs = ["torch.Tensor(2, 6)"] + testArgs
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with one arg number and the rest 2D tensors, and no result tensor
# Call with 2D tensor args and no result tensor
testArgs = ["torch.Tensor(3, 4):fill(0.5)"] * (numArgs - 1)
if len(testArgs) > 1:
testArgs[0] = "0.5"
if name == 'zipf':
testArgs = ["torch.Tensor(3, 4):fill(1.5)"] * (numArgs - 1)
testArgs[0] = "1.5"
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with one arg number and the rest tensors, and a 2D result tensor
testArgs = ["torch.Tensor(2, 6)"] + testArgs
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with one too many params - should break
testArgs = ["0.5"] * numArgs
yield """ tester:assertError(function() %s(%s) end)""" % (func, ", ".join(testArgs))
yield """end"""
def funcDoc(name, sig, doc):
yield "<hr /><a id='%s'>" % (name,)
yield "<h2>%s</h2>" % (sig,)
yield "<pre>"
yield doc
yield "</pre>"
def genIndex(funcNames):
index = "<h1>torch-randomkit</h1><ul>"
for funcName in funcNames:
index += "<li><a href='#%s'>%s</a></li>" % (funcName, funcName)
index += "</ul>"
return index
def funcNames():
with open(randomkitFuncsPath, 'r') as f:
for l in f.readlines():
yield l.strip()
def getDocStrings(funcNames):
for funcName in funcNames:
func = getattr(scipy.random, funcName, None)
if not func:
print("Could not find scipy docstring for %s" % (funcName,))
continue
docLines = func.__doc__.strip().split("\n")
funcSig = re.sub("=[^,)]+", "", docLines[0])
funcSig = re.sub(",?\s*size", "", funcSig)
funcSig = re.sub("\(", "([output], ", funcSig)
funcSig = "randomkit." + funcSig
doc = "\n".join(x.strip() for x in docLines[1:])
yield funcName, funcSig, doc
def writeHTMLdoc(funcNames, funcInfo):
with open(docFile, 'w') as f:
f.write("<html>")
index = genIndex(funcNames)
f.write(index)
for name, sig, doc in funcInfo:
for line in funcDoc(name, sig, doc):
f.write(line)
print("Generated doc for " + name)
f.write("</html>")
def writeCallTests(funcNames, funcInfo):
with open(testFile, 'w') as f:
f.write("""
require 'randomkit'
local myTests = {}
local tester = torch.Tester()
""")
for name, sig, doc in funcInfo:
for line in funcTest(name, sig, doc):
f.write(line + "\n")
print("Generated tests for " + name)
f.write("""
tester:add(myTests)
tester:run()
""")
funcNames = sorted(list(set(funcNames()) - set(exclude)))
funcInfo = list(getDocStrings(funcNames))
writeHTMLdoc(funcNames, funcInfo)
writeCallTests(funcNames, funcInfo)
| torch-randomkit-master | luasrc/generateDocsAndTests.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to compute human-normalized Atari scores.
The data used in this module is human and random performance data on Atari-57.
It comprises of evaluation scores (undiscounted returns), each averaged
over at least 3 episode runs, on each of the 57 Atari games. Each episode begins
with the environment already stepped with a uniform random number (between 1 and
30 inclusive) of noop actions.
The two agents are:
* 'random' (agent choosing its actions uniformly randomly on each step)
* 'human' (professional human game tester)
Scores are obtained by averaging returns over the episodes played by each agent,
with episode length capped to 108,000 frames (i.e. timeout after 30 minutes).
The term 'human-normalized' here means a linear per-game transformation of
a game score in such a way that 0 corresponds to random performance and 1
corresponds to human performance.
"""
# pylint: disable=g-bad-import-order
import math
# Game: score-tuple dictionary. Each score tuple contains
# 0: score random (float) and 1: score human (float).
_ATARI_DATA = {
'alien': (227.8, 7127.7),
'amidar': (5.8, 1719.5),
'assault': (222.4, 742.0),
'asterix': (210.0, 8503.3),
'asteroids': (719.1, 47388.7),
'atlantis': (12850.0, 29028.1),
'bank_heist': (14.2, 753.1),
'battle_zone': (2360.0, 37187.5),
'beam_rider': (363.9, 16926.5),
'berzerk': (123.7, 2630.4),
'bowling': (23.1, 160.7),
'boxing': (0.1, 12.1),
'breakout': (1.7, 30.5),
'centipede': (2090.9, 12017.0),
'chopper_command': (811.0, 7387.8),
'crazy_climber': (10780.5, 35829.4),
'defender': (2874.5, 18688.9),
'demon_attack': (152.1, 1971.0),
'double_dunk': (-18.6, -16.4),
'enduro': (0.0, 860.5),
'fishing_derby': (-91.7, -38.7),
'freeway': (0.0, 29.6),
'frostbite': (65.2, 4334.7),
'gopher': (257.6, 2412.5),
'gravitar': (173.0, 3351.4),
'hero': (1027.0, 30826.4),
'ice_hockey': (-11.2, 0.9),
'jamesbond': (29.0, 302.8),
'kangaroo': (52.0, 3035.0),
'krull': (1598.0, 2665.5),
'kung_fu_master': (258.5, 22736.3),
'montezuma_revenge': (0.0, 4753.3),
'ms_pacman': (307.3, 6951.6),
'name_this_game': (2292.3, 8049.0),
'phoenix': (761.4, 7242.6),
'pitfall': (-229.4, 6463.7),
'pong': (-20.7, 14.6),
'private_eye': (24.9, 69571.3),
'qbert': (163.9, 13455.0),
'riverraid': (1338.5, 17118.0),
'road_runner': (11.5, 7845.0),
'robotank': (2.2, 11.9),
'seaquest': (68.4, 42054.7),
'skiing': (-17098.1, -4336.9),
'solaris': (1236.3, 12326.7),
'space_invaders': (148.0, 1668.7),
'star_gunner': (664.0, 10250.0),
'surround': (-10.0, 6.5),
'tennis': (-23.8, -8.3),
'time_pilot': (3568.0, 5229.2),
'tutankham': (11.4, 167.6),
'up_n_down': (533.4, 11693.2),
'venture': (0.0, 1187.5),
# Note the random agent score on Video Pinball is sometimes greater than the
# human score under other evaluation methods.
'video_pinball': (16256.9, 17667.9),
'wizard_of_wor': (563.5, 4756.5),
'yars_revenge': (3092.9, 54576.9),
'zaxxon': (32.5, 9173.3),
}
_RANDOM_COL = 0
_HUMAN_COL = 1
ATARI_GAMES = tuple(sorted(_ATARI_DATA.keys()))
def get_human_normalized_score(game: str, raw_score: float) -> float:
"""Converts game score to human-normalized score."""
game_scores = _ATARI_DATA.get(game, (math.nan, math.nan))
random, human = game_scores[_RANDOM_COL], game_scores[_HUMAN_COL]
return (raw_score - random) / (human - random)
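# ----------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original DeepMind file),
# using the Pong entry from _ATARI_DATA above: the random baseline (-20.7)
# maps to 0.0 and the human baseline (14.6) maps to 1.0.
if __name__ == '__main__':
  print(get_human_normalized_score('pong', 14.6))   # -> 1.0
  print(get_human_normalized_score('pong', -20.7))  # -> 0.0
  print(get_human_normalized_score('pong', -3.05))  # -> 0.5 (halfway between)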
| dqn_zoo-master | dqn_zoo/atari_data.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common functions and classes for testing."""
# pylint: disable=g-bad-import-order
from absl import flags
import dm_env
from dm_env import specs
from dqn_zoo import parts
FLAGS = flags.FLAGS
class DummyAgent(parts.Agent):
"""Agent that returns a dummy action.
Records whether it took a step or reset on a tape.
"""
def __init__(self, tape):
self._tape = tape
def reset(self):
self._tape.append('Agent reset')
def step(self, timestep):
del timestep
self._tape.append('Agent step')
return 0
def get_state(self):
return {}
def set_state(self, state):
del state
@property
def statistics(self):
return {}
class DummyEnvironment(dm_env.Environment):
"""Environment that ignores actions and generates dummy timesteps.
Records whether it took a step or reset on a tape.
"""
def __init__(self, tape, episode_length):
self._tape = tape
self._episode_length = episode_length
def reset(self):
self._t = 0
self._tape.append('Environment reset')
step_type = dm_env.StepType.FIRST
return dm_env.TimeStep(
step_type=step_type, reward=0.0, discount=0.0, observation=1.0
)
def step(self, action):
self._tape.append('Environment step (%s)' % action)
self._t += 1
if self._t == 0:
step_type = dm_env.StepType.FIRST
elif self._t == self._episode_length:
step_type = dm_env.StepType.LAST
self._t = -1
else:
step_type = dm_env.StepType.MID
discount = 0.0 if step_type == dm_env.StepType.LAST else 1.0
return dm_env.TimeStep(
step_type=step_type, reward=2.0, discount=discount, observation=1.0
)
def action_spec(self):
return specs.Array(shape=(), dtype=int)
def observation_spec(self):
return specs.Array(shape=(), dtype=float)
| dqn_zoo-master | dqn_zoo/test_utils.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for atari_data."""
# pylint: disable=g-bad-import-order
import math
from dqn_zoo import atari_data
from absl.testing import absltest
class AtariDataTest(absltest.TestCase):
def test_num_games(self):
self.assertLen(atari_data.ATARI_GAMES, 57)
def test_monotonic_scores(self):
# Test that for each game a higher raw score implies a higher normalized
# score, which implicitly tests that
# a) all game data is present
# b) human score > random score for each game.
for game in atari_data.ATARI_GAMES:
low_score = atari_data.get_human_normalized_score(game, 10.0)
high_score = atari_data.get_human_normalized_score(game, 1000.0)
self.assertGreater(high_score, low_score)
def test_returns_nan_for_unknown_games(self):
score = atari_data.get_human_normalized_score('unknown_game', 10.0)
self.assertTrue(math.isnan(score))
if __name__ == '__main__':
absltest.main()
| dqn_zoo-master | dqn_zoo/atari_data_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for processors."""
# pylint: disable=g-bad-import-order
import collections
import hashlib
import typing
import chex
import dm_env
from dm_env import test_utils
import numpy as np
from dqn_zoo import gym_atari
from dqn_zoo import processors
from absl.testing import absltest
from absl.testing import parameterized
F = dm_env.StepType.FIRST
M = dm_env.StepType.MID
L = dm_env.StepType.LAST
class FixedPaddedBufferTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.buffer = processors.FixedPaddedBuffer(length=4, initial_index=-1)
def test_basic(self):
self.assertEqual([None, None, None, 1], self.buffer(1))
self.assertEqual([2, None, None, None], self.buffer(2))
self.assertEqual([2, 3, None, None], self.buffer(3))
self.assertEqual([2, 3, 4, None], self.buffer(4))
self.assertEqual([2, 3, 4, 5], self.buffer(5))
self.assertEqual([6, None, None, None], self.buffer(6))
def test_reset(self):
for i in range(3):
self.buffer(i)
self.buffer.reset()
self.assertEqual([None, None, None, -1], self.buffer(-1))
def make_timesteps_from_step_types(step_types):
def make_timestep(step_type):
return dm_env.TimeStep(
step_type=step_type,
observation=0,
reward=None if step_type == dm_env.StepType.FIRST else 0,
discount=None if step_type == dm_env.StepType.FIRST else 0,
)
return [make_timestep(st) for st in step_types]
class TimestepBufferConditionTest(parameterized.TestCase):
def test_basic(self):
step_types_and_expected = [
([None, None, None, F], True),
([M, None, None, None], False),
([M, M, None, None], False),
([M, M, M, None], False),
([M, M, M, M], True),
([M, None, None, None], False),
([M, M, None, None], False),
([M, M, M, None], False),
([M, M, M, M], True),
([M, None, None, None], False),
([M, L, None, None], True),
]
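# The condition should return True whenever the buffer holds a FIRST or LAST timestep, or once `period` timesteps have accumulated.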
processor = processors.TimestepBufferCondition(period=4)
for step_types, expected in step_types_and_expected:
timesteps = make_timesteps_from_step_types(step_types)
self.assertEqual(expected, processor(timesteps))
@parameterized.parameters(
# Can't have F & L occur in same sequence.
[
[[None, None, F], [F, None, None], [F, M, None], [F, M, L]],
],
# Can't have two F's occur in same sequence.
[
[[None, None, F], [F, None, None], [F, M, None], [F, M, F]],
],
)
def test_errors_with_multiple_first_or_last(self, step_types_list):
processor = processors.TimestepBufferCondition(period=3)
for step_types in step_types_list[:-1]:
timesteps = make_timesteps_from_step_types(step_types)
_ = processor(timesteps)
last_timesteps = make_timesteps_from_step_types(step_types_list[-1])
with self.assertRaisesRegex(RuntimeError, 'at most one FIRST or LAST'):
_ = processor(last_timesteps)
def test_errors_if_no_reset_after_last(self):
step_types_list = [
([None, None, None, F]),
([M, None, None, None]),
([M, L, None, None]),
([M, L, F, None]),
]
processor = processors.TimestepBufferCondition(period=3)
for step_types in step_types_list[:-1]:
timesteps = make_timesteps_from_step_types(step_types)
_ = processor(timesteps)
last_timesteps = make_timesteps_from_step_types(step_types_list[-1])
with self.assertRaisesRegex(RuntimeError, 'Should have reset'):
_ = processor(last_timesteps)
def make_timestep_from_step_type_string(step_type_str, observation):
if step_type_str == 'f':
return dm_env.restart(observation=observation)
elif step_type_str == 'm':
return dm_env.transition(reward=0, observation=observation)
elif step_type_str == 'l':
return dm_env.termination(reward=0, observation=observation)
else:
raise ValueError('Unknown step type string %s.' % step_type_str)
class ActionRepeatsTest(absltest.TestCase):
"""Tests action repeats can be implemented."""
def setUp(self):
super().setUp()
num_repeats = 4
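# The pipeline below buffers num_repeats timesteps, emits only when the buffer triggers (a full period or an episode boundary), zero-pads missing entries and stacks them; otherwise it yields None.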
self.processor = processors.Sequential(
processors.FixedPaddedBuffer(length=num_repeats, initial_index=-1),
processors.ConditionallySubsample(
processors.TimestepBufferCondition(period=num_repeats)
),
processors.Maybe(
processors.Sequential(
processors.none_to_zero_pad,
processors.named_tuple_sequence_stack,
),
),
)
def test_basic(self):
sequence = [
('f', '0001'),
('m', None),
('m', None),
('m', None),
('m', '2345'),
('m', None),
('l', '6700'),
('f', '0008'),
('m', None),
]
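# Expected observation strings show the four stacked observations per emitted timestep, with 0 marking zero padding (e.g. '0001' is three padded slots plus observation 1); None means nothing was emitted on that step.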
prev_timestep = None
for i, (step_type_str, expected_obs_str) in enumerate(sequence, start=1):
if prev_timestep and prev_timestep.last():
self.processor.reset()
timestep = make_timestep_from_step_type_string(step_type_str, i)
processed = self.processor(timestep)
if processed is None:
obs_str = None
else:
obs_str = ''.join(str(o) for o in processed.observation)
self.assertEqual(expected_obs_str, obs_str)
prev_timestep = timestep
def test_exception_raised_if_reset_not_called_between_last_and_first(self):
sequence = list('fmmmmmlfm')
with self.assertRaisesRegex(RuntimeError, 'reset'):
for i, step_type_str in enumerate(sequence, start=1):
timestep = make_timestep_from_step_type_string(step_type_str, i)
self.processor(timestep)
Pair = collections.namedtuple('Pair', ['a', 'b'])
class ApplyToNamedTupleFieldTest(absltest.TestCase):
def test_basic_usage(self):
pair = Pair(a=1, b=2)
processor = processors.ApplyToNamedTupleField('a', lambda x: x + 10)
self.assertEqual(Pair(a=11, b=2), processor(pair))
class ZeroDiscountOnLifeLossTest(parameterized.TestCase):
@parameterized.named_parameters(
('no_loss', 'fmmmmm', 'n11111', '333333', 'fmmmmm', 'n11111'),
('one_loss', 'fmmmmm', 'n11111', '333222', 'fmmmmm', 'n11011'),
('two_losses', 'fmmmmm', 'n11111', '332211', 'fmmmmm', 'n10101'),
('episode_end', 'fmmlfm', '1110n1', '333355', 'fmmlfm', '1110n1'),
('episode_end_same', 'fmmlfm', '1110n1', '333333', 'fmmlfm', '1110n1'),
)
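# Each parameter string encodes one step per character: input step types (f/m/l), input discounts ('n' for None), lives counts, then the expected step types and discounts; losing a life should zero the discount on the step where it happens.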
def test_basic(
self,
input_step_types_str,
input_discounts_str,
input_lives_str,
expected_step_types_str,
expected_discounts_str,
):
processor = processors.ZeroDiscountOnLifeLoss()
step_type_map = {'f': F, 'm': M, 'l': L}
for timestep_part in zip(
input_step_types_str,
input_discounts_str,
input_lives_str,
expected_step_types_str,
expected_discounts_str,
):
(
input_step_type,
input_discount,
input_lives,
expected_step_type,
expected_discount,
) = timestep_part
input_timestep = dm_env.TimeStep(
step_type=step_type_map[input_step_type],
reward=8,
discount=None if input_discount == 'n' else float(input_discount),
observation=(9, int(input_lives)),
)
output_timestep = processor(input_timestep)
self.assertEqual(
step_type_map[expected_step_type], output_timestep.step_type
)
self.assertEqual(
None if expected_discount == 'n' else float(expected_discount),
output_timestep.discount,
)
class ReduceStepTypeTest(parameterized.TestCase):
@parameterized.parameters(
([0, 0, 0, F], F),
([M, M, M, L], L),
([M, M, L, 0], L),
([M, M, M, M], M),
)
def test_valid_cases(self, step_types, expected_step_type):
self.assertEqual(
expected_step_type,
processors.reduce_step_type(np.asarray(step_types), debug=True),
)
@parameterized.parameters(
([0, 0, 0, M],),
([0, 0, 0, L],),
([M, 0, 0, 0],),
([L, 0, 0, M],),
([M, L, F, M],),
)
def test_invalid_cases(self, step_types):
with self.assertRaises(ValueError):
processors.reduce_step_type(np.asarray(step_types), debug=True)
class AggregateRewardsTest(parameterized.TestCase):
@parameterized.parameters(
([None], None),
([0, None], None),
([0, 0, None], None),
([0, 0, 0, None], None),
([0], 0),
([1], 1),
([1, 2], 3),
([1, 2, 3], 6),
([1, -2, 3], 2),
)
def test_basic(self, rewards, expected):
self.assertEqual(
expected, processors.aggregate_rewards(rewards, debug=True)
)
@parameterized.parameters(
([1.0, None],),
([0.0, 1.0, None],),
([1.0, 0.0, None],),
)
def test_error_raised_in_debug_with_none_and_no_zero_padding(self, rewards):
with self.assertRaisesRegex(ValueError, 'None.*FIRST'):
processors.aggregate_rewards(rewards, debug=True)
class AggregateDiscountsTest(parameterized.TestCase):
@parameterized.parameters(
([None], None),
([0, None], None),
([0, 0, None], None),
([0, 0, 0, None], None),
([0], 0),
([1], 1),
([1, 1], 1),
([1, 1, 1], 1),
([1, 1, 0], 0),
)
def test_basic(self, discounts, expected):
self.assertEqual(
expected, processors.aggregate_discounts(discounts, debug=True)
)
@parameterized.parameters(
([1.0, None],),
([0.0, 1.0, None],),
([1.0, 0.0, None],),
)
def test_error_raised_in_debug_with_none_and_no_zero_padding(self, discounts):
with self.assertRaisesRegex(ValueError, 'None.*FIRST'):
processors.aggregate_discounts(discounts, debug=True)
class ClipRewardTest(parameterized.TestCase):
@parameterized.parameters(
(0, 0),
(1, 1),
(-1, -1),
(-2.5, -2),
(2.5, 2),
(None, None),
)
def test_basic(self, reward, expected):
self.assertEqual(expected, processors.clip_reward(2)(reward))
class AgentWithPreprocessing:
"""Agent that does standard Atari preprocessing.
Returns actions `0, 1, ..., num_actions - 1, 0, 1, ...` unless the processor
returns `None`, in which case the agent repeats the previous action.
"""
def __init__(self, num_actions):
self._processor = processors.atari()
self._num_actions = num_actions
self._action = None
def reset(self):
processors.reset(self._processor)
self._action = None
def step(self, timestep):
processed_timestep = self._processor(timestep)
# Repeat previous action if processed timestep is None.
if processed_timestep is None:
return self._action
# This block would normally contain the forward pass through the network.
if self._action is None:
self._action = 0
else:
self._action = (self._action + 1) % self._num_actions
return self._action
class AtariTest(absltest.TestCase):
def test_can_use_in_an_agent(self):
"""Example of using Atari processor on the agent side."""
env = gym_atari.GymAtari('pong', seed=1)
action_spec = env.action_spec()
agent = AgentWithPreprocessing(num_actions=action_spec.num_values)
agent.reset()
timestep = env.reset()
actions = []
for _ in range(20):
action = agent.step(timestep)
timestep = env.step(action)
assert not timestep.last()
actions.append(action)
self.assertEqual(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4], actions
)
def test_default_on_fixed_input(self):
"""End-to-end test on fixed input.
This tests that processors (mainly the observation processors) do not change
behavior due to updates in underlying library functions.
"""
# Create environment just for the observation spec.
env = gym_atari.GymAtari('pong', seed=1)
rgb_spec, unused_lives_spec = env.observation_spec()
random_state = np.random.RandomState(seed=1)
# Generate timesteps with fixed data to feed into processor.
def generate_rgb_obs():
return random_state.randint(
0, 256, size=rgb_spec.shape, dtype=rgb_spec.dtype
)
step_types = [F, M, M, M, M]
rewards = [None, 0.5, 0.2, 0, 0.1]
discounts = [None, 0.9, 0.9, 0.9, 0.9]
rgb_obs = [generate_rgb_obs() for _ in range(len(step_types))]
lives_obs = [3, 3, 3, 3, 3]
timesteps = []
for i in range(len(step_types)):
timesteps.append(
dm_env.TimeStep(
step_type=step_types[i],
reward=rewards[i],
discount=discounts[i],
observation=(rgb_obs[i], lives_obs[i]),
)
)
def hash_array(array):
return hashlib.sha256(array).hexdigest()
# Make sure the generated observation data is fixed and the random number
# generator has not changed underneath us, which would cause the test to fail.
hash_rgb_obs = [hash_array(obs) for obs in rgb_obs]
expected_hashes = [
'250557b2184381fc2ec541fc313127050098fce825a6e98a728c2993874db300',
'db8054ca287971a0e1264bfbc5642233085f1b27efbca9082a29f5be8a24c552',
'7016e737a257fcdb77e5f23daf96d94f9820bd7361766ca7b1401ec90984ef71',
'356dfcf0c6eaa4e2b5e80f4611375c0131435cc22e6a413b573818d7d084e9b2',
'73078bedd438422ad1c3dda6718aa1b54f6163f571d2c26ed714c515a6372159',
]
assert hash_rgb_obs == expected_hashes, (hash_rgb_obs, expected_hashes)
# Run timesteps through processor.
processor = processors.atari()
for timestep in timesteps:
processed = processor(timestep)
# Assert the returned timestep is not None, and tell pytype.
self.assertIsNotNone(processed)
processed = typing.cast(dm_env.TimeStep, processed)
# Compare with expected timestep, just the hash for the observation.
self.assertEqual(dm_env.StepType.MID, processed.step_type)
self.assertAlmostEqual(0.5 + 0.2 + 0.0 + 0.1, processed.reward)
self.assertAlmostEqual(0.9**4 * 0.99, processed.discount)
processed_obs_hash = hash_array(processed.observation.flatten())
# Note the algorithm used for image resizing can have a noticeable impact on
# learning performance. This test helps ensure changes to image processing
# are intentional.
self.assertEqual(
'0d158a8f45aa09aa6fad0354d2eb1fc0e3f57add88e772f3b71f54819d8200aa',
processed_obs_hash,
)
class AtariEnvironmentWrapperTest(parameterized.TestCase):
@parameterized.named_parameters(
('grayscaling', True, 'grayscale', (84, 84, 4)),
('no_grayscaling', False, 'RGB', (84, 84, 3, 4)),
)
def test_atari_grayscaling_observation_spec(
self, grayscaling, expected_name, expected_shape
):
env = gym_atari.GymAtari('pong', seed=1)
env = processors.AtariEnvironmentWrapper(
environment=env, grayscaling=grayscaling
)
spec = env.observation_spec()
self.assertEqual(spec.shape, expected_shape)
self.assertEqual(spec.name, expected_name)
@parameterized.named_parameters(
('grayscaling', True, (84, 84, 4)),
('no_grayscaling', False, (84, 84, 3, 4)),
)
def test_atari_grayscaling_observation_shape(
self, grayscaling, expected_shape
):
env = gym_atari.GymAtari('pong', seed=1)
env = processors.AtariEnvironmentWrapper(
environment=env, grayscaling=grayscaling
)
timestep = env.reset()
for _ in range(10):
assert not timestep.step_type.last()
chex.assert_shape(timestep.observation, expected_shape)
timestep = env.step(0)
class AtariEnvironmentWrapperInterfaceTest(
test_utils.EnvironmentTestMixin, absltest.TestCase
):
def make_object_under_test(self):
env = gym_atari.GymAtari('pong', seed=1)
return processors.AtariEnvironmentWrapper(environment=env)
if __name__ == '__main__':
absltest.main()
| dqn_zoo-master | dqn_zoo/processors_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DQN components."""
# pylint: disable=g-bad-import-order
import collections
import math
from unittest import mock
from dqn_zoo import parts
from dqn_zoo import test_utils
from absl.testing import absltest
class LinearScheduleTest(absltest.TestCase):
def test_descent(self):
"""Checks basic linear decay schedule."""
schedule = parts.LinearSchedule(
begin_t=5, decay_steps=7, begin_value=1.0, end_value=0.3
)
for step in range(20):
val = schedule(step)
if step <= 5:
self.assertEqual(1.0, val)
elif step >= 12:
self.assertEqual(0.3, val)
else:
self.assertAlmostEqual(1.0 - ((step - 5) / 7) * 0.7, val)
def test_ascent(self):
"""Checks basic linear ascent schedule."""
schedule = parts.LinearSchedule(
begin_t=5, end_t=12, begin_value=-0.4, end_value=0.4
)
for step in range(20):
val = schedule(step)
if step <= 5:
self.assertEqual(-0.4, val)
elif step >= 12:
self.assertEqual(0.4, val)
else:
self.assertAlmostEqual(-0.4 + ((step - 5) / 7) * 0.8, val)
def test_constant(self):
"""Checks constant schedule."""
schedule = parts.LinearSchedule(
begin_t=5, decay_steps=7, begin_value=0.5, end_value=0.5
)
for step in range(20):
val = schedule(step)
self.assertAlmostEqual(0.5, val)
def test_error_wrong_end_args(self):
"""Checks error in case none or both of end_t, decay_steps are given."""
with self.assertRaisesRegex(ValueError, 'Exactly one of'):
_ = parts.LinearSchedule(begin_value=0.0, end_value=1.0, begin_t=5)
with self.assertRaisesRegex(ValueError, 'Exactly one of'):
_ = parts.LinearSchedule(
begin_value=0.0, end_value=1.0, begin_t=5, end_t=12, decay_steps=7
)
class RunLoopTest(absltest.TestCase):
def test_basic(self):
"""Tests sequence of agent and environment interactions in typical usage."""
tape = []
agent = test_utils.DummyAgent(tape)
environment = test_utils.DummyEnvironment(tape, episode_length=4)
episode_index = 0
t = 0 # steps = t + 1
max_steps = 14
loop_outputs = parts.run_loop(
agent, environment, max_steps_per_episode=100, yield_before_reset=True
)
for unused_env, timestep_t, unused_agent, unused_a_t in loop_outputs:
tape.append((episode_index, t, timestep_t is None))
if timestep_t is None:
tape.append('Episode begin')
continue
if timestep_t.last():
tape.append('Episode end')
episode_index += 1
if t + 1 >= max_steps:
tape.append('Maximum number of steps reached')
break
t += 1
expected_tape = [
(0, 0, True),
'Episode begin',
'Agent reset',
'Environment reset',
'Agent step',
(0, 0, False),
'Environment step (0)',
'Agent step',
(0, 1, False),
'Environment step (0)',
'Agent step',
(0, 2, False),
'Environment step (0)',
'Agent step',
(0, 3, False),
'Environment step (0)',
'Agent step',
(0, 4, False),
'Episode end',
(1, 5, True),
'Episode begin',
'Agent reset',
'Environment reset',
'Agent step',
(1, 5, False),
'Environment step (0)',
'Agent step',
(1, 6, False),
'Environment step (0)',
'Agent step',
(1, 7, False),
'Environment step (0)',
'Agent step',
(1, 8, False),
'Environment step (0)',
'Agent step',
(1, 9, False),
'Episode end',
(2, 10, True),
'Episode begin',
'Agent reset',
'Environment reset',
'Agent step',
(2, 10, False),
'Environment step (0)',
'Agent step',
(2, 11, False),
'Environment step (0)',
'Agent step',
(2, 12, False),
'Environment step (0)',
'Agent step',
(2, 13, False),
'Maximum number of steps reached',
]
self.assertEqual(expected_tape, tape)
class CsvWriterTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.mock_open = mock.patch.object(__builtins__, 'open').start()
self.fake_file = mock.Mock()
self.mock_open.return_value.__enter__.return_value = self.fake_file # pytype: disable=attribute-error # py39-upgrade
mock.patch('os.path.exists').start().return_value = True
def tearDown(self):
super().tearDown()
mock.patch.stopall()
def test_file_writes(self):
"""Tests that file is opened and written correctly."""
writer = parts.CsvWriter('test.csv')
self.mock_open.assert_not_called()
self.fake_file.write.assert_not_called()
writer.write(collections.OrderedDict([('a', 1), ('b', 2)]))
self.mock_open.assert_called_once_with('test.csv', mock.ANY)
self.assertSequenceEqual(
[mock.call('a,b\r\n'), mock.call('1,2\r\n')],
self.fake_file.write.call_args_list,
)
writer.write(collections.OrderedDict([('a', 3), ('b', 4)]))
self.assertSequenceEqual(
[mock.call('test.csv', mock.ANY), mock.call('test.csv', mock.ANY)],
self.mock_open.call_args_list,
)
self.assertSequenceEqual(
[mock.call('a,b\r\n'), mock.call('1,2\r\n'), mock.call('3,4\r\n')],
self.fake_file.write.call_args_list,
)
def test_deserialize_after_header(self):
"""Tests that no header is written unnecessarily after deserialization."""
writer1 = parts.CsvWriter('test.csv')
writer1.write(collections.OrderedDict([('a', 1), ('b', 2)]))
self.assertSequenceEqual(
[mock.call('a,b\r\n'), mock.call('1,2\r\n')],
self.fake_file.write.call_args_list,
)
writer2 = parts.CsvWriter('test.csv')
writer2.set_state(writer1.get_state())
writer2.write(collections.OrderedDict([('a', 3), ('b', 4)]))
self.assertSequenceEqual(
[mock.call('a,b\r\n'), mock.call('1,2\r\n'), mock.call('3,4\r\n')],
self.fake_file.write.call_args_list,
)
def test_deserialize_before_header(self):
"""Tests that header is written after deserialization if not written yet."""
writer1 = parts.CsvWriter('test.csv')
self.fake_file.write.assert_not_called()
writer2 = parts.CsvWriter('test.csv')
writer2.set_state(writer1.get_state())
writer2.write(collections.OrderedDict([('a', 1), ('b', 2)]))
self.assertSequenceEqual(
[mock.call('a,b\r\n'), mock.call('1,2\r\n')],
self.fake_file.write.call_args_list,
)
def test_error_new_keys(self):
"""Tests that an error is thrown when an unexpected key occurs."""
writer = parts.CsvWriter('test.csv')
writer.write(collections.OrderedDict([('a', 1), ('b', 2)]))
with self.assertRaisesRegex(ValueError, 'fields not in fieldnames'):
writer.write(collections.OrderedDict([('a', 3), ('b', 4), ('c', 5)]))
def test_missing_keys(self):
"""Tests that when a key is missing, an empty value is used."""
writer = parts.CsvWriter('test.csv')
writer.write(collections.OrderedDict([('a', 1), ('b', 2), ('c', 3)]))
writer.write(collections.OrderedDict([('a', 4), ('c', 6)]))
self.assertSequenceEqual(
[mock.call('a,b,c\r\n'), mock.call('1,2,3\r\n'), mock.call('4,,6\r\n')],
self.fake_file.write.call_args_list,
)
def test_insertion_order_of_fields_preserved(self):
"""Tests that when a key is missing, an empty value is used."""
writer = parts.CsvWriter('test.csv')
writer.write(collections.OrderedDict([('c', 3), ('a', 1), ('b', 2)]))
writer.write(collections.OrderedDict([('b', 5), ('c', 6), ('a', 4)]))
self.assertSequenceEqual(
[
mock.call('c,a,b\r\n'),
mock.call('3,1,2\r\n'),
mock.call('6,4,5\r\n'),
],
self.fake_file.write.call_args_list,
)
def test_create_dir(self):
"""Tests that a csv file dir is created if it doesn't exist yet."""
with mock.patch('os.path.exists') as fake_exists, mock.patch(
'os.makedirs'
) as fake_makedirs:
fake_exists.return_value = False
dirname = '/some/sub/dir'
_ = parts.CsvWriter(dirname + '/test.csv')
fake_exists.assert_called_once_with(dirname)
fake_makedirs.assert_called_once_with(dirname)
class AgentWithStatistics(parts.Agent):
def __init__(self, statistics):
self._statistics = statistics
def step(self, timestep):
return parts.Action(0)
def reset(self) -> None:
pass
def get_state(self):
return {}
def set_state(self, state):
pass
@property
def statistics(self):
return self._statistics
@statistics.setter
def statistics(self, value):
self._statistics = value
class UnbiasedExponentialWeightedAverageAgentTrackerTest(absltest.TestCase):
def setUp(self):
super().setUp()
sample_statistics = dict(a=math.nan, b=0)
self.agent = AgentWithStatistics(sample_statistics)
self.tracker = parts.UnbiasedExponentialWeightedAverageAgentTracker(
step_size=0.1, initial_agent=self.agent
)
def test_average_equals_input_on_first_step(self):
statistics = {'a': 1, 'b': 2}
self.agent.statistics = statistics
self.tracker.step(None, None, self.agent, None)
self.assertEqual(statistics, self.tracker.get())
def test_trace_strictly_increases_from_0_to_1(self):
self.assertEqual(0, self.tracker.trace)
for i in range(100):
prev_trace = self.tracker.trace
self.agent.statistics = {'a': i, 'b': 2 * i}
self.tracker.step(None, None, self.agent, None)
self.assertGreater(self.tracker.trace, prev_trace)
self.assertLess(self.tracker.trace, 1)
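# Assuming the trace follows 1 - (1 - step_size)**t for an unbiased EWA, after 100 steps with step_size 0.1 it is approximately 1 - 0.9**100, i.e. about 0.99997, hence within 1e-4 of 1.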
self.assertAlmostEqual(1, self.tracker.trace, places=4)
if __name__ == '__main__':
absltest.main()
| dqn_zoo-master | dqn_zoo/parts_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transition replay components."""
# pylint: disable=g-bad-import-order
import collections
import copy
import itertools
from typing import Any, Mapping, Sequence, Text
import chex
import dm_env
import numpy as np
from dqn_zoo import replay as replay_lib
from absl.testing import absltest
from absl.testing import parameterized
Pair = collections.namedtuple('Pair', ['a', 'b'])
ReplayStructure = collections.namedtuple('ReplayStructure', ['value'])
## Tests for regular (unprioritized) transition replay.
class UniformDistributionTest(absltest.TestCase):
def setUp(self):
super().setUp()
random_state = np.random.RandomState(seed=1)
self.dist = replay_lib.UniformDistribution(random_state)
def test_add_and_sample_one(self):
self.dist.add([2])
self.assertEqual(2, self.dist.sample(1))
self.assertTrue(*self.dist.check_valid())
def test_adding_existing_id_raises_an_error(self):
self.dist.add([2, 5])
with self.assertRaisesRegex(IndexError, 'Cannot add ID'):
self.dist.add([6, 5])
def test_remove(self):
self.dist.add([2, 5, 7])
self.dist.remove([5])
self.assertNotIn(5, self.dist.sample(100))
self.assertTrue(*self.dist.check_valid())
def test_remove_final_id(self):
# IDs are removed by swapping with the final one in a list and popping, so
# check this works when the ID being removed is the last one.
self.dist.add([2, 5, 7])
self.dist.remove([7])
self.assertEqual(2, self.dist.size)
self.assertNotIn(7, self.dist.sample(100))
self.assertTrue(*self.dist.check_valid())
def test_removing_nonexistent_id_raises_an_error(self):
self.dist.add([2, 5])
with self.assertRaisesRegex(IndexError, 'Cannot remove ID'):
self.dist.remove([7])
def test_size(self):
self.dist.add([2, 5, 3])
self.assertEqual(3, self.dist.size)
self.dist.remove([5])
self.assertEqual(2, self.dist.size)
def test_get_state_and_set_state(self):
self.dist.add([2, 5, 3, 8])
self.dist.remove([5])
self.assertTrue(*self.dist.check_valid())
state = copy.deepcopy(self.dist.get_state())
new_dist = replay_lib.UniformDistribution(np.random.RandomState(seed=1))
new_dist.set_state(state)
self.assertTrue(new_dist.check_valid())
self.assertEqual(new_dist.size, self.dist.size)
new_state = new_dist.get_state()
chex.assert_trees_all_close(state, new_state)
class TransitionReplayTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.capacity = 10
self.replay = replay_lib.TransitionReplay(
capacity=self.capacity,
structure=Pair(a=None, b=None),
random_state=np.random.RandomState(1),
)
self.items = [
Pair(a=1, b=2),
Pair(a=11, b=22),
Pair(a=111, b=222),
Pair(a=1111, b=2222),
]
for item in self.items:
self.replay.add(item)
def test_size(self):
self.assertLen(self.items, self.replay.size)
def test_capacity(self):
self.assertEqual(self.capacity, self.replay.capacity)
def test_sample(self):
num_samples = 2
samples = self.replay.sample(num_samples)
self.assertEqual((num_samples,), samples.a.shape)
def test_invariants(self):
capacity = 10
num_samples = 3
replay = replay_lib.TransitionReplay(
capacity=capacity,
structure=Pair(a=None, b=None),
random_state=np.random.RandomState(1),
)
for i in range(31):
replay.add(Pair(a=i, b=2 * i))
self.assertLessEqual(replay.size, capacity)
if i > 2:
self.assertEqual(replay.sample(num_samples).a.shape, (num_samples,))
ids = list(replay.ids())
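# IDs are assigned sequentially and the oldest are dropped first, so the stored IDs form a sorted, contiguous range.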
self.assertLen(ids, min(i + 1, capacity))
self.assertEqual(ids, sorted(ids))
self.assertEqual(ids[0] + len(ids) - 1, ids[-1])
def test_get_state_and_set_state(self):
replay = replay_lib.TransitionReplay(
capacity=self.capacity,
structure=Pair(a=None, b=None),
random_state=np.random.RandomState(1),
)
for i in range(100):
replay.add(Pair(a=i, b=i))
replay.sample(10)
self.assertTrue(*replay.check_valid())
state = copy.deepcopy(replay.get_state())
new_replay = replay_lib.TransitionReplay(
capacity=self.capacity,
structure=Pair(a=None, b=None),
random_state=np.random.RandomState(1),
)
new_replay.set_state(state)
self.assertTrue(*new_replay.check_valid())
self.assertEqual(new_replay.size, replay.size)
new_state = new_replay.get_state()
chex.assert_trees_all_close(state, new_state)
class NStepTransitionAccumulatorTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.n = 3
self.accumulator = replay_lib.NStepTransitionAccumulator(self.n)
self.num_timesteps = 10
self.step_types = [dm_env.StepType.FIRST] + [dm_env.StepType.MID] * (
self.num_timesteps - 1
)
self.states = list(range(self.num_timesteps))
self.discounts = np.linspace(0.9, 1.0, self.num_timesteps, endpoint=False)
self.rewards = np.linspace(-5, 5, self.num_timesteps, endpoint=False)
self.actions = [i % 4 for i in range(self.num_timesteps)]
self.accumulator_output = []
for i in range(self.num_timesteps):
timestep = dm_env.TimeStep(
step_type=self.step_types[i],
observation=self.states[i],
discount=self.discounts[i],
reward=self.rewards[i],
)
self.accumulator_output.append(
list(self.accumulator.step(timestep, self.actions[i]))
)
def test_no_transitions_returned_for_first_n_steps(self):
self.assertEqual([[]] * self.n, self.accumulator_output[: self.n])
self.assertNotEqual([], self.accumulator_output[self.n])
def test_states_accumulation(self):
actual_s_tm1 = [
tr.s_tm1 for tr in itertools.chain(*self.accumulator_output)
]
actual_s_t = [tr.s_t for tr in itertools.chain(*self.accumulator_output)]
expected_s_tm1 = self.states[: -self.n]
expected_s_t = self.states[self.n :]
np.testing.assert_array_equal(expected_s_tm1, actual_s_tm1)
np.testing.assert_array_equal(expected_s_t, actual_s_t)
def test_discount_accumulation(self):
expected = []
for i in range(len(self.discounts) - self.n):
# Offset by 1 since first discount is unused.
expected.append(np.prod(self.discounts[i + 1 : i + 1 + self.n]))
actual = [tr.discount_t for tr in itertools.chain(*self.accumulator_output)]
np.testing.assert_allclose(expected, actual)
def test_reward_accumulation(self):
expected = []
for i in range(len(self.discounts) - self.n):
# Offset by 1 since the first discount and reward are unused.
discounts = np.concatenate(
[[1.0], self.discounts[i + 1 : i + 1 + self.n - 1]]
)
cumulative_discounts = np.cumprod(discounts)
rewards = self.rewards[i + 1 : i + 1 + self.n]
expected.append(np.sum(cumulative_discounts * rewards))
actual = [tr.r_t for tr in itertools.chain(*self.accumulator_output)]
np.testing.assert_allclose(expected, actual)
def test_correct_action_is_stored_in_transition(self):
expected = self.actions[: -self.n]
actual = [tr.a_tm1 for tr in itertools.chain(*self.accumulator_output)]
np.testing.assert_array_equal(expected, actual)
def test_reset(self):
self.accumulator.reset()
transitions = self.accumulator.step(
timestep_t=dm_env.TimeStep(
step_type=dm_env.StepType.FIRST,
observation=-1,
discount=1.0,
reward=3,
),
a_t=1,
)
self.assertEqual([], list(transitions))
def test_consistent_with_transition_accumulator(self):
n_step_transition_accumulator = replay_lib.NStepTransitionAccumulator(1)
transition_accumulator = replay_lib.TransitionAccumulator()
# Add the same timesteps to both accumulators.
for i in range(self.num_timesteps):
timestep = dm_env.TimeStep(
step_type=self.step_types[i],
observation=self.states[i],
discount=self.discounts[i],
reward=self.rewards[i],
)
transitions = list(transition_accumulator.step(timestep, self.actions[i]))
n_step_transitions = list(
n_step_transition_accumulator.step(timestep, self.actions[i])
)
self.assertEqual(transitions, n_step_transitions)
def test_all_remaining_transitions_yielded_when_timestep_is_last(self):
f = dm_env.StepType.FIRST
m = dm_env.StepType.MID
l = dm_env.StepType.LAST
n = 3
accumulator = replay_lib.NStepTransitionAccumulator(n)
step_types = [f, m, m, m, m, m, l, f, m, m, m, m, f, m]
num_timesteps = len(step_types)
states = list(range(num_timesteps))
discounts = np.arange(1, num_timesteps + 1) / num_timesteps
rewards = np.ones(num_timesteps)
actions = list(range(num_timesteps, 0, -1))
accumulator_output = []
for i in range(num_timesteps):
timestep = dm_env.TimeStep(
step_type=step_types[i],
observation=states[i],
discount=discounts[i],
reward=rewards[i],
)
accumulator_output.append(list(accumulator.step(timestep, actions[i])))
output_lengths = [len(output) for output in accumulator_output]
expected_output_lengths = [0, 0, 0, 1, 1, 1, n, 0, 0, 0, 1, 1, 0, 0]
self.assertEqual(expected_output_lengths, output_lengths)
# Get transitions yielded at the end of an episode.
end_index = expected_output_lengths.index(n)
episode_end_transitions = accumulator_output[end_index]
# Check the start and end states are correct.
# Normal n-step transition
self.assertEqual(episode_end_transitions[0].s_t, end_index)
self.assertEqual(episode_end_transitions[0].s_tm1, end_index - n)
# (n - 1)-step transition.
self.assertEqual(episode_end_transitions[1].s_t, end_index)
self.assertEqual(episode_end_transitions[1].s_tm1, end_index - (n - 1))
# (n - 2)-step transition.
self.assertEqual(episode_end_transitions[2].s_t, end_index)
self.assertEqual(episode_end_transitions[2].s_tm1, end_index - (n - 2))
def test_transitions_returned_if_episode_length_less_than_n(self):
f = dm_env.StepType.FIRST
m = dm_env.StepType.MID
l = dm_env.StepType.LAST
n = 4
accumulator = replay_lib.NStepTransitionAccumulator(n)
step_types = [f, m, l]
num_timesteps = len(step_types)
states = list(range(num_timesteps))
discounts = np.ones(num_timesteps)
rewards = np.ones(num_timesteps)
actions = np.ones(num_timesteps)
accumulator_output = []
for i in range(num_timesteps):
timestep = dm_env.TimeStep(
step_type=step_types[i],
observation=states[i],
discount=discounts[i],
reward=rewards[i],
)
accumulator_output.append(list(accumulator.step(timestep, actions[i])))
output_lengths = [len(output) for output in accumulator_output]
output_states = [
[(tr.s_tm1, tr.s_t) for tr in output] for output in accumulator_output
]
# Expect a 1-step transition and a 2-step transition after LAST timestep.
expected_output_lengths = [0, 0, 2]
expected_output_states = [[], [], [(0, 2), (1, 2)]]
self.assertEqual(expected_output_lengths, output_lengths)
self.assertEqual(expected_output_states, output_states)
## Tests for prioritized replay.
def add(replay, values, priorities=None):
priorities = [0.0] * len(values) if priorities is None else priorities
for v, p in zip(values, priorities):
replay.add(ReplayStructure(value=v), priority=p)
def get(replay, ids):
return [x.value for x in replay.get(ids)]
def make_replay(
capacity=10,
structure=None,
priority_exponent=0.8,
importance_sampling_exponent=lambda t: 0.6,
uniform_sample_probability=0.1,
normalize_weights=True,
seed=None,
):
return replay_lib.PrioritizedTransitionReplay(
capacity=capacity,
structure=structure or ReplayStructure(value=None),
priority_exponent=priority_exponent,
importance_sampling_exponent=importance_sampling_exponent,
uniform_sample_probability=uniform_sample_probability,
normalize_weights=normalize_weights,
random_state=np.random.RandomState(seed),
)
def sample_replay_bin_count(replay, num, sample_size):
all_values = []
for _ in range(num):
samples, unused_indices, unused_weights = replay.sample(size=sample_size)
all_values.append(samples.value)
return np.bincount(np.array(all_values).flatten())
def make_distribution(
min_capacity=0,
max_capacity=None,
priority_exponent=0.8,
uniform_sample_probability=0.1,
seed=1,
):
random_state = np.random.RandomState(seed)
return replay_lib.PrioritizedDistribution(
max_capacity=max_capacity,
min_capacity=min_capacity,
priority_exponent=priority_exponent,
uniform_sample_probability=uniform_sample_probability,
random_state=random_state,
)
def sample_distribution_bin_count(distribution, num, sample_size):
all_values = []
for _ in range(num):
indices, unused_probabilities = distribution.sample(size=sample_size)
all_values.extend(indices)
counter = collections.Counter(all_values)
sampled_indices = sorted(counter.keys())
counts = np.array([counter[idx] for idx in sampled_indices])
return sampled_indices, counts
class PrioritizedDistributionTest(absltest.TestCase):
def test_adding_ids_that_already_exist_raises_an_exception(self):
dist = make_distribution()
dist.add_priorities(ids=[1, 2], priorities=[0.1, 0.2])
with self.assertRaisesRegex(IndexError, 'already exists'):
dist.add_priorities(ids=[2], priorities=[0.2])
def test_size_is_correct_with_max_capacity(self):
capacity = 7
dist = make_distribution(min_capacity=capacity, max_capacity=capacity)
self.assertTrue(*dist.check_valid())
self.assertEqual(0, dist.size)
# Adding 3 new priorities counts towards the size.
dist.add_priorities(ids=[2, 3, 5], priorities=[0.2, 0.3, 0.5])
self.assertTrue(*dist.check_valid())
self.assertEqual(3, dist.size)
# Overwriting existing priority does not increase size.
dist.update_priorities(ids=[3], priorities=[1.0])
self.assertTrue(*dist.check_valid())
self.assertEqual(3, dist.size)
# Adding a priority for a new ID increases size, even if the priority is 0.
dist.add_priorities(ids=[4], priorities=[0.0])
self.assertTrue(*dist.check_valid())
self.assertEqual(4, dist.size)
# Add priorities up to capacity.
dist.add_priorities(ids=[7, 1, 0], priorities=[2.0, 3.0, 4.0])
self.assertTrue(*dist.check_valid())
self.assertEqual(7, dist.size)
# Add priorities beyond capacity.
with self.assertRaisesRegex(ValueError, 'max capacity would be exceeded'):
dist.add_priorities(ids=[9, 8], priorities=[2.0, 3.0])
self.assertTrue(*dist.check_valid())
self.assertEqual(7, dist.size)
self.assertSequenceEqual(list(dist.ids()), [2, 3, 5, 4, 7, 1, 0])
def test_capacity_does_not_grow_unless_needed(self):
dist = make_distribution(min_capacity=4, max_capacity=None)
dist.add_priorities(ids=[0], priorities=[0.0])
self.assertEqual(1, dist.size)
self.assertEqual(4, dist.capacity)
dist.add_priorities(ids=[1, 2], priorities=[1.0, 2.0])
self.assertEqual(3, dist.size)
self.assertEqual(4, dist.capacity)
dist.add_priorities(ids=[3], priorities=[3.0])
self.assertEqual(4, dist.size)
self.assertEqual(4, dist.capacity)
dist.add_priorities(
ids=[4, 5, 6, 7, 8], priorities=[4.0, 5.0, 6.0, 7.0, 8.0]
)
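# Five more IDs bring the size to 9; doubling the capacity (4 -> 8) would not be enough, so capacity grows directly to 9.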
self.assertEqual(9, dist.size)
self.assertEqual(9, dist.capacity)
def test_capacity_grows_automatically_as_ids_are_added(self):
dist = make_distribution(min_capacity=0, max_capacity=None)
self.assertEqual(0, dist.capacity)
self.assertTrue(*dist.check_valid())
id_iter = itertools.count()
def add_priorities(num_ids):
ids = list(itertools.islice(id_iter, num_ids))
priorities = [float(i) for i in ids]
dist.add_priorities(ids, priorities)
# Add zero IDs, a bit contrived, but should not raise an error.
add_priorities(num_ids=0)
self.assertEqual(0, dist.capacity)
self.assertTrue(*dist.check_valid())
# Add one ID.
add_priorities(num_ids=1)
self.assertEqual(0 + 1, dist.capacity)
self.assertTrue(*dist.check_valid())
# Add another ID.
add_priorities(num_ids=1)
self.assertEqual(1 + 1, dist.capacity)
self.assertTrue(*dist.check_valid())
# Add another ID, capacity grows to 4 as capacity doubles.
add_priorities(num_ids=1)
self.assertEqual(2 * 2, dist.capacity)
self.assertEqual(3, dist.size)
self.assertTrue(*dist.check_valid())
# Add 6 IDs; doubling the capacity (4 -> 8) is not sufficient, so it grows to 4 + 5 = 9.
add_priorities(num_ids=6)
self.assertEqual(4 + 5, dist.capacity)
self.assertEqual(9, dist.size)
self.assertTrue(*dist.check_valid())
def test_min_capacity_is_respected(self):
min_capacity = 3
dist = make_distribution(min_capacity=min_capacity)
self.assertEqual(min_capacity, dist.capacity)
self.assertEqual(0, dist.size)
def test_capacity_correct_after_increasing_capacity(self):
min_capacity = 4
dist = make_distribution(min_capacity=min_capacity)
self.assertEqual(min_capacity, dist.capacity)
self.assertEqual(0, dist.size)
new_capacity = 7
dist.ensure_capacity(new_capacity)
self.assertEqual(new_capacity, dist.capacity)
self.assertEqual(0, dist.size)
self.assertTrue(*dist.check_valid())
def test_increasing_capacity_beyond_max_capacity_raises_an_error(self):
dist = make_distribution(max_capacity=7)
dist.ensure_capacity(3)
with self.assertRaisesRegex(ValueError, 'cannot exceed max_capacity'):
dist.ensure_capacity(9)
def test_setting_capacity_lower_than_current_capacity_does_nothing(self):
min_capacity = 4
dist = make_distribution(min_capacity=min_capacity)
self.assertEqual(min_capacity, dist.capacity)
dist.ensure_capacity(2)
# Capacity should remain the same.
self.assertEqual(min_capacity, dist.capacity)
self.assertTrue(*dist.check_valid())
def test_changing_capacity_does_not_alter_existing_ids(self):
ids = [2, 3, 5]
priorities = [0.2, 0.3, 0.5]
dist = make_distribution(min_capacity=len(ids), priority_exponent=1.0)
dist.add_priorities(ids, priorities)
dist.ensure_capacity(10)
same_ids = sorted(dist.ids())
same_priorities = list(dist.get_exponentiated_priorities(same_ids))
self.assertSequenceEqual(ids, same_ids)
self.assertSequenceEqual(priorities, same_priorities)
self.assertTrue(*dist.check_valid())
def test_new_size_greater_than_2x_capacity_with_max_capacity_set(self):
ids = [2, 3, 5]
priorities = [0.2, 0.3, 0.5]
dist = make_distribution(min_capacity=len(ids), max_capacity=100)
dist.add_priorities(ids, priorities)
# Add more IDs and priorities, beyond 2x current capacity.
dist.add_priorities([6, 7, 8, 9], [0.6, 0.7, 0.8, 0.9])
self.assertTrue(*dist.check_valid())
self.assertEqual(7, dist.capacity)
def test_get_state_and_set_state(self):
ids = [2, 3, 5]
priorities = [0.2, 0.3, 0.5]
dist = make_distribution(priority_exponent=1.0, min_capacity=9)
dist.add_priorities(ids, priorities)
self.assertTrue(*dist.check_valid())
state = copy.deepcopy(dist.get_state())
new_dist = make_distribution(priority_exponent=1.0, min_capacity=9)
new_dist.set_state(state)
self.assertTrue(*new_dist.check_valid())
self.assertEqual(new_dist.size, dist.size)
new_state = new_dist.get_state()
chex.assert_trees_all_close(state, new_state)
new_priorities = new_dist.get_exponentiated_priorities(ids)
# Equal to raw priorities since priority exponent is 1.
np.testing.assert_array_equal(new_priorities, priorities)
def test_priorities_can_be_set_again(self):
priority_exponent = 0.45
dist = make_distribution(priority_exponent=priority_exponent)
ids = [2, 3, 5]
priorities = [0.2, 0.3, 0.5]
dist.add_priorities(ids, priorities)
orig_priorities = dist.get_exponentiated_priorities(ids)
dist.update_priorities([3], [1.3])
new_priorities = dist.get_exponentiated_priorities(ids)
self.assertNotAlmostEqual(orig_priorities[1], new_priorities[1])
self.assertAlmostEqual(1.3**priority_exponent, new_priorities[1])
def test_add_priorities_with_empty_args(self):
dist = make_distribution()
dist.add_priorities([], [])
self.assertTrue(*dist.check_valid())
def test_priorities_can_be_removed(self):
dist = make_distribution()
ids = [2, 3, 5, 7]
priorities = [0.2, 0.3, 0.5, 0.7]
dist.add_priorities(ids, priorities)
self.assertEqual(4, dist.size)
dist.remove_priorities([3, 7])
self.assertEqual(2, dist.size)
self.assertTrue(*dist.check_valid())
def test_remove_priorities_with_empty_args(self):
dist = make_distribution()
ids = [2, 3, 5]
priorities = [0.2, 0.3, 0.5]
dist.add_priorities(ids, priorities)
dist.remove_priorities([])
self.assertTrue(*dist.check_valid())
def test_update_priorities_with_empty_args(self):
dist = make_distribution()
ids = [2, 3, 5]
priorities = [0.2, 0.3, 0.5]
dist.add_priorities(ids, priorities)
dist.update_priorities([], [])
self.assertTrue(*dist.check_valid())
def test_all_zero_priorities_results_in_uniform_sampling(self):
dist = make_distribution()
dist.add_priorities(ids=[2, 3, 5], priorities=[0.0, 0.0, 0.0])
for _ in range(10):
unused_ids, probabilities = dist.sample(size=2)
np.testing.assert_allclose(probabilities, 1.0 / 3.0)
def test_sample_distribution(self):
priority_exponent = 0.8
uniform_sample_probability = 0.1
dist = make_distribution(
priority_exponent=priority_exponent,
uniform_sample_probability=uniform_sample_probability,
)
# Set priorities, update one.
ids = [2, 3, 5]
initial_priorities = np.array([1.0, 0.0, 3.0], dtype=np.float64)
dist.add_priorities(ids=ids, priorities=initial_priorities)
final_priorities = np.array([1.0, 4.0, 3.0], dtype=np.float64)
dist.update_priorities([ids[1]], [final_priorities[1]])
usp = uniform_sample_probability
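# The expected sampling distribution mixes the normalized priorities**priority_exponent with a uniform distribution, weighted by usp.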
expected_raw_sample_dist = final_priorities**priority_exponent
expected_raw_sample_dist /= expected_raw_sample_dist.sum()
expected_sample_dist = (1 - usp) * expected_raw_sample_dist + usp * 1 / len(
final_priorities
)
sampled_ids, counts = sample_distribution_bin_count(
dist, num=50_000, sample_size=2
)
self.assertEqual(ids, sampled_ids)
sample_dist = counts / counts.sum()
np.testing.assert_allclose(sample_dist, expected_sample_dist, rtol=1e-2)
def test_update_priorities_raises_an_error_if_id_not_present(self):
dist = make_distribution()
dist.add_priorities(ids=[2, 3, 5], priorities=[1.0, 2.0, 3.0])
with self.assertRaises(IndexError):
dist.update_priorities(ids=[4], priorities=[0.0])
with self.assertRaises(IndexError):
dist.update_priorities(ids=[1], priorities=[1.0])
with self.assertRaises(IndexError):
dist.update_priorities(ids=[0], priorities=[2.0])
def test_priorities_can_be_updated(self):
dist = make_distribution(priority_exponent=1.0)
ids = [2, 3, 5]
dist.add_priorities(ids=ids, priorities=[1.0, 2.0, 3.0])
dist.update_priorities(ids=[3, 5], priorities=[4.0, 6.0])
updated_priorities = dist.get_exponentiated_priorities(ids)
np.testing.assert_allclose(updated_priorities, [1, 4, 6])
def test_removing_ids_results_in_only_remaining_ids_being_sampled(self):
usp = 0.1
dist = make_distribution(
priority_exponent=1.0, uniform_sample_probability=usp
)
ids = [2, 3, 5]
dist.add_priorities(ids=ids, priorities=[1.0, 100.0, 9.0])
dist.remove_priorities([3])
ids, probabilities = dist.sample(1000)
unique_probs = list(sorted(set(probabilities)))
self.assertSetEqual({2, 5}, set(ids))
self.assertLen(unique_probs, 2)
self.assertAlmostEqual(usp * 0.5 + (1 - usp) * 0.1, unique_probs[0])
self.assertAlmostEqual(usp * 0.5 + (1 - usp) * 0.9, unique_probs[1])
def test_removing_last_id_is_valid(self):
# This tests internal logic for ID removal where the final ID is "special".
dist = make_distribution()
ids = [2, 3, 5]
dist.add_priorities(ids=ids, priorities=[1.0, 100.0, 9.0])
dist.remove_priorities([5])
self.assertTrue(*dist.check_valid())
class PrioritizedTransitionReplayTest(absltest.TestCase):
def test_empty_replay_properties_are_correct(self):
capacity = 7
replay = make_replay(capacity=capacity)
self.assertEqual(0, replay.size)
self.assertEqual(capacity, replay.capacity)
def test_add(self):
replay = make_replay()
add(replay, [10])
add(replay, [11])
self.assertListEqual([10], get(replay, [0]))
self.assertListEqual([11], get(replay, [1]))
def test_only_latest_elements_are_kept(self):
capacity = 5
replay = make_replay(capacity=capacity)
num_items = 7
assert num_items > capacity
add(replay, list(range(num_items)))
self.assertTrue(*replay.check_valid())
values = get(replay, list(range(num_items - capacity, num_items)))
expected_values = list(range(num_items - capacity, num_items))
self.assertCountEqual(expected_values, values)
def test_sample_returns_batch(self):
replay = make_replay()
add(replay, [1, 2, 3])
sample_size = 2
samples, unused_ids, unused_weights = replay.sample(sample_size)
chex.assert_shape(samples.value, (sample_size,))
def test_get_state_and_set_state(self):
orig_replay = make_replay(priority_exponent=1.0)
add(orig_replay, values=[11, 22, 33], priorities=[1.0, 2.0, 3.0])
state = orig_replay.get_state()
new_replay = make_replay()
new_replay.set_state(state)
self.assertEqual(orig_replay.size, new_replay.size)
def test_sample_distribution(self):
priority_exponent = 0.8
uniform_sample_probability = 0.1
replay = make_replay(
capacity=3,
priority_exponent=priority_exponent,
uniform_sample_probability=uniform_sample_probability,
seed=1,
)
priorities = np.array([3.0, 2.0, 0.0, 4.0], dtype=np.float64)
add(
replay, values=list(range(len(priorities))), priorities=list(priorities)
)
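# Replay capacity is 3, so the first of the four added items is evicted; only the last three priorities can ever be sampled.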
pe, usp = priority_exponent, uniform_sample_probability
expected_dist = np.zeros_like(priorities)
active_priorities = priorities[-replay.size :].copy()
exp_priorities = active_priorities**pe
prioritized_probs = exp_priorities / exp_priorities.sum()
uniform_prob = 1.0 / replay.size
expected_dist[-replay.size :] = (
1.0 - usp
) * prioritized_probs + usp * uniform_prob
counts = sample_replay_bin_count(replay, num=10000, sample_size=2)
dist = counts / counts.sum()
np.testing.assert_allclose(dist, expected_dist, rtol=0.1)
class SumTreeTest(parameterized.TestCase):
def test_can_create_empty(self):
sum_tree = replay_lib.SumTree()
self.assertTrue(*sum_tree.check_valid())
self.assertEqual(0, sum_tree.size)
self.assertTrue(np.isnan(sum_tree.root()))
def test_size_is_correct(self):
sum_tree = replay_lib.SumTree()
self.assertEqual(0, sum_tree.size)
size = 3
sum_tree.resize(size)
self.assertEqual(size, sum_tree.size)
def test_resize_returns_zero_values_initially(self):
sum_tree = replay_lib.SumTree()
size = 3
sum_tree.resize(size)
for i in range(size):
self.assertEqual(0, sum_tree.get([i]))
def test_resize_to_1(self):
sum_tree = replay_lib.SumTree()
sum_tree.resize(1)
self.assertTrue(*sum_tree.check_valid())
self.assertEqual(0, sum_tree.root())
def test_resize_to_0(self):
sum_tree = replay_lib.SumTree()
sum_tree.resize(0)
self.assertTrue(*sum_tree.check_valid())
self.assertTrue(np.isnan(sum_tree.root()))
def test_set_all(self):
sum_tree = replay_lib.SumTree()
values = [4.0, 5.0, 3.0]
sum_tree.set_all(values)
self.assertLen(values, sum_tree.size)
for i in range(len(values)):
np.testing.assert_array_almost_equal([values[i]], sum_tree.get([i]))
self.assertTrue(*sum_tree.check_valid())
def test_capacity_greater_or_equal_to_size_and_power_of_2(self):
sum_tree = replay_lib.SumTree()
sum_tree.set_all([4.0, 5.0, 3.0, 2.0])
self.assertEqual(4, sum_tree.capacity)
sum_tree = replay_lib.SumTree()
sum_tree.set_all([4.0, 5.0, 3.0, 2.0, 9])
self.assertEqual(8, sum_tree.capacity)
def test_values_returns_values(self):
sum_tree = replay_lib.SumTree()
values = [4.0, 5.0, 3.0]
sum_tree.set_all(values)
np.testing.assert_allclose(values, sum_tree.values)
def test_resize_preserves_values_and_zeros_the_rest_when_growing(self):
sum_tree = replay_lib.SumTree()
values = [4.0, 5.0, 3.0]
sum_tree.set_all(values)
new_size = len(values) + 5
sum_tree.resize(new_size)
for i in range(len(values)):
np.testing.assert_array_almost_equal([values[i]], sum_tree.get([i]))
for i in range(len(values), new_size):
np.testing.assert_array_almost_equal([0.0], sum_tree.get([i]))
self.assertTrue(*sum_tree.check_valid())
def test_resizes_preserves_values_when_shrinking(self):
sum_tree = replay_lib.SumTree()
values = [4.0, 5.0, 3.0, 8.0, 2.0]
sum_tree.set_all(values)
new_size = len(values) - 2
sum_tree.resize(new_size)
for i in range(new_size):
np.testing.assert_array_almost_equal([values[i]], sum_tree.get([i]))
self.assertTrue(*sum_tree.check_valid())
def test_resizing_to_size_between_current_size_and_capacity(self):
sum_tree = replay_lib.SumTree()
values = [4.0, 5.0, 3.0, 8.0, 2.0]
sum_tree.set_all(values)
new_size = 7
assert sum_tree.size < new_size < sum_tree.capacity
sum_tree.resize(new_size)
np.testing.assert_allclose(values + [0.0, 0.0], sum_tree.values)
self.assertTrue(*sum_tree.check_valid())
def test_exception_raised_when_index_out_of_bounds_in_get(self):
sum_tree = replay_lib.SumTree()
size = 3
sum_tree.resize(size)
for i in [-1, size]:
with self.assertRaises(IndexError):
sum_tree.get([i])
def test_get_with_multiple_indexes(self):
sum_tree = replay_lib.SumTree()
values = [4.0, 5.0, 3.0, 9.0]
sum_tree.set_all(values)
selected = sum_tree.get([1, 3])
np.testing.assert_allclose([values[1], values[3]], selected)
def test_set_single(self):
sum_tree = replay_lib.SumTree()
values = [4, 5, 3, 9]
sum_tree.set_all(values)
sum_tree.set([2], [99])
np.testing.assert_allclose([4, 5, 99, 9], sum_tree.values)
def test_set_multiple(self):
sum_tree = replay_lib.SumTree()
values = [4, 5, 3, 9]
sum_tree.set_all(values)
sum_tree.set([2, 0], [99, 88])
np.testing.assert_allclose([88, 5, 99, 9], sum_tree.values)
@parameterized.parameters(
(0, 0.0),
(0, 3.0 - 0.1),
(1, 3.0),
(1, 4.0 - 0.1),
(2, 4.0),
(2, 6.0 - 0.1),
(3, 6.0),
(3, 11.0 - 0.1),
)
def test_query_typical(self, expected_index, target):
sum_tree = replay_lib.SumTree()
values = [3.0, 1.0, 2.0, 5.0]
sum_tree.set_all(values)
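# Cumulative sums of values are [3, 4, 6, 11]; query returns the first index whose cumulative sum strictly exceeds the target.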
self.assertEqual([expected_index], sum_tree.query([target]))
def test_query_raises_exception_if_target_out_of_range(self):
sum_tree = replay_lib.SumTree()
values = [3.0, 1.0, 2.0, 5.0]
sum_tree.set_all(values)
with self.assertRaises(ValueError):
sum_tree.query([-1.0])
with self.assertRaises(ValueError):
sum_tree.query([sum(values)])
with self.assertRaises(ValueError):
sum_tree.query([sum(values) + 1.0])
with self.assertRaises(ValueError):
sum_tree.query([sum_tree.root()])
def test_query_multiple(self):
sum_tree = replay_lib.SumTree()
values = [3.0, 1.0, 2.0, 5.0]
sum_tree.set_all(values)
np.testing.assert_array_equal([0, 1, 2], sum_tree.query([2.9, 3.0, 4]))
@parameterized.parameters(
(t,)
for t in [0, 0.1, 0.9, 1, 1.1, 3.9, 4, 4.1, 5.9, 6, 6.1, 8.9, 8.999999]
)
def test_query_never_returns_an_index_with_zero_value(self, target):
sum_tree = replay_lib.SumTree()
values = np.array([0, 1, 0, 0, 3, 0, 2, 0, 3, 0], dtype=np.float64)
zero_indices = (values == 0).nonzero()[0]
sum_tree.set_all(values)
self.assertNotIn(sum_tree.query([target])[0], zero_indices)
def test_root_returns_sum(self):
sum_tree = replay_lib.SumTree()
values = [3.0, 1.0, 2.0, 5.0]
sum_tree.set_all(values)
self.assertAlmostEqual(sum(values), sum_tree.root())
def test_set_cannot_add_negative_nan_or_inf_values(self):
sum_tree = replay_lib.SumTree()
sum_tree.set_all([0, 1, 2])
with self.assertRaises(ValueError):
sum_tree.set([1], [-1])
with self.assertRaises(ValueError):
sum_tree.set([1], [np.nan])
with self.assertRaises(ValueError):
sum_tree.set([1], [np.inf])
def test_set_all_cannot_add_negative_nan_or_inf_values(self):
with self.assertRaises(ValueError):
replay_lib.SumTree().set_all([1, -1])
with self.assertRaises(ValueError):
replay_lib.SumTree().set_all([1, np.nan])
with self.assertRaises(ValueError):
replay_lib.SumTree().set_all([1, np.inf])
def test_set_updates_total_sum(self):
sum_tree = replay_lib.SumTree()
values = [4, 5, 3, 9]
sum_tree.set_all(values)
sum_tree.set([1], [2])
self.assertAlmostEqual(sum(values) - 5 + 2, sum_tree.root())
self.assertTrue(*sum_tree.check_valid())
def test_get_state_and_set_state(self):
sum_tree = replay_lib.SumTree()
values = [4, 5, 3, 9]
sum_tree.set_all(values)
self.assertTrue(*sum_tree.check_valid())
state = copy.deepcopy(sum_tree.get_state())
new_sum_tree = replay_lib.SumTree()
new_sum_tree.set_state(state)
self.assertTrue(*new_sum_tree.check_valid())
self.assertEqual(sum_tree.size, new_sum_tree.size)
new_state = new_sum_tree.get_state()
chex.assert_trees_all_close(state, new_state)
np.testing.assert_allclose(new_sum_tree.values, sum_tree.values)
self.assertEqual(sum_tree.capacity, new_sum_tree.capacity)
class NaiveSumTree:
"""Same as `SumTree`, but less efficient with a simpler implementation."""
def __init__(self):
self._values = np.zeros(0, np.float64)
def resize(self, size: int) -> None:
"""Resizes tree, truncating or expanding with zeros as needed."""
# Usually there shouldn't be references to self._values, but to prevent
# certain Coverage test errors, we pass refcheck=False.
self._values.resize(size, refcheck=False)
def get(self, indices: Sequence[int]) -> Sequence[float]:
"""Gets values corresponding to given indices."""
indices = np.asarray(indices)
if not ((0 <= indices) & (indices < self.size)).all():
raise IndexError('Index out of range, expect 0 <= index < %s' % self.size)
return self._values[indices]
def set(self, indices: Sequence[int], values: Sequence[float]):
"""Sets values at the given indices."""
values = np.asarray(values)
if not np.isfinite(values).all() or (values < 0.0).any():
raise ValueError('Values must be finite and non-negative.')
self._values[indices] = values
def set_all(self, values: Sequence[float]) -> None:
"""Sets many values all at once, also setting size of the sum tree."""
values = np.asarray(values)
if not np.isfinite(values).all() or (values < 0.0).any():
raise ValueError('Values must be finite and non-negative.')
self._values = values
def query(self, targets: Sequence[float]) -> Sequence[int]:
"""Finds smallest index such that `target <` cumulative sum up to index."""
return [self._query_single(t) for t in targets]
def _query_single(self, target: float) -> int:
"""Queries a single target, see `SumTree.query` for more documentation."""
if not 0.0 <= target < self.root():
raise ValueError('Require 0 <= target < total sum.')
acc_sum = 0.0
for i in range(self.size):
acc_sum += self.values[i]
if target < acc_sum:
return i
raise RuntimeError('Should not reach here as target < total sum.')
def root(self) -> float:
return self._values.sum() if self.size > 0 else np.nan
@property
def values(self) -> np.ndarray:
return self._values
@property
def size(self) -> int:
return len(self._values)
@property
def capacity(self) -> int:
return len(self._values)
def get_state(self) -> Mapping[Text, Any]:
return {
'values': self._values,
}
def set_state(self, state: Mapping[Text, Any]) -> None:
self._values = state['values']
def random_operations(sum_tree, seed):
random_state = np.random.RandomState(seed)
random_values = lambda m: np.abs(random_state.standard_cauchy(m))
random_indices = lambda m: random_state.randint(sum_tree.size, size=m)
random_targets = lambda m: random_state.uniform(0, sum_tree.root(), size=m)
random_size = lambda: random_state.randint(10, 40)
for _ in range(20):
sum_tree.resize(random_size())
yield
sum_tree.set(random_indices(3), random_values(3))
yield
yield sum_tree.query(random_targets(4))
sum_tree.set_all(random_values(random_size()))
sum_tree.set(random_indices(4), random_values(4))
yield sum_tree.query(random_targets(3))
sum_tree.set_state(sum_tree.get_state())
yield
class NaiveSumTreeEquivalenceTest(parameterized.TestCase):
"""Tests equivalence with naive implementation.
Has better coverage but harder to debug failures.
"""
@parameterized.parameters([(i,) for i in list(range(10))])
def test_with_random_data(self, seed):
actual_sum_tree = replay_lib.SumTree()
naive_sum_tree = NaiveSumTree()
# Randomly perform operations, periodically stopping to compare.
operation_iterator = zip(
random_operations(actual_sum_tree, seed),
random_operations(naive_sum_tree, seed),
)
for actual_value, naive_value in operation_iterator:
if actual_value is not None and naive_value is not None:
np.testing.assert_allclose(actual_value, naive_value)
self.assertTrue(*actual_sum_tree.check_valid())
self.assertAlmostEqual(naive_sum_tree.root(), actual_sum_tree.root())
np.testing.assert_allclose(naive_sum_tree.values, actual_sum_tree.values)
if __name__ == '__main__':
absltest.main()
| dqn_zoo-master | dqn_zoo/replay_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DQN agent network components and implementation."""
# pylint: disable=g-bad-import-order
import typing
from typing import Any, Callable, Tuple, Union
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
Network = hk.Transformed
Params = hk.Params
NetworkFn = Callable[..., Any]
class QNetworkOutputs(typing.NamedTuple):
q_values: jnp.ndarray
class IqnInputs(typing.NamedTuple):
state: jnp.ndarray
taus: jnp.ndarray
class IqnOutputs(typing.NamedTuple):
q_values: jnp.ndarray
q_dist: jnp.ndarray
class QRNetworkOutputs(typing.NamedTuple):
q_values: jnp.ndarray
q_dist: jnp.ndarray
class C51NetworkOutputs(typing.NamedTuple):
q_values: jnp.ndarray
q_logits: jnp.ndarray
def _dqn_default_initializer(
num_input_units: int,
) -> hk.initializers.Initializer:
"""Default initialization scheme inherited from past implementations of DQN.
This scheme was historically used to initialize all weights and biases
in convolutional and linear layers of DQN-type agents' networks.
It initializes each weight as an independent uniform sample from [`-c`, `c`],
where `c = 1 / np.sqrt(num_input_units)`, and `num_input_units` is the number
of input units affecting a single output unit in the given layer, i.e. the
total number of inputs in the case of linear (dense) layers, and
`num_input_channels * kernel_width * kernel_height` in the case of
convolutional layers.
Args:
num_input_units: number of input units to a single output unit of the layer.
Returns:
Haiku weight initializer.
"""
max_val = np.sqrt(1 / num_input_units)
return hk.initializers.RandomUniform(-max_val, max_val)
def conv(
num_features: int,
kernel_shape: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]],
) -> NetworkFn:
"""Convolutional layer with DQN's legacy weight initialization scheme."""
def net_fn(inputs):
"""Function representing conv layer with DQN's legacy initialization."""
num_input_units = inputs.shape[-1] * kernel_shape[0] * kernel_shape[1]
initializer = _dqn_default_initializer(num_input_units)
layer = hk.Conv2D(
num_features,
kernel_shape=kernel_shape,
stride=stride,
w_init=initializer,
b_init=initializer,
padding='VALID',
)
return layer(inputs)
return net_fn
def linear(num_outputs: int, with_bias=True) -> NetworkFn:
"""Linear layer with DQN's legacy weight initialization scheme."""
def net_fn(inputs):
"""Function representing linear layer with DQN's legacy initialization."""
initializer = _dqn_default_initializer(inputs.shape[-1])
layer = hk.Linear(
num_outputs, with_bias=with_bias, w_init=initializer, b_init=initializer
)
return layer(inputs)
return net_fn
def linear_with_shared_bias(num_outputs: int) -> NetworkFn:
"""Linear layer with single shared bias instead of one bias per output."""
def layer_fn(inputs):
"""Function representing a linear layer with single shared bias."""
initializer = _dqn_default_initializer(inputs.shape[-1])
bias_free_linear = hk.Linear(
num_outputs, with_bias=False, w_init=initializer
)
linear_output = bias_free_linear(inputs)
bias = hk.get_parameter('b', [1], inputs.dtype, init=initializer)
bias = jnp.broadcast_to(bias, linear_output.shape)
return linear_output + bias
return layer_fn
def noisy_linear(
num_outputs: int, weight_init_stddev: float, with_bias: bool = True
) -> NetworkFn:
"""Linear layer with weight randomization http://arxiv.org/abs/1706.10295."""
def make_noise_sqrt(rng, shape):
noise = jax.random.truncated_normal(rng, lower=-2.0, upper=2.0, shape=shape)
return jax.lax.stop_gradient(jnp.sign(noise) * jnp.sqrt(jnp.abs(noise)))
def net_fn(inputs):
"""Function representing a linear layer with learned noise distribution."""
num_inputs = inputs.shape[-1]
mu_initializer = _dqn_default_initializer(num_inputs)
mu_layer = hk.Linear(
num_outputs,
name='mu',
with_bias=with_bias,
w_init=mu_initializer,
b_init=mu_initializer,
)
sigma_initializer = hk.initializers.Constant( #
weight_init_stddev / jnp.sqrt(num_inputs)
)
sigma_layer = hk.Linear(
num_outputs,
name='sigma',
with_bias=True,
w_init=sigma_initializer,
b_init=sigma_initializer,
)
# Broadcast noise over batch dimension.
input_noise_sqrt = make_noise_sqrt(hk.next_rng_key(), [1, num_inputs])
output_noise_sqrt = make_noise_sqrt(hk.next_rng_key(), [1, num_outputs])
# Factorized Gaussian noise.
mu = mu_layer(inputs)
noisy_inputs = input_noise_sqrt * inputs
sigma = sigma_layer(noisy_inputs) * output_noise_sqrt
return mu + sigma
return net_fn
def dqn_torso() -> NetworkFn:
"""DQN convolutional torso.
Includes scaling from [`0`, `255`] (`uint8`) to [`0`, `1`] (`float32`).
Returns:
Network function that `haiku.transform` can be called on.
"""
def net_fn(inputs):
"""Function representing convolutional torso for a DQN Q-network."""
network = hk.Sequential([
lambda x: x.astype(jnp.float32) / 255.0,
conv(32, kernel_shape=(8, 8), stride=(4, 4)),
jax.nn.relu,
conv(64, kernel_shape=(4, 4), stride=(2, 2)),
jax.nn.relu,
conv(64, kernel_shape=(3, 3), stride=(1, 1)),
jax.nn.relu,
hk.Flatten(),
])
return network(inputs)
return net_fn
def dqn_value_head(num_actions: int, shared_bias: bool = False) -> NetworkFn:
"""Regular DQN Q-value head with single hidden layer."""
last_layer = linear_with_shared_bias if shared_bias else linear
def net_fn(inputs):
"""Function representing value head for a DQN Q-network."""
network = hk.Sequential([
linear(512),
jax.nn.relu,
last_layer(num_actions),
])
return network(inputs)
return net_fn
def rainbow_atari_network(
num_actions: int,
support: jnp.ndarray,
noisy_weight_init: float,
) -> NetworkFn:
"""Rainbow network, expects `uint8` input."""
chex.assert_rank(support, 1)
num_atoms = len(support)
support = support[None, None, :]
def net_fn(inputs):
"""Function representing Rainbow Q-network."""
inputs = dqn_torso()(inputs)
# Advantage head.
advantage = noisy_linear(512, noisy_weight_init, with_bias=True)(inputs)
advantage = jax.nn.relu(advantage)
advantage = noisy_linear(
num_actions * num_atoms, noisy_weight_init, with_bias=False
)(advantage)
advantage = jnp.reshape(advantage, (-1, num_actions, num_atoms))
# Value head.
value = noisy_linear(512, noisy_weight_init, with_bias=True)(inputs)
value = jax.nn.relu(value)
value = noisy_linear(num_atoms, noisy_weight_init, with_bias=False)(value)
value = jnp.reshape(value, (-1, 1, num_atoms))
# Q-distribution and values.
q_logits = value + advantage - jnp.mean(advantage, axis=-2, keepdims=True)
assert q_logits.shape[1:] == (num_actions, num_atoms)
q_dist = jax.nn.softmax(q_logits)
q_values = jnp.sum(q_dist * support, axis=2)
q_values = jax.lax.stop_gradient(q_values)
return C51NetworkOutputs(q_logits=q_logits, q_values=q_values)
return net_fn
def iqn_atari_network(num_actions: int, latent_dim: int) -> NetworkFn:
"""IQN network, expects `uint8` input."""
def net_fn(iqn_inputs):
"""Function representing IQN-DQN Q-network."""
state = iqn_inputs.state # batch x state_shape
taus = iqn_inputs.taus # batch x samples
# Apply DQN convnet to embed state.
state_embedding = dqn_torso()(state)
state_dim = state_embedding.shape[-1]
# Embed taus with cosine embedding + linear layer.
# cos(pi * i * tau) for i = 1, ..., latent_dim for each batch_element x sample.
# Broadcast everything to batch x samples x latent_dim.
pi_multiples = jnp.arange(1, latent_dim + 1, dtype=jnp.float32) * jnp.pi
tau_embedding = jnp.cos(pi_multiples[None, None, :] * taus[:, :, None])
# Map tau embedding onto state_dim via linear layer.
embedding_layer = linear(state_dim)
tau_embedding = hk.BatchApply(embedding_layer)(tau_embedding)
tau_embedding = jax.nn.relu(tau_embedding)
# Reshape/broadcast both embeddings to batch x num_samples x state_dim
# and multiply together, before applying value head.
head_input = tau_embedding * state_embedding[:, None, :]
value_head = dqn_value_head(num_actions)
q_dist = hk.BatchApply(value_head)(head_input)
q_values = jnp.mean(q_dist, axis=1)
q_values = jax.lax.stop_gradient(q_values)
return IqnOutputs(q_dist=q_dist, q_values=q_values)
return net_fn
def qr_atari_network(num_actions: int, quantiles: jnp.ndarray) -> NetworkFn:
"""QR-DQN network, expects `uint8` input."""
chex.assert_rank(quantiles, 1)
num_quantiles = len(quantiles)
def net_fn(inputs):
"""Function representing QR-DQN Q-network."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_quantiles * num_actions),
])
network_output = network(inputs)
q_dist = jnp.reshape(network_output, (-1, num_quantiles, num_actions))
q_values = jnp.mean(q_dist, axis=1)
q_values = jax.lax.stop_gradient(q_values)
return QRNetworkOutputs(q_dist=q_dist, q_values=q_values)
return net_fn
def c51_atari_network(num_actions: int, support: jnp.ndarray) -> NetworkFn:
"""C51 network, expects `uint8` input."""
chex.assert_rank(support, 1)
num_atoms = len(support)
def net_fn(inputs):
"""Function representing C51 Q-network."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_actions * num_atoms),
])
network_output = network(inputs)
q_logits = jnp.reshape(network_output, (-1, num_actions, num_atoms))
q_dist = jax.nn.softmax(q_logits)
q_values = jnp.sum(q_dist * support[None, None, :], axis=2)
q_values = jax.lax.stop_gradient(q_values)
return C51NetworkOutputs(q_logits=q_logits, q_values=q_values)
return net_fn
def double_dqn_atari_network(num_actions: int) -> NetworkFn:
"""DQN network with shared bias in final layer, expects `uint8` input."""
def net_fn(inputs):
"""Function representing DQN Q-network with shared bias output layer."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_actions, shared_bias=True),
])
return QNetworkOutputs(q_values=network(inputs))
return net_fn
def dqn_atari_network(num_actions: int) -> NetworkFn:
"""DQN network, expects `uint8` input."""
def net_fn(inputs):
"""Function representing DQN Q-network."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_actions),
])
return QNetworkOutputs(q_values=network(inputs))
return net_fn
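# Editor's note: the function below is an illustrative sketch added for
# clarity; it is not part of the original module. It shows how these network
# functions are typically wrapped with `hk.transform` and applied to a dummy
# batch of uint8 Atari frames. The batch/frame shapes are assumptions chosen
# for illustration only.
def _example_build_and_apply_dqn(num_actions: int = 6) -> jnp.ndarray:
  """Example: initialize and apply the DQN Q-network on a dummy input."""
  network = hk.transform(dqn_atari_network(num_actions))
  rng = jax.random.PRNGKey(42)
  # One stacked Atari observation: [batch, height, width, stacked_frames].
  dummy_frames = jnp.zeros((1, 84, 84, 4), dtype=jnp.uint8)
  params = network.init(rng, dummy_frames)
  outputs = network.apply(params, rng, dummy_frames)
  return outputs.q_values  # Shape [1, num_actions].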
| dqn_zoo-master | dqn_zoo/networks.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Replay components for DQN-type agents."""
# pylint: disable=g-bad-import-order
import collections
import typing
from typing import Any, Callable, Generic, Iterable, Mapping, Optional, Sequence, Text, Tuple, TypeVar
import dm_env
import numpy as np
import snappy
from dqn_zoo import parts
CompressedArray = Tuple[bytes, Tuple, np.dtype]
# Generic replay structure: Any flat named tuple.
ReplayStructure = TypeVar('ReplayStructure', bound=Tuple[Any, ...])
class Transition(typing.NamedTuple):
s_tm1: Optional[np.ndarray]
a_tm1: Optional[parts.Action]
r_t: Optional[float]
discount_t: Optional[float]
s_t: Optional[np.ndarray]
class UniformDistribution:
"""Provides uniform sampling of user-defined integer IDs."""
def __init__(self, random_state: np.random.RandomState):
self._random_state = random_state
self._ids = [] # IDs in a contiguous indexable format for sampling.
self._id_to_index = {} # User ID -> index into self._ids.
def add(self, ids: Sequence[int]) -> None:
"""Adds new IDs."""
for i in ids:
if i in self._id_to_index:
raise IndexError('Cannot add ID %d, it already exists.' % i)
for i in ids:
idx = len(self._ids)
self._id_to_index[i] = idx
self._ids.append(i)
def remove(self, ids: Sequence[int]) -> None:
"""Removes existing IDs."""
for i in ids:
if i not in self._id_to_index:
raise IndexError('Cannot remove ID %d, it does not exist.' % i)
for i in ids:
idx = self._id_to_index[i]
# Swap ID to be removed with ID at the end of self._ids.
self._ids[idx], self._ids[-1] = self._ids[-1], self._ids[idx]
self._id_to_index[self._ids[idx]] = idx # Update index for swapped ID.
self._id_to_index.pop(self._ids.pop()) # Remove ID from data structures.
def sample(self, size: int) -> np.ndarray:
"""Returns sample of IDs, uniformly sampled."""
indices = self._random_state.randint(self.size, size=size)
ids = np.fromiter(
(self._ids[idx] for idx in indices), dtype=np.int64, count=len(indices)
)
return ids
def ids(self) -> Iterable[int]:
"""Returns an iterable of all current IDs."""
return self._id_to_index.keys()
@property
def size(self) -> int:
"""Number of IDs currently tracked."""
return len(self._ids)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves distribution state as a dictionary (e.g. for serialization)."""
return {
'ids': self._ids,
'id_to_index': self._id_to_index,
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets distribution state from a (potentially de-serialized) dictionary."""
self._ids = state['ids']
self._id_to_index = state['id_to_index']
def check_valid(self) -> Tuple[bool, str]:
"""Checks internal consistency."""
if len(self._ids) != len(self._id_to_index):
return False, 'ids and id_to_index should be the same size.'
if len(self._ids) != len(set(self._ids)):
return False, 'IDs should be unique.'
if len(self._id_to_index.values()) != len(set(self._id_to_index.values())):
return False, 'Indices should be unique.'
for i in self._ids:
if self._ids[self._id_to_index[i]] != i:
return False, 'ID %d should map to itself.' % i
# Indices map to themselves because of previous check and uniqueness.
return True, ''
class TransitionReplay(Generic[ReplayStructure]):
"""Uniform replay, with LIFO storage for flat named tuples."""
def __init__(
self,
capacity: int,
structure: ReplayStructure,
random_state: np.random.RandomState,
encoder: Optional[Callable[[ReplayStructure], Any]] = None,
decoder: Optional[Callable[[Any], ReplayStructure]] = None,
):
self._capacity = capacity
self._structure = structure
self._random_state = random_state
self._encoder = encoder or (lambda s: s)
self._decoder = decoder or (lambda s: s)
self._distribution = UniformDistribution(random_state=random_state)
self._storage = collections.OrderedDict() # ID -> item.
self._t = 0 # Used to generate unique IDs for each item.
def add(self, item: ReplayStructure) -> None:
"""Adds single item to replay."""
if self.size == self._capacity:
oldest_id, _ = self._storage.popitem(last=False)
self._distribution.remove([oldest_id])
item_id = self._t
self._distribution.add([item_id])
self._storage[item_id] = self._encoder(item)
self._t += 1
def get(self, ids: Sequence[int]) -> Iterable[ReplayStructure]:
"""Retrieves items by IDs."""
for i in ids:
yield self._decoder(self._storage[i])
def sample(self, size: int) -> ReplayStructure:
"""Samples batch of items from replay uniformly, with replacement."""
ids = self._distribution.sample(size)
samples = self.get(ids)
transposed = zip(*samples)
stacked = [np.stack(xs, axis=0) for xs in transposed]
return type(self._structure)(*stacked) # pytype: disable=not-callable
def ids(self) -> Iterable[int]:
"""Get IDs of stored transitions, for testing."""
return self._storage.keys()
@property
def size(self) -> int:
"""Number of items currently contained in the replay."""
return len(self._storage)
@property
def capacity(self) -> int:
"""Total capacity of replay (max number of items stored at any one time)."""
return self._capacity
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves replay state as a dictionary (e.g. for serialization)."""
return {
# Serialize OrderedDict as a simpler, more common data structure.
'storage': list(self._storage.items()),
't': self._t,
'distribution': self._distribution.get_state(),
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets replay state from a (potentially de-serialized) dictionary."""
self._storage = collections.OrderedDict(state['storage'])
self._t = state['t']
self._distribution.set_state(state['distribution'])
def check_valid(self) -> Tuple[bool, str]:
"""Checks internal consistency."""
if self._t < len(self._storage):
return False, 't should be >= storage size.'
if set(self._storage.keys()) != set(self._distribution.ids()):
return False, 'IDs in storage and distribution do not match.'
return self._distribution.check_valid()
def _power(base, exponent) -> np.ndarray:
"""Same as usual power except `0 ** 0` is zero."""
# By default 0 ** 0 is 1 but we never want indices with priority zero to be
# sampled, even if the priority exponent is zero.
base = np.asarray(base)
return np.where(base == 0.0, 0.0, base**exponent)
def importance_sampling_weights(
probabilities: np.ndarray,
uniform_probability: float,
exponent: float,
normalize: bool,
) -> np.ndarray:
"""Calculates importance sampling weights from given sampling probabilities.
Args:
probabilities: Array of sampling probabilities for a subset of items. Since
this is a subset, the probabilities will typically not sum to `1`.
uniform_probability: Probability of sampling an item if uniformly sampling.
exponent: Scalar that controls the amount of importance sampling correction
in the weights. Where `1` corrects fully and `0` is no correction
(resulting weights are all `1`).
normalize: Whether to scale all weights so that the maximum weight is `1`.
Can be enabled for stability since weights will only scale down.
Returns:
Importance sampling weights that can be used to scale the loss. These have
the same shape as `probabilities`.
"""
if not 0.0 <= exponent <= 1.0:
raise ValueError('Require 0 <= exponent <= 1.')
if not 0.0 <= uniform_probability <= 1.0:
raise ValueError('Expected 0 <= uniform_probability <= 1.')
weights = (uniform_probability / probabilities) ** exponent
if normalize:
weights /= np.max(weights)
if not np.isfinite(weights).all():
raise ValueError('Weights are not finite: %s.' % weights)
return weights
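# Editor's note: illustrative sketch, not part of the original module. It
# shows how `importance_sampling_weights` is typically called for a small
# prioritized sample; the probabilities and replay size are made-up values.
def _example_importance_sampling_weights() -> np.ndarray:
  """Example: weights for 3 items sampled from a replay of size 100."""
  # Sampling probabilities of the sampled items under prioritized sampling.
  probabilities = np.array([0.05, 0.01, 0.002])
  uniform_probability = 1.0 / 100  # Probability under uniform sampling.
  # Full correction (exponent=1), normalized so the maximum weight is 1.
  return importance_sampling_weights(
      probabilities, uniform_probability, exponent=1.0, normalize=True
  )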
class SumTree:
"""A binary tree where non-leaf nodes are the sum of child nodes.
Leaf nodes contain non-negative floats and are set externally. Non-leaf nodes
are the sum of their children. This data structure allows O(log n) updates and
O(log n) queries of which index corresponds to a given sum. The main use
case is sampling from a multinomial distribution with many probabilities
which are updated a few at a time.
"""
def __init__(self):
"""Initializes an empty `SumTree`."""
# When there are n values, the storage array will have size 2 * n. The first
# n elements are non-leaf nodes (ignoring the very first element), with
# index 1 corresponding to the root node. The next n elements are leaf nodes
# that contain values. A non-leaf node with index i has children at
# locations 2 * i, 2 * i + 1.
self._size = 0
self._storage = np.zeros(0, dtype=np.float64)
self._first_leaf = 0 # Boundary between non-leaf and leaf nodes.
def resize(self, size: int) -> None:
"""Resizes tree, truncating or expanding with zeros as needed."""
self._initialize(size, values=None)
def get(self, indices: Sequence[int]) -> np.ndarray:
"""Gets values corresponding to given indices."""
indices = np.asarray(indices)
if not ((0 <= indices) & (indices < self.size)).all():
raise IndexError('index out of range, expect 0 <= index < %s' % self.size)
return self.values[indices]
def set(self, indices: Sequence[int], values: Sequence[float]) -> None:
"""Sets values at the given indices."""
values = np.asarray(values)
if not np.isfinite(values).all() or (values < 0.0).any():
raise ValueError('Values must be finite and non-negative.')
self.values[indices] = values
storage = self._storage
for idx in np.asarray(indices) + self._first_leaf:
parent = idx // 2
while parent > 0:
# At this point the subtree with root parent is consistent.
storage[parent] = storage[2 * parent] + storage[2 * parent + 1]
parent //= 2
def set_all(self, values: Sequence[float]) -> None:
"""Sets many values all at once, also setting size of the sum tree."""
values = np.asarray(values)
if not np.isfinite(values).all() or (values < 0.0).any():
raise ValueError('Values must be finite, non-negative numbers.')
self._initialize(len(values), values)
def query(self, targets: Sequence[float]) -> Sequence[int]:
"""Finds smallest indices where `target <` cumulative value sum up to index.
Args:
targets: The target sums.
Returns:
For each target, the smallest index such that target is strictly less than
the cumulative sum of values up to and including that index.
Raises:
ValueError: if `target >=` sum of all values or `target < 0` for any
of the given targets.
"""
return [self._query_single(t) for t in targets]
def root(self) -> float:
"""Returns sum of values."""
return self._storage[1] if self.size > 0 else np.nan
@property
def values(self) -> np.ndarray:
"""View of array containing all (leaf) values in the sum tree."""
return self._storage[self._first_leaf : self._first_leaf + self.size]
@property
def size(self) -> int:
"""Number of (leaf) values in the sum tree."""
return self._size
@property
def capacity(self) -> int:
"""Current sum tree capacity (exceeding it will trigger resizing)."""
return self._first_leaf
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves sum tree state as a dictionary (e.g. for serialization)."""
return {
'size': self._size,
'storage': self._storage,
'first_leaf': self._first_leaf,
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets sum tree state from a (potentially de-serialized) dictionary."""
self._size = state['size']
self._storage = state['storage']
self._first_leaf = state['first_leaf']
def check_valid(self) -> Tuple[bool, str]:
"""Checks internal consistency."""
if len(self._storage) != 2 * self._first_leaf:
return False, 'first_leaf should be half the size of storage.'
if not 0 <= self.size <= self.capacity:
return False, 'Require 0 <= self.size <= self.capacity.'
if len(self.values) != self.size:
return False, 'Number of values should be equal to the size.'
storage = self._storage
for i in range(1, self._first_leaf):
if storage[i] != storage[2 * i] + storage[2 * i + 1]:
return False, 'Non-leaf node %d should be sum of child nodes.' % i
return True, ''
def _initialize(self, size: int, values: Optional[Sequence[float]]) -> None:
"""Resizes storage and sets new values if supplied."""
assert size >= 0
assert values is None or len(values) == size
if size < self.size: # Keep storage and values, zero out extra values.
if values is None:
new_values = self.values[:size] # Truncate existing values.
else:
new_values = values
self._size = size
self._set_values(new_values)
# self._first_leaf remains the same.
elif size <= self.capacity: # Reuse same storage, but size increases.
self._size = size
if values is not None:
self._set_values(values)
# self._first_leaf remains the same.
# Newly activated leaf nodes are already zero and sum nodes already correct.
else: # Allocate new storage.
new_capacity = 1
while new_capacity < size:
new_capacity *= 2
new_storage = np.empty((2 * new_capacity,), dtype=np.float64)
if values is None:
new_values = self.values
else:
new_values = values
self._storage = new_storage
self._first_leaf = new_capacity
self._size = size
self._set_values(new_values)
def _set_values(self, values: Sequence[float]) -> None:
"""Sets values assuming storage has enough capacity and update sums."""
# Note every part of the storage is set here.
assert len(values) <= self.capacity
storage = self._storage
storage[self._first_leaf : self._first_leaf + len(values)] = values
storage[self._first_leaf + len(values) :] = 0
for i in range(self._first_leaf - 1, 0, -1):
storage[i] = storage[2 * i] + storage[2 * i + 1]
storage[0] = 0.0 # Unused.
def _query_single(self, target: float) -> int:
"""Queries a single target, see query for more detailed documentation."""
if not 0.0 <= target < self.root():
raise ValueError('Require 0 <= target < total sum.')
storage = self._storage
idx = 1 # Root node.
while idx < self._first_leaf:
# At this point we always have target < storage[idx].
assert target < storage[idx]
left_idx = 2 * idx
right_idx = left_idx + 1
left_sum = storage[left_idx]
if target < left_sum:
idx = left_idx
else:
idx = right_idx
target -= left_sum
assert idx < 2 * self.capacity
return idx - self._first_leaf
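# Editor's note: illustrative sketch, not part of the original module. It
# shows the intended SumTree usage pattern described in the class docstring:
# set leaf values, then sample indices proportionally to those values by
# querying uniformly drawn targets in [0, root()). Values are made up.
def _example_sum_tree_sampling(random_state: np.random.RandomState):
  """Example: proportional sampling of indices via a SumTree."""
  tree = SumTree()
  tree.set_all([3.0, 1.0, 2.0])  # Index 0 is three times as likely as index 1.
  targets = random_state.uniform(0.0, tree.root(), size=5)
  # Each query returns the smallest index whose cumulative sum exceeds target.
  indices = tree.query(targets)
  tree.set([1], [4.0])  # Priorities can later be updated a few at a time.
  return indices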
class PrioritizedDistribution:
"""Distribution for weighted sampling of user-defined integer IDs."""
def __init__(
self,
priority_exponent: float,
uniform_sample_probability: float,
random_state: np.random.RandomState,
min_capacity: int = 0,
max_capacity: Optional[int] = None,
):
if priority_exponent < 0.0:
raise ValueError('Require priority_exponent >= 0.')
self._priority_exponent = priority_exponent
if not 0.0 <= uniform_sample_probability <= 1.0:
raise ValueError('Require 0 <= uniform_sample_probability <= 1.')
if max_capacity is not None and max_capacity < min_capacity:
raise ValueError('Require max_capacity >= min_capacity.')
if min_capacity < 0:
raise ValueError('Require min_capacity >= 0.')
self._uniform_sample_probability = uniform_sample_probability
self._max_capacity = max_capacity
self._sum_tree = SumTree()
self._sum_tree.resize(min_capacity)
self._random_state = random_state
self._id_to_index = {} # User ID -> sum tree index.
self._index_to_id = {} # Sum tree index -> user ID.
# Unused sum tree indices that can be allocated to new user IDs.
self._inactive_indices = list(range(min_capacity))
# Currently used sum tree indices, needed for uniform sampling.
self._active_indices = []
# Maps an active index to its location in active_indices_, for removal.
self._active_indices_location = {}
def ensure_capacity(self, capacity: int) -> None:
"""Ensures sufficient capacity, a no-op if capacity is already enough."""
if self._max_capacity is not None and capacity > self._max_capacity:
raise ValueError(
'capacity %d cannot exceed max_capacity %d'
% (capacity, self._max_capacity)
)
if capacity <= self._sum_tree.size:
return # There is already sufficient capacity.
self._inactive_indices.extend(range(self._sum_tree.size, capacity))
self._sum_tree.resize(capacity)
def add_priorities(
self, ids: Sequence[int], priorities: Sequence[float]
) -> None:
"""Add priorities for new IDs."""
for i in ids:
if i in self._id_to_index:
raise IndexError('ID %d already exists.' % i)
new_size = self.size + len(ids)
if self._max_capacity is not None and new_size > self._max_capacity:
raise ValueError('Cannot add IDs as max capacity would be exceeded.')
# Expand to accommodate new IDs if needed.
if new_size > self.capacity:
candidate_capacity = max(new_size, 2 * self.capacity)
if self._max_capacity is None:
new_capacity = candidate_capacity
else:
new_capacity = min(self._max_capacity, candidate_capacity)
self.ensure_capacity(new_capacity)
# Assign unused indices to IDs.
indices = []
for i in ids:
idx = self._inactive_indices.pop()
self._active_indices_location[idx] = len(self._active_indices)
self._active_indices.append(idx)
self._id_to_index[i] = idx
self._index_to_id[idx] = i
indices.append(idx)
# Set priorities on sum tree.
self._sum_tree.set(indices, _power(priorities, self._priority_exponent))
def remove_priorities(self, ids: Sequence[int]) -> None:
"""Remove priorities associated with given IDs."""
indices = []
for i in ids:
try:
idx = self._id_to_index[i]
except KeyError as err:
raise IndexError('Cannot remove ID %d, it does not exist.' % i) from err
indices.append(idx)
for i, idx in zip(ids, indices):
del self._id_to_index[i]
del self._index_to_id[idx]
# Swap index to be removed with index at the end.
j = self._active_indices_location[idx]
self._active_indices[j], self._active_indices[-1] = (
self._active_indices[-1],
self._active_indices[j],
)
# Update location for the swapped index.
self._active_indices_location[self._active_indices[j]] = j
# Remove index from data structures.
self._active_indices_location.pop(self._active_indices.pop())
self._inactive_indices.extend(indices)
self._sum_tree.set(indices, np.zeros((len(indices),), dtype=np.float64))
def update_priorities(
self, ids: Sequence[int], priorities: Sequence[float]
) -> None:
"""Updates priorities for existing IDs."""
indices = []
for i in ids:
if i not in self._id_to_index:
raise IndexError('ID %d does not exist.' % i)
indices.append(self._id_to_index[i])
self._sum_tree.set(indices, _power(priorities, self._priority_exponent))
def sample(self, size: int) -> Tuple[np.ndarray, np.ndarray]:
"""Returns sample of IDs with corresponding probabilities."""
if self.size == 0:
raise RuntimeError('No IDs to sample.')
uniform_indices = [
self._active_indices[j]
for j in self._random_state.randint(self.size, size=size)
]
if self._sum_tree.root() == 0.0:
prioritized_indices = uniform_indices
else:
targets = self._random_state.uniform(size=size) * self._sum_tree.root()
prioritized_indices = np.asarray(self._sum_tree.query(targets))
usp = self._uniform_sample_probability
indices = np.where(
self._random_state.uniform(size=size) < usp,
uniform_indices,
prioritized_indices,
)
uniform_prob = np.asarray(1.0 / self.size) # np.asarray is for pytype.
priorities = self._sum_tree.get(indices)
if self._sum_tree.root() == 0.0:
prioritized_probs = np.full_like(priorities, fill_value=uniform_prob)
else:
prioritized_probs = priorities / self._sum_tree.root()
sample_probs = (1.0 - usp) * prioritized_probs + usp * uniform_prob
ids = np.fromiter(
(self._index_to_id[idx] for idx in indices),
dtype=np.int64,
count=len(indices),
)
return ids, sample_probs
def get_exponentiated_priorities(self, ids: Sequence[int]) -> Sequence[float]:
"""Returns priority ** priority_exponent for the given indices."""
indices = np.fromiter(
(self._id_to_index[i] for i in ids), dtype=np.int64, count=len(ids)
)
return self._sum_tree.get(indices)
def ids(self) -> Iterable[int]:
"""Returns an iterable of all current IDs."""
return self._id_to_index.keys()
@property
def capacity(self) -> int:
"""Number of IDs that can be stored until memory needs to be allocated."""
return self._sum_tree.size
@property
def size(self) -> int:
"""Number of IDs currently tracked."""
return len(self._id_to_index)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves distribution state as a dictionary (e.g. for serialization)."""
return {
'sum_tree': self._sum_tree.get_state(),
'id_to_index': self._id_to_index,
'index_to_id': self._index_to_id,
'inactive_indices': self._inactive_indices,
'active_indices': self._active_indices,
'active_indices_location': self._active_indices_location,
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets distribution state from a (potentially de-serialized) dictionary."""
self._sum_tree.set_state(state['sum_tree'])
self._id_to_index = state['id_to_index']
self._index_to_id = state['index_to_id']
self._inactive_indices = state['inactive_indices']
self._active_indices = state['active_indices']
self._active_indices_location = state['active_indices_location']
def check_valid(self) -> Tuple[bool, str]:
"""Checks internal consistency."""
if len(self._id_to_index) != len(self._index_to_id):
return False, 'ID to index maps are not the same size.'
for i in self._id_to_index:
if self._index_to_id[self._id_to_index[i]] != i:
return False, 'ID %d should map to itself.' % i
# Indices map to themselves because of previous check and uniqueness.
if len(set(self._inactive_indices)) != len(self._inactive_indices):
return False, 'Inactive indices should be unique.'
if len(set(self._active_indices)) != len(self._active_indices):
return False, 'Active indices should be unique.'
if set(self._active_indices) != set(self._index_to_id.keys()):
return False, 'Active indices should match index to ID mapping keys.'
all_indices = self._inactive_indices + self._active_indices
if sorted(all_indices) != list(range(self._sum_tree.size)):
return False, 'Inactive and active indices should partition all indices.'
if len(self._active_indices) != len(self._active_indices_location):
return False, 'Active indices and their location should be the same size.'
for j, i in enumerate(self._active_indices):
if j != self._active_indices_location[i]:
return False, 'Active index location %d not correct for index %d.' % (
j,
i,
)
return self._sum_tree.check_valid()
class PrioritizedTransitionReplay(Generic[ReplayStructure]):
"""Prioritized replay, with LIFO storage for flat named tuples.
This is the proportional variant as described in
http://arxiv.org/abs/1511.05952.
"""
def __init__(
self,
capacity: int,
structure: ReplayStructure,
priority_exponent: float,
importance_sampling_exponent: Callable[[int], float],
uniform_sample_probability: float,
normalize_weights: bool,
random_state: np.random.RandomState,
encoder: Optional[Callable[[ReplayStructure], Any]] = None,
decoder: Optional[Callable[[Any], ReplayStructure]] = None,
):
self._capacity = capacity
self._structure = structure
self._random_state = random_state
self._encoder = encoder or (lambda s: s)
self._decoder = decoder or (lambda s: s)
self._distribution = PrioritizedDistribution(
min_capacity=capacity,
max_capacity=capacity,
priority_exponent=priority_exponent,
uniform_sample_probability=uniform_sample_probability,
random_state=random_state,
)
self._importance_sampling_exponent = importance_sampling_exponent
self._normalize_weights = normalize_weights
self._storage = collections.OrderedDict() # ID -> item.
self._t = 0 # Used to allocate IDs.
def add(self, item: ReplayStructure, priority: float) -> None:
"""Adds a single item with a given priority to the replay buffer."""
if self.size == self._capacity:
oldest_id, _ = self._storage.popitem(last=False)
self._distribution.remove_priorities([oldest_id])
item_id = self._t
self._distribution.add_priorities([item_id], [priority])
self._storage[item_id] = self._encoder(item)
self._t += 1
def get(self, ids: Sequence[int]) -> Iterable[ReplayStructure]:
"""Retrieves items by IDs."""
for i in ids:
yield self._decoder(self._storage[i])
def sample(
self,
size: int,
) -> Tuple[ReplayStructure, np.ndarray, np.ndarray]:
"""Samples a batch of transitions."""
ids, probabilities = self._distribution.sample(size)
weights = importance_sampling_weights(
probabilities,
uniform_probability=1.0 / self.size,
exponent=self.importance_sampling_exponent,
normalize=self._normalize_weights,
)
samples = self.get(ids)
transposed = zip(*samples)
stacked = [np.stack(xs, axis=0) for xs in transposed]
# pytype: disable=not-callable
return type(self._structure)(*stacked), ids, weights
# pytype: enable=not-callable
def update_priorities(
self, ids: Sequence[int], priorities: Sequence[float]
) -> None:
"""Updates IDs with given priorities."""
priorities = np.asarray(priorities)
self._distribution.update_priorities(ids, priorities)
@property
def size(self) -> int:
"""Number of elements currently contained in replay."""
return len(self._storage)
@property
def capacity(self) -> int:
"""Total capacity of replay (maximum number of items that can be stored)."""
return self._capacity
@property
def importance_sampling_exponent(self):
"""Importance sampling exponent at current step."""
return self._importance_sampling_exponent(self._t)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves replay state as a dictionary (e.g. for serialization)."""
return {
# Serialize OrderedDict as a simpler, more common data structure.
'storage': list(self._storage.items()),
't': self._t,
'distribution': self._distribution.get_state(),
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets replay state from a (potentially de-serialized) dictionary."""
self._storage = collections.OrderedDict(state['storage'])
self._t = state['t']
self._distribution.set_state(state['distribution'])
def check_valid(self) -> Tuple[bool, str]:
"""Checks internal consistency."""
if self._t < len(self._storage):
return False, 't should be >= storage size.'
if set(self._storage.keys()) != set(self._distribution.ids()):
return False, 'IDs in storage and distribution do not match.'
return self._distribution.check_valid()
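# Editor's note: illustrative sketch, not part of the original module. It
# shows the typical prioritized replay cycle: add items with an initial
# priority, sample a batch together with importance weights, then update the
# priorities of the sampled items (e.g. from new TD errors). All shapes,
# hyperparameters and priorities here are made-up example values.
def _example_prioritized_replay_usage(random_state: np.random.RandomState):
  """Example: add / sample / update_priorities round trip."""
  structure = Transition(
      s_tm1=None, a_tm1=None, r_t=None, discount_t=None, s_t=None
  )
  replay = PrioritizedTransitionReplay(
      capacity=100,
      structure=structure,
      priority_exponent=0.6,
      importance_sampling_exponent=lambda t: 0.4,
      uniform_sample_probability=0.01,
      normalize_weights=True,
      random_state=random_state,
  )
  for _ in range(10):
    transition = Transition(
        s_tm1=np.zeros((4,)),
        a_tm1=0,
        r_t=1.0,
        discount_t=0.99,
        s_t=np.zeros((4,)),
    )
    replay.add(transition, priority=1.0)  # E.g. maximum priority seen so far.
  batch, ids, weights = replay.sample(size=8)
  new_priorities = np.abs(random_state.normal(size=len(ids)))  # E.g. |TD error|.
  replay.update_priorities(ids, new_priorities)
  return batch, weights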
class TransitionAccumulator:
"""Accumulates timesteps to form transitions."""
def __init__(self):
self.reset()
def step(
self, timestep_t: dm_env.TimeStep, a_t: parts.Action
) -> Iterable[Transition]:
"""Accumulates timestep and resulting action, maybe yield a transition."""
if timestep_t.first():
self.reset()
if self._timestep_tm1 is None:
if not timestep_t.first():
raise ValueError('Expected FIRST timestep, got %s.' % str(timestep_t))
self._timestep_tm1 = timestep_t
self._a_tm1 = a_t
return # Empty iterable.
else:
transition = Transition(
s_tm1=self._timestep_tm1.observation,
a_tm1=self._a_tm1,
r_t=timestep_t.reward,
discount_t=timestep_t.discount,
s_t=timestep_t.observation,
)
self._timestep_tm1 = timestep_t
self._a_tm1 = a_t
yield transition
def reset(self) -> None:
"""Resets the accumulator. Following timestep is expected to be `FIRST`."""
self._timestep_tm1 = None
self._a_tm1 = None
def _build_n_step_transition(transitions):
"""Builds a single n-step transition from n 1-step transitions."""
r_t = 0.0
discount_t = 1.0
for transition in transitions:
r_t += discount_t * transition.r_t
discount_t *= transition.discount_t
# n-step transition, letting s_tm1 = s_tmn, and a_tm1 = a_tmn.
return Transition(
s_tm1=transitions[0].s_tm1,
a_tm1=transitions[0].a_tm1,
r_t=r_t,
discount_t=discount_t,
s_t=transitions[-1].s_t,
)
class NStepTransitionAccumulator:
"""Accumulates timesteps to form n-step transitions.
Let `t` be the index of a timestep within an episode and `T` be the index of
the final timestep within an episode. Then given the step type of the timestep
passed into `step()` the accumulator will:
* `FIRST`: yield nothing.
* `MID`: if `t < n`, yield nothing, else yield one n-step transition
`s_{t - n} -> s_t`.
* `LAST`: yield all transitions that end at `s_t = s_T` from up to n steps
away, specifically `s_{T - min(n, T)} -> s_T, ..., s_{T - 1} -> s_T`.
These are `min(n, T)`-step, ..., `1`-step transitions.
"""
def __init__(self, n):
self._transitions = collections.deque(maxlen=n) # Store 1-step transitions.
self.reset()
def step(
self, timestep_t: dm_env.TimeStep, a_t: parts.Action
) -> Iterable[Transition]:
"""Accumulates timestep and resulting action, yields transitions."""
if timestep_t.first():
self.reset()
# There are no transitions on the first timestep.
if self._timestep_tm1 is None:
assert self._a_tm1 is None
if not timestep_t.first():
raise ValueError('Expected FIRST timestep, got %s.' % str(timestep_t))
self._timestep_tm1 = timestep_t
self._a_tm1 = a_t
return # Empty iterable.
self._transitions.append(
Transition(
s_tm1=self._timestep_tm1.observation,
a_tm1=self._a_tm1,
r_t=timestep_t.reward,
discount_t=timestep_t.discount,
s_t=timestep_t.observation,
)
)
self._timestep_tm1 = timestep_t
self._a_tm1 = a_t
if timestep_t.last():
# Yield any remaining n, n-1, ..., 1-step transitions at episode end.
while self._transitions:
yield _build_n_step_transition(self._transitions)
self._transitions.popleft()
else:
# Wait for n transitions before yielding anything.
if len(self._transitions) < self._transitions.maxlen:
return # Empty iterable.
assert len(self._transitions) == self._transitions.maxlen
# This is the typical case, yield a single n-step transition.
yield _build_n_step_transition(self._transitions)
def reset(self) -> None:
"""Resets the accumulator. Following timestep is expected to be FIRST."""
self._transitions.clear()
self._timestep_tm1 = None
self._a_tm1 = None
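# Editor's note: illustrative sketch, not part of the original module. It
# walks a tiny 3-step episode through a 2-step accumulator to show which
# transitions are yielded for FIRST, MID and LAST timesteps; the observations,
# rewards and action are made-up placeholder values.
def _example_n_step_accumulator():
  """Example: feeding a 3-step episode into a 2-step accumulator."""
  accumulator = NStepTransitionAccumulator(n=2)
  transitions = []
  # FIRST timestep: nothing is yielded.
  transitions.extend(accumulator.step(dm_env.restart(observation=0), a_t=1))
  # First MID timestep: still fewer than n 1-step transitions, nothing yielded.
  transitions.extend(
      accumulator.step(dm_env.transition(reward=1.0, observation=1), a_t=1)
  )
  # Second MID timestep: one 2-step transition s_0 -> s_2 is yielded.
  transitions.extend(
      accumulator.step(dm_env.transition(reward=1.0, observation=2), a_t=1)
  )
  # LAST timestep: the remaining 2-step (s_1 -> s_3) and 1-step (s_2 -> s_3)
  # transitions are yielded.
  transitions.extend(
      accumulator.step(dm_env.termination(reward=1.0, observation=3), a_t=1)
  )
  return transitions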
def compress_array(array: np.ndarray) -> CompressedArray:
"""Compresses a numpy array with snappy."""
return snappy.compress(array), array.shape, array.dtype
def uncompress_array(compressed: CompressedArray) -> np.ndarray:
"""Uncompresses a numpy array with snappy given its shape and dtype."""
compressed_array, shape, dtype = compressed
byte_string = snappy.uncompress(compressed_array)
return np.frombuffer(byte_string, dtype=dtype).reshape(shape)
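# Editor's note: illustrative sketch, not part of the original module, showing
# the compress/uncompress round trip used to store observations compactly in
# replay; the frame shape is a made-up example.
def _example_compression_round_trip() -> bool:
  """Example: an array survives a snappy compression round trip."""
  frame = np.zeros((84, 84, 4), dtype=np.uint8)  # E.g. a stacked Atari frame.
  compressed = compress_array(frame)  # A (bytes, shape, dtype) triple.
  restored = uncompress_array(compressed)
  return np.array_equal(frame, restored)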
| dqn_zoo-master | dqn_zoo/replay.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for gym_atari."""
# pylint: disable=g-bad-import-order
import itertools
import dm_env.test_utils
import numpy as np
from dqn_zoo import gym_atari
from dqn_zoo import test_utils
from absl.testing import absltest
from absl.testing import parameterized
def make_gym_atari_env(game, seed=1):
env = gym_atari.GymAtari(game, seed=seed)
env = gym_atari.RandomNoopsEnvironmentWrapper(
env, min_noop_steps=1, max_noop_steps=30, seed=seed
)
return env
class GymAtariEnvironmentTest(
dm_env.test_utils.EnvironmentTestMixin, absltest.TestCase
):
"""Sanity checks compliance with `dm_env.Environment` interface contract."""
def make_object_under_test(self):
return make_gym_atari_env('pong', seed=1)
def random_action(random_state, action_spec):
return random_state.randint(
action_spec.minimum, action_spec.maximum + 1, dtype=action_spec.dtype
)
def timestep_generator(seed):
random_state = np.random.RandomState(seed=seed)
env = make_gym_atari_env('pong', seed=seed)
yield env.reset()
while True:
action = random_action(random_state, env.action_spec())
yield env.step(action)
class GymAtariTest(absltest.TestCase):
"""Sanity checks expected properties of Gym Atari."""
def test_seed_range(self):
for seed in (0, 1, 2**32 - 1):
gym_atari.GymAtari('pong', seed=seed)
def test_can_call_close(self):
gym_atari.GymAtari('pong', seed=1).close()
def test_determinism(self):
num_timesteps = 1000
# Check using same seed produces the same timesteps.
same_timesteps = zip(timestep_generator(seed=1), timestep_generator(seed=1))
for ts1, ts2 in itertools.islice(same_timesteps, num_timesteps):
self.assertEqual(ts1.step_type, ts2.step_type)
self.assertEqual(ts1.reward, ts2.reward)
self.assertEqual(ts1.discount, ts2.discount)
self.assertEqual(ts1.observation[1], ts2.observation[1])
np.testing.assert_array_equal(ts1.observation[0], ts2.observation[0])
# Sanity check that different seeds produce different timesteps.
diff_timesteps = zip(timestep_generator(seed=2), timestep_generator(seed=3))
same = True
for ts1, ts2 in itertools.islice(diff_timesteps, num_timesteps):
same = same and (ts1.step_type == ts2.step_type)
same = same and (ts1.reward == ts2.reward)
same = same and (ts1.discount == ts2.discount)
same = same and (ts1.observation[1] == ts2.observation[1])
same = same and np.array_equal(ts1.observation[0], ts2.observation[0])
assert not same
class RandomNoopsEnvironmentWrapperTest(parameterized.TestCase):
@parameterized.parameters((0, 5), (2, 5), (0, 0), (3, 3))
def test_basic(self, min_noop_steps, max_noop_steps):
noop_action = 3
tape = []
environment = test_utils.DummyEnvironment(tape, episode_length=10)
wrapped_environment = gym_atari.RandomNoopsEnvironmentWrapper(
environment,
min_noop_steps=min_noop_steps,
max_noop_steps=max_noop_steps,
noop_action=noop_action,
seed=42,
)
# Make sure noops are applied an appropriate number of times (in min/max range
# and not always the same number), with correct action.
num_noop_steps = set()
for i in range(20):
# Switch between different ways of starting a new episode.
if i % 4 == 0:
tape.clear()
wrapped_environment.reset()
num_steps = len(tape)
expected_tape = ['Environment reset'] + [
'Environment step (%s)' % noop_action
] * (num_steps - 1)
else:
timestep = wrapped_environment.reset()
while not timestep.last():
timestep = wrapped_environment.step(0)
tape.clear()
wrapped_environment.step(noop_action)
num_steps = len(tape)
expected_tape = ['Environment step (%s)' % noop_action] * num_steps
self.assertEqual(expected_tape, tape)
# +1 because of the extra initial reset() / step().
self.assertBetween(num_steps, min_noop_steps + 1, max_noop_steps + 1)
num_noop_steps.add(num_steps)
# Do some regular steps & check pass-through of actions.
wrapped_environment.step(6)
wrapped_environment.step(7)
self.assertLen(tape, num_steps + 2)
self.assertEqual(
['Environment step (6)', 'Environment step (7)'], tape[-2:]
)
# Check it's not always the same number of random noop steps.
if max_noop_steps > min_noop_steps:
self.assertGreater(len(num_noop_steps), 1)
def test_specs(self):
environment = test_utils.DummyEnvironment([], episode_length=10)
wrapped_environment = gym_atari.RandomNoopsEnvironmentWrapper(
environment, max_noop_steps=5
)
self.assertEqual(
environment.observation_spec(), wrapped_environment.observation_spec()
)
self.assertEqual(
environment.action_spec(), wrapped_environment.action_spec()
)
self.assertEqual(
environment.reward_spec(), wrapped_environment.reward_spec()
)
self.assertEqual(
environment.discount_spec(), wrapped_environment.discount_spec()
)
def test_determinism(self):
def num_noops_sequence(seed, num_episodes):
tape = []
environment = test_utils.DummyEnvironment(tape, episode_length=10)
wrapped_environment = gym_atari.RandomNoopsEnvironmentWrapper(
environment, max_noop_steps=8, seed=seed
)
seq = []
for _ in range(num_episodes):
tape.clear()
wrapped_environment.reset()
seq.append(len(tape))
return seq
sequence_1 = num_noops_sequence(seed=123, num_episodes=20)
sequence_2 = num_noops_sequence(seed=123, num_episodes=20)
sequence_3 = num_noops_sequence(seed=124, num_episodes=20)
self.assertEqual(sequence_1, sequence_2)
self.assertNotEqual(sequence_1, sequence_3)
def test_episode_end_during_noop_steps(self):
environment = test_utils.DummyEnvironment([], episode_length=5)
wrapped_environment = gym_atari.RandomNoopsEnvironmentWrapper(
environment, min_noop_steps=10, max_noop_steps=20
)
with self.assertRaisesRegex(RuntimeError, 'Episode ended'):
wrapped_environment.reset()
if __name__ == '__main__':
absltest.main()
| dqn_zoo-master | dqn_zoo/gym_atari_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Components for DQN."""
# pylint: disable=g-bad-import-order
import abc
import collections
import csv
import os
import timeit
from typing import Any, Iterable, Mapping, Optional, Text, Tuple, Union
import distrax
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
from dqn_zoo import networks
from dqn_zoo import processors
Action = int
Network = networks.Network
NetworkParams = networks.Params
PRNGKey = jnp.ndarray # A size 2 array.
class Agent(abc.ABC):
"""Agent interface."""
@abc.abstractmethod
def step(self, timestep: dm_env.TimeStep) -> Action:
"""Selects action given timestep and potentially learns."""
@abc.abstractmethod
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
@abc.abstractmethod
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
@abc.abstractmethod
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
@property
@abc.abstractmethod
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
def run_loop(
agent: Agent,
environment: dm_env.Environment,
max_steps_per_episode: int = 0,
yield_before_reset: bool = False,
) -> Iterable[
Tuple[
dm_env.Environment, Optional[dm_env.TimeStep], Agent, Optional[Action]
]
]:
"""Repeatedly alternates step calls on environment and agent.
At time `t`, `t + 1` environment timesteps and `t + 1` agent steps have been
seen in the current episode. `t` resets to `0` for the next episode.
Args:
agent: Agent to be run, has methods `step(timestep)` and `reset()`.
environment: Environment to run, has methods `step(action)` and `reset()`.
max_steps_per_episode: If positive, when time t reaches this value within an
episode, the episode is truncated.
yield_before_reset: Whether to additionally yield `(environment, None,
agent, None)` before the agent and environment is reset at the start of
each episode.
Yields:
Tuple `(environment, timestep_t, agent, a_t)` where
`a_t = agent.step(timestep_t)`.
"""
while True: # For each episode.
if yield_before_reset:
yield environment, None, agent, None
t = 0
agent.reset()
timestep_t = environment.reset() # timestep_0.
while True: # For each step in the current episode.
a_t = agent.step(timestep_t)
yield environment, timestep_t, agent, a_t
# Update t after one environment step and agent step and relabel.
t += 1
a_tm1 = a_t
timestep_t = environment.step(a_tm1)
if max_steps_per_episode > 0 and t >= max_steps_per_episode:
assert t == max_steps_per_episode
timestep_t = timestep_t._replace(step_type=dm_env.StepType.LAST)
if timestep_t.last():
unused_a_t = agent.step(timestep_t) # Extra agent step, action ignored.
yield environment, timestep_t, agent, None
break
def generate_statistics(
trackers: Iterable[Any],
timestep_action_sequence: Iterable[
Tuple[
dm_env.Environment,
Optional[dm_env.TimeStep],
Agent,
Optional[Action],
]
],
) -> Mapping[Text, Any]:
"""Generates statistics from a sequence of timestep and actions."""
# Only reset at the start, not between episodes.
for tracker in trackers:
tracker.reset()
for environment, timestep_t, agent, a_t in timestep_action_sequence:
for tracker in trackers:
tracker.step(environment, timestep_t, agent, a_t)
# Merge all statistics dictionaries into one.
statistics_dicts = (tracker.get() for tracker in trackers)
return dict(collections.ChainMap(*statistics_dicts))
class EpisodeTracker:
"""Tracks episode return and other statistics."""
def __init__(self):
self._num_steps_since_reset = None
self._num_steps_over_episodes = None
self._episode_returns = None
self._current_episode_rewards = None
self._current_episode_step = None
def step(
self,
environment: Optional[dm_env.Environment],
timestep_t: dm_env.TimeStep,
agent: Optional[Agent],
a_t: Optional[Action],
) -> None:
"""Accumulates statistics from timestep."""
del (environment, agent, a_t)
if timestep_t.first():
if self._current_episode_rewards:
raise ValueError('Current episode reward list should be empty.')
if self._current_episode_step != 0:
raise ValueError('Current episode step should be zero.')
else:
# First reward is invalid, all other rewards are appended.
self._current_episode_rewards.append(timestep_t.reward)
self._num_steps_since_reset += 1
self._current_episode_step += 1
if timestep_t.last():
self._episode_returns.append(sum(self._current_episode_rewards))
self._current_episode_rewards = []
self._num_steps_over_episodes += self._current_episode_step
self._current_episode_step = 0
def reset(self) -> None:
"""Resets all gathered statistics, not to be called between episodes."""
self._num_steps_since_reset = 0
self._num_steps_over_episodes = 0
self._episode_returns = []
self._current_episode_step = 0
self._current_episode_rewards = []
def get(self) -> Mapping[Text, Union[int, float, None]]:
"""Aggregates statistics and returns as a dictionary.
Here the convention is `episode_return` is set to `current_episode_return`
if a full episode has not been encountered. Otherwise it is set to
`mean_episode_return` which is the mean return of complete episodes only. If
no steps have been taken at all, `episode_return` is set to `NaN`.
Returns:
A dictionary of aggregated statistics.
"""
if self._episode_returns:
mean_episode_return = np.array(self._episode_returns).mean()
current_episode_return = sum(self._current_episode_rewards)
episode_return = mean_episode_return
else:
mean_episode_return = np.nan
if self._num_steps_since_reset > 0:
current_episode_return = sum(self._current_episode_rewards)
else:
current_episode_return = np.nan
episode_return = current_episode_return
return {
'mean_episode_return': mean_episode_return,
'current_episode_return': current_episode_return,
'episode_return': episode_return,
'num_episodes': len(self._episode_returns),
'num_steps_over_episodes': self._num_steps_over_episodes,
'current_episode_step': self._current_episode_step,
'num_steps_since_reset': self._num_steps_since_reset,
}
class StepRateTracker:
"""Tracks step rate, number of steps taken and duration since last reset."""
def __init__(self):
self._num_steps_since_reset = None
self._start = None
def step(
self,
environment: Optional[dm_env.Environment],
timestep_t: Optional[dm_env.TimeStep],
agent: Optional[Agent],
a_t: Optional[Action],
) -> None:
del (environment, timestep_t, agent, a_t)
self._num_steps_since_reset += 1
def reset(self) -> None:
self._num_steps_since_reset = 0
self._start = timeit.default_timer()
def get(self) -> Mapping[Text, float]:
duration = timeit.default_timer() - self._start
if self._num_steps_since_reset > 0:
step_rate = self._num_steps_since_reset / duration
else:
step_rate = np.nan
return {
'step_rate': step_rate,
'num_steps': self._num_steps_since_reset,
'duration': duration,
}
class UnbiasedExponentialWeightedAverageAgentTracker:
"""'Unbiased Constant-Step-Size Trick' from the Sutton and Barto RL book."""
def __init__(self, step_size: float, initial_agent: Agent):
self._initial_statistics = dict(initial_agent.statistics)
self._step_size = step_size
self.trace = 0.0
self._statistics = dict(self._initial_statistics)
def step(
self,
environment: Optional[dm_env.Environment],
timestep_t: Optional[dm_env.TimeStep],
agent: Agent,
a_t: Optional[Action],
) -> None:
"""Accumulates agent statistics."""
del (environment, timestep_t, a_t)
self.trace = (1 - self._step_size) * self.trace + self._step_size
final_step_size = self._step_size / self.trace
assert 0 <= final_step_size <= 1
if final_step_size == 1:
# Since the self._initial_statistics is likely to be NaN and
# 0 * NaN == NaN just replace self._statistics on the first step.
self._statistics = dict(agent.statistics)
else:
self._statistics = jax.tree_map(
lambda s, x: (1 - final_step_size) * s + final_step_size * x,
self._statistics,
agent.statistics,
)
def reset(self) -> None:
"""Resets statistics and internal state."""
self.trace = 0.0
# get() may be called before step() so ensure statistics are initialized.
self._statistics = dict(self._initial_statistics)
def get(self) -> Mapping[Text, float]:
"""Returns current accumulated statistics."""
return self._statistics
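# An illustrative note (not part of the original implementation): with
# `step_size = 0.1` the trace and effective step size evolve as
#   step 1: trace = 0.1,   final_step_size = 0.1 / 0.1   = 1.0  (copy stats)
#   step 2: trace = 0.19,  final_step_size = 0.1 / 0.19  ~ 0.53
#   step 3: trace = 0.271, final_step_size = 0.1 / 0.271 ~ 0.37
# so the first update simply copies the statistics and later updates decay
# towards the constant step size, removing any bias from the initial
# (typically NaN) statistics.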
def make_default_trackers(initial_agent: Agent):
return [
EpisodeTracker(),
StepRateTracker(),
UnbiasedExponentialWeightedAverageAgentTracker(
step_size=1e-3, initial_agent=initial_agent
),
]
class EpsilonGreedyActor(Agent):
"""Agent that acts with a given set of Q-network parameters and epsilon.
Network parameters are set on the actor. The actor can be serialized,
ensuring determinism of execution (e.g. when checkpointing).
"""
def __init__(
self,
preprocessor: processors.Processor,
network: Network,
exploration_epsilon: float,
rng_key: PRNGKey,
):
self._preprocessor = preprocessor
self._rng_key = rng_key
self._action = None
self.network_params = None # Nest of arrays (haiku.Params), set externally.
def select_action(rng_key, network_params, s_t):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
a_t = distrax.EpsilonGreedy(q_t, exploration_epsilon).sample(
seed=policy_key
)
return rng_key, a_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> Action:
"""Selects action given a timestep."""
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
return self._action
s_t = timestep.observation
self._rng_key, a_t = self._select_action(
self._rng_key, self.network_params, s_t
)
self._action = Action(jax.device_get(a_t))
return self._action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
processors.reset(self._preprocessor)
self._action = None
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
# State contains network params to make agent easy to run from a checkpoint.
return {
'rng_key': self._rng_key,
'network_params': self.network_params,
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self.network_params = state['network_params']
@property
def statistics(self) -> Mapping[Text, float]:
return {}
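# A minimal usage sketch (illustrative only; `preprocessor`, `network`, `env`
# and `params` are assumed to be constructed elsewhere, as in the run scripts):
#
#   actor = EpsilonGreedyActor(
#       preprocessor, network, exploration_epsilon=0.01,
#       rng_key=jax.random.PRNGKey(0))
#   actor.network_params = params  # E.g. train_agent.online_params.
#   action = actor.step(env.reset())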
class LinearSchedule:
"""Linear schedule, used for exploration epsilon in DQN agents."""
def __init__(
self, begin_value, end_value, begin_t, end_t=None, decay_steps=None
):
if (end_t is None) == (decay_steps is None):
raise ValueError('Exactly one of end_t, decay_steps must be provided.')
self._decay_steps = decay_steps if end_t is None else end_t - begin_t
self._begin_t = begin_t
self._begin_value = begin_value
self._end_value = end_value
def __call__(self, t):
"""Implements a linear transition from a begin to an end value."""
frac = min(max(t - self._begin_t, 0), self._decay_steps) / self._decay_steps
return (1 - frac) * self._begin_value + frac * self._end_value
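# A worked example (illustrative only): LinearSchedule(begin_value=1.0,
# end_value=0.1, begin_t=0, decay_steps=100) gives
#   schedule(0)   -> 1.0
#   schedule(50)  -> 0.55
#   schedule(200) -> 0.1
# i.e. the value interpolates linearly over `decay_steps` steps starting at
# `begin_t`, and is clamped to the endpoint values outside that range.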
class NullWriter:
"""A placeholder logging object that does nothing."""
def write(self, *args, **kwargs) -> None:
pass
def close(self) -> None:
pass
class CsvWriter:
"""A logging object writing to a CSV file.
  Each `write()` takes an `OrderedDict`, creating one column in the CSV file for
each dictionary key on the first call. Successive calls to `write()` must
contain the same dictionary keys.
"""
def __init__(self, fname: Text):
"""Initializes a `CsvWriter`.
Args:
fname: File name (path) for file to be written to.
"""
dirname = os.path.dirname(fname)
if not os.path.exists(dirname):
os.makedirs(dirname)
self._fname = fname
self._header_written = False
self._fieldnames = None
def write(self, values: collections.OrderedDict) -> None:
"""Appends given values as new row to CSV file."""
if self._fieldnames is None:
self._fieldnames = values.keys()
# Open a file in 'append' mode, so we can continue logging safely to the
# same file after e.g. restarting from a checkpoint.
with open(self._fname, 'a') as file:
# Always use same fieldnames to create writer, this way a consistency
# check is performed automatically on each write.
writer = csv.DictWriter(file, fieldnames=self._fieldnames)
# Write a header if this is the very first write.
if not self._header_written:
writer.writeheader()
self._header_written = True
writer.writerow(values)
def close(self) -> None:
"""Closes the `CsvWriter`."""
pass
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves `CsvWriter` state as a `dict` (e.g. for serialization)."""
return {
'header_written': self._header_written,
'fieldnames': self._fieldnames,
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets `CsvWriter` state from a (potentially de-serialized) dictionary."""
self._header_written = state['header_written']
self._fieldnames = state['fieldnames']
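# A minimal usage sketch (illustrative only; the path is hypothetical):
#
#   writer = CsvWriter('/tmp/example_results.csv')
#   writer.write(collections.OrderedDict([('iteration', 0), ('return', 1.5)]))
#   writer.write(collections.OrderedDict([('iteration', 1), ('return', 2.0)]))
#
# The first call writes the header row `iteration,return`; later calls should
# use the same keys, since extra keys make `csv.DictWriter` raise a ValueError.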
class NullCheckpoint:
"""A placeholder checkpointing object that does nothing.
Can be used as a substitute for an actual checkpointing object when
checkpointing is disabled.
"""
def __init__(self):
self.state = AttributeDict()
def save(self) -> None:
pass
def can_be_restored(self) -> bool:
return False
def restore(self) -> None:
pass
class AttributeDict(dict):
"""A `dict` that supports getting, setting, deleting keys via attributes."""
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
del self[key]
| dqn_zoo-master | dqn_zoo/parts.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""dm_env environment wrapper around Gym Atari configured to be like Xitari.
Gym Atari is built on the Arcade Learning Environment (ALE), whereas Xitari is
an old fork of the ALE.
"""
# pylint: disable=g-bad-import-order
from typing import Optional, Tuple
import atari_py # pylint: disable=unused-import for gym to load Atari games.
import dm_env
from dm_env import specs
import gym
import numpy as np
from dqn_zoo import atari_data
_GYM_ID_SUFFIX = '-xitari-v1'
def _register_atari_environments():
"""Registers Atari environments in Gym to be as similar to Xitari as possible.
  The main differences from PongNoFrameskip-v4, etc. are that max_episode_steps
  is unset and only the usual 57 Atari games are registered.
"""
for game in atari_data.ATARI_GAMES:
gym.envs.registration.register(
id=game + _GYM_ID_SUFFIX, # Add suffix so ID has required format.
entry_point='gym.envs.atari:AtariEnv',
kwargs={ # Explicitly set all known arguments.
'game': game,
'mode': None, # Not necessarily the same as 0.
'difficulty': None, # Not necessarily the same as 0.
'obs_type': 'image',
'frameskip': 1, # Get every frame.
'repeat_action_probability': 0.0, # No sticky actions.
'full_action_space': False,
},
max_episode_steps=None, # No time limit, handled in training run loop.
nondeterministic=False, # Xitari is deterministic.
)
_register_atari_environments()
class GymAtari(dm_env.Environment):
"""Gym Atari with a `dm_env.Environment` interface."""
def __init__(self, game, seed):
self._gym_env = gym.make(game + _GYM_ID_SUFFIX)
self._gym_env.seed(seed)
self._start_of_episode = True
def reset(self) -> dm_env.TimeStep:
"""Resets the environment and starts a new episode."""
observation = self._gym_env.reset()
lives = np.int32(self._gym_env.ale.lives())
timestep = dm_env.restart((observation, lives))
self._start_of_episode = False
return timestep
def step(self, action: np.int32) -> dm_env.TimeStep:
"""Updates the environment given an action and returns a timestep."""
# If the previous timestep was LAST then we call reset() on the Gym
# environment, otherwise step(). Although Gym environments allow you to step
# through episode boundaries (similar to dm_env) they emit a warning.
if self._start_of_episode:
step_type = dm_env.StepType.FIRST
observation = self._gym_env.reset()
discount = None
reward = None
done = False
else:
observation, reward, done, info = self._gym_env.step(action)
if done:
assert 'TimeLimit.truncated' not in info, 'Should never truncate.'
step_type = dm_env.StepType.LAST
discount = 0.0
else:
step_type = dm_env.StepType.MID
discount = 1.0
lives = np.int32(self._gym_env.ale.lives())
timestep = dm_env.TimeStep(
step_type=step_type,
observation=(observation, lives),
reward=reward,
discount=discount,
)
self._start_of_episode = done
return timestep
def observation_spec(self) -> Tuple[specs.Array, specs.Array]:
space = self._gym_env.observation_space
return (
specs.Array(shape=space.shape, dtype=space.dtype, name='rgb'),
specs.Array(shape=(), dtype=np.int32, name='lives'),
)
def action_spec(self) -> specs.DiscreteArray:
space = self._gym_env.action_space
return specs.DiscreteArray(
num_values=space.n, dtype=np.int32, name='action'
)
def close(self):
self._gym_env.close()
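# A minimal usage sketch (illustrative only):
#
#   env = GymAtari('pong', seed=1)
#   timestep = env.reset()
#   rgb, lives = timestep.observation  # Raw RGB frame and remaining lives.
#   timestep = env.step(np.int32(0))   # Take the noop action.
#
# Observations are (rgb, lives) tuples, matching `observation_spec()` above.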
class RandomNoopsEnvironmentWrapper(dm_env.Environment):
"""Adds a random number of noop actions at the beginning of each episode."""
def __init__(
self,
environment: dm_env.Environment,
max_noop_steps: int,
min_noop_steps: int = 0,
noop_action: int = 0,
seed: Optional[int] = None,
):
"""Initializes the random noops environment wrapper."""
self._environment = environment
if max_noop_steps < min_noop_steps:
      raise ValueError(
          'max_noop_steps must be greater than or equal to min_noop_steps.'
      )
self._min_noop_steps = min_noop_steps
self._max_noop_steps = max_noop_steps
self._noop_action = noop_action
self._rng = np.random.RandomState(seed)
def reset(self):
"""Begins new episode.
This method resets the wrapped environment and applies a random number
of noop actions before returning the last resulting observation
as the first episode timestep. Intermediate timesteps emitted by the inner
environment (including all rewards and discounts) are discarded.
Returns:
First episode timestep corresponding to the timestep after a random number
of noop actions are applied to the inner environment.
Raises:
RuntimeError: if an episode end occurs while the inner environment
is being stepped through with the noop action.
"""
return self._apply_random_noops(initial_timestep=self._environment.reset())
def step(self, action):
"""Steps environment given action.
If beginning a new episode then random noops are applied as in `reset()`.
Args:
action: action to pass to environment conforming to action spec.
Returns:
`Timestep` from the inner environment unless beginning a new episode, in
which case this is the timestep after a random number of noop actions
are applied to the inner environment.
"""
timestep = self._environment.step(action)
if timestep.first():
return self._apply_random_noops(initial_timestep=timestep)
else:
return timestep
def _apply_random_noops(self, initial_timestep):
assert initial_timestep.first()
num_steps = self._rng.randint(
self._min_noop_steps, self._max_noop_steps + 1
)
timestep = initial_timestep
for _ in range(num_steps):
timestep = self._environment.step(self._noop_action)
if timestep.last():
raise RuntimeError(
'Episode ended while applying %s noop actions.' % num_steps
)
# We make sure to return a FIRST timestep, i.e. discard rewards & discounts.
return dm_env.restart(timestep.observation)
## All methods except for reset and step redirect to the underlying env.
def observation_spec(self):
return self._environment.observation_spec()
def action_spec(self):
return self._environment.action_spec()
def reward_spec(self):
return self._environment.reward_spec()
def discount_spec(self):
return self._environment.discount_spec()
def close(self):
return self._environment.close()
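# A minimal usage sketch (illustrative only), mirroring how the run scripts in
# this repository wrap the raw environment:
#
#   env = RandomNoopsEnvironmentWrapper(
#       GymAtari('pong', seed=1), min_noop_steps=1, max_noop_steps=30, seed=2)
#   timestep = env.reset()  # FIRST timestep, after 1 to 30 noop actions.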
| dqn_zoo-master | dqn_zoo/gym_atari.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Composable timestep processing, for DQN Atari preprocessing.
Aims:
* Be self-contained.
* Easy to have the preprocessing on the agent side or on the environment side.
* Easy to swap out and modify parts of the processing.
Conventions:
* The term "processor" is used to refer to any callable that could also have
a `reset()` function to clear any internal state. E.g. a plain function. Or an
instance of a class with `__call__` method, with or without a `reset()`
method.
* `None` means no output when subsampling inputs.
"""
# pylint: disable=g-bad-import-order
import collections
from typing import Any, Callable, List, Iterable, Optional, Sequence, Text, Tuple
import chex
import dm_env
from dm_env import specs
import numpy as np
from PIL import Image
Processor = Callable # Actually a callable that may also have a reset() method.
Nest = Any # Recursive types are not yet supported by pytype.
NamedTuple = Any
StepType = dm_env.StepType
def reset(processor: Processor[[Any], Any]) -> None:
"""Calls `reset()` on a `Processor` or function if the method exists."""
if hasattr(processor, 'reset'):
processor.reset()
identity = lambda v: v
def trailing_zero_pad(
length: int,
) -> Processor[[List[np.ndarray]], List[np.ndarray]]:
"""Adds trailing zero padding to array lists to ensure a minimum length."""
def trailing_zero_pad_fn(arrays):
padding_length = length - len(arrays)
if padding_length <= 0:
return arrays
zero = np.zeros_like(arrays[0])
return arrays + [zero] * padding_length
return trailing_zero_pad_fn
def none_to_zero_pad(values: List[Optional[NamedTuple]]) -> List[NamedTuple]:
"""Replaces `None`s in a list of named tuples with zeros of same structure."""
actual_values = [n for n in values if n is not None]
if not actual_values:
raise ValueError('Must have at least one value which is not None.')
if len(actual_values) == len(values):
return values
example = actual_values[0]
zero = type(example)(*(np.zeros_like(x) for x in example))
return [zero if v is None else v for v in values]
def named_tuple_sequence_stack(values: Sequence[NamedTuple]) -> NamedTuple:
"""Converts a sequence of named tuples into a named tuple of tuples."""
# [T(1, 2), T(3, 4), T(5, 6)].
transposed = zip(*values)
# ((1, 3, 5), (2, 4, 6)).
return type(values[0])(*transposed)
# T((1, 3, 5), (2, 4, 6)).
class Deque:
"""Double ended queue with a maximum length and initial values."""
def __init__(self, max_length: int, initial_values=None):
self._deque = collections.deque(maxlen=max_length)
self._initial_values = initial_values or []
def reset(self) -> None:
self._deque.clear()
self._deque.extend(self._initial_values)
def __call__(self, value: Any) -> collections.deque:
self._deque.append(value)
return self._deque
class FixedPaddedBuffer:
"""Fixed size `None`-padded buffer which is cleared after it is filled.
E.g. with `length = 3`, `initial_index = 2` and values `[0, 1, 2, 3, 4, 5, 6]`
this will return `~~0`, `1~~`, `12~`, `123`, `4~~`, `45~`, `456`, where `~`
represents `None`. Used to concatenate timesteps for action repeats.
Action repeat requirements are:
* Fixed size buffer of timesteps.
* The `FIRST` timestep should return immediately to get the first action of
the episode, as there is no preceding action to repeat. Prefix with padding.
* For `MID` timesteps, the timestep buffer is periodically returned when full.
* When a `LAST` timestep is encountered, the current buffer of timesteps is
returned, suffixed with padding, as buffers should not cross episode
boundaries.
The requirements can be fulfilled by conditionally subsampling the output of
this processor.
"""
def __init__(self, length: int, initial_index: int):
self._length = length
self._initial_index = initial_index % length
self._index = self._initial_index
self._buffer = [None] * self._length
def reset(self) -> None:
self._index = self._initial_index
self._buffer = [None] * self._length
def __call__(self, value: Any) -> Sequence[Any]:
if self._index >= self._length:
assert self._index == self._length
self._index = 0
self._buffer = [None] * self._length
self._buffer[self._index] = value
self._index += 1
return self._buffer
class ConditionallySubsample:
"""Conditionally passes through input, returning `None` otherwise."""
def __init__(self, condition: Processor[[Any], bool]):
self._condition = condition
def reset(self) -> None:
reset(self._condition)
def __call__(self, value: Any) -> Optional[Any]:
return value if self._condition(value) else None
class TimestepBufferCondition:
"""Returns `True` when an iterable of timesteps should be passed on.
Specifically returns `True`:
* If timesteps contain a `FIRST`.
* If timesteps contain a `LAST`.
* If number of steps passed since `FIRST` timestep modulo `period` is `0`.
Returns `False` otherwise. Used for action repeats in Atari preprocessing.
"""
def __init__(self, period: int):
self._period = period
self._steps_since_first_timestep = None
self._should_reset = False
def reset(self):
self._should_reset = False
self._steps_since_first_timestep = None
def __call__(self, timesteps: Iterable[dm_env.TimeStep]) -> bool:
if self._should_reset:
raise RuntimeError('Should have reset.')
# Find the main step type, FIRST and LAST take precedence over MID.
main_step_type = StepType.MID
precedent_step_types = (StepType.FIRST, StepType.LAST)
for timestep in timesteps:
if timestep is None:
continue
if timestep.step_type in precedent_step_types:
if main_step_type in precedent_step_types:
raise RuntimeError('Expected at most one FIRST or LAST.')
main_step_type = timestep.step_type
# Must have FIRST timestep after a reset.
if self._steps_since_first_timestep is None:
if main_step_type != StepType.FIRST:
raise RuntimeError('After reset first timestep should be FIRST.')
# pytype: disable=unsupported-operands
if main_step_type == StepType.FIRST:
self._steps_since_first_timestep = 0
return True
elif main_step_type == StepType.LAST:
self._steps_since_first_timestep = None
self._should_reset = True
return True
elif (self._steps_since_first_timestep + 1) % self._period == 0:
self._steps_since_first_timestep += 1
return True
else:
self._steps_since_first_timestep += 1
return False
# pytype: enable=unsupported-operands
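# Illustrative behaviour (not part of the original code): with `period = 4` and
# one timestep buffer arriving per frame, the condition returns
#   FIRST -> True, MID, MID, MID -> False, next MID -> True, ...
# and always True on LAST, i.e. it fires exactly when the action-repeat buffer
# should be flushed.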
class ApplyToNamedTupleField:
"""Runs processors on a particular field of a named tuple."""
def __init__(self, field: Text, *processors: Processor[[Any], Any]):
self._field = field
self._processors = processors
def reset(self) -> None:
for processor in self._processors:
reset(processor)
def __call__(self, value: NamedTuple) -> NamedTuple:
attr_value = getattr(value, self._field)
for processor in self._processors:
attr_value = processor(attr_value)
return value._replace(**{self._field: attr_value})
class Maybe:
"""Wraps another processor so that `None` is returned when `None` is input."""
def __init__(self, processor: Processor[[Any], Any]):
self._processor = processor
def reset(self) -> None:
reset(self._processor)
def __call__(self, value: Optional[Any]) -> Optional[Any]:
if value is None:
return None
else:
return self._processor(value)
class Sequential:
"""Chains together multiple processors."""
def __init__(self, *processors: Processor[[Any], Any]):
self._processors = processors
def reset(self) -> None:
for processor in self._processors:
reset(processor)
def __call__(self, value: Any) -> Any:
for processor in self._processors:
value = processor(value)
return value
class ZeroDiscountOnLifeLoss:
"""Sets discount to zero on timestep if number of lives has decreased.
This processor assumes observations to be tuples whose second entry is a
scalar indicating the remaining number of lives.
"""
def __init__(self):
self._num_lives_on_prev_step = None
def reset(self) -> None:
self._num_lives_on_prev_step = None
def __call__(self, timestep: dm_env.TimeStep) -> dm_env.TimeStep:
# We have a life loss when the timestep is a regular transition and lives
# have decreased since the previous timestep.
num_lives = timestep.observation[1]
life_lost = timestep.mid() and (num_lives < self._num_lives_on_prev_step)
self._num_lives_on_prev_step = num_lives
return timestep._replace(discount=0.0) if life_lost else timestep
def reduce_step_type(
step_types: Sequence[StepType], debug: bool = False
) -> StepType:
"""Outputs a representative step type from an array of step types."""
# Zero padding will appear to be FIRST. Padding should only be seen before the
# FIRST (e.g. 000F) or after LAST (e.g. ML00).
if debug:
np_step_types = np.array(step_types)
output_step_type = StepType.MID
for i, step_type in enumerate(step_types):
    if step_type == 0:  # Zero padding or FIRST; either way we expect 000F.
if debug and not (np_step_types == 0).all():
raise ValueError('Expected zero padding followed by FIRST.')
output_step_type = StepType.FIRST
break
elif step_type == StepType.LAST:
output_step_type = StepType.LAST
if debug and not (np_step_types[i + 1 :] == 0).all():
raise ValueError('Expected LAST to be followed by zero padding.')
break
else:
if step_type != StepType.MID:
raise ValueError('Expected MID if not FIRST or LAST.')
return output_step_type
def aggregate_rewards(
rewards: Sequence[Optional[float]], debug: bool = False
) -> Optional[float]:
"""Sums up rewards, assumes discount is 1."""
if None in rewards:
if debug:
np_rewards = np.array(rewards)
if not (np_rewards[-1] is None and (np_rewards[:-1] == 0).all()):
# Should only ever have [0, 0, 0, None] due to zero padding.
raise ValueError('Should only have a None reward for FIRST.')
return None
else:
# Faster than np.sum for a list of floats.
return sum(rewards)
def aggregate_discounts(
discounts: Sequence[Optional[float]], debug: bool = False
) -> Optional[float]:
"""Aggregates array of discounts into a scalar, expects `0`, `1` or `None`."""
if debug:
np_discounts = np.array(discounts)
if not np.isin(np_discounts, [0.0, 1.0, None]).all():
raise ValueError(
'All discounts should be 0 or 1, got: %s.' % np_discounts
)
if None in discounts:
if debug:
if not (np_discounts[-1] is None and (np_discounts[:-1] == 0).all()):
# Should have [0, 0, 0, None] due to zero padding.
raise ValueError('Should only have a None discount for FIRST.')
return None
else:
# Faster than np.prod for a list of floats.
result = 1
for d in discounts:
result *= d
return result
def rgb2y(array: np.ndarray) -> np.ndarray:
"""Converts RGB image array into grayscale."""
chex.assert_rank(array, 3)
output = np.tensordot(array, [0.299, 0.587, 1 - (0.299 + 0.587)], (-1, 0))
return output.astype(np.uint8)
def resize(shape: Tuple[int, ...]) -> Processor[[np.ndarray], np.ndarray]:
"""Resizes array to the given shape."""
if len(shape) != 2:
raise ValueError('Resize shape has to be 2D, given: %s.' % str(shape))
# Image.resize takes (width, height) as output_shape argument.
image_shape = (shape[1], shape[0])
def resize_fn(array):
image = Image.fromarray(array).resize(image_shape, Image.BILINEAR)
return np.array(image, dtype=np.uint8)
return resize_fn
def select_rgb_observation(timestep: dm_env.TimeStep) -> dm_env.TimeStep:
"""Replaces an observation tuple by its first entry (the RGB observation)."""
return timestep._replace(observation=timestep.observation[0])
def apply_additional_discount(
additional_discount: float,
) -> Processor[[float], float]:
"""Returns a function that scales its non-`None` input by a constant."""
return lambda d: None if d is None else additional_discount * d
def clip_reward(bound: float) -> Processor[[Optional[float]], Optional[float]]:
"""Returns a function that clips non-`None` inputs to (`-bound`, `bound`)."""
def clip_reward_fn(reward):
return None if reward is None else max(min(reward, bound), -bound)
return clip_reward_fn
def show(prefix: Text) -> Processor[[Any], Any]:
"""Prints value and passes through, for debugging."""
def show_fn(value):
print('%s: %s' % (prefix, value))
return value
return show_fn
def atari(
additional_discount: float = 0.99,
max_abs_reward: Optional[float] = 1.0,
resize_shape: Optional[Tuple[int, int]] = (84, 84),
num_action_repeats: int = 4,
num_pooled_frames: int = 2,
zero_discount_on_life_loss: bool = True,
num_stacked_frames: int = 4,
grayscaling: bool = True,
) -> Processor[[dm_env.TimeStep], Optional[dm_env.TimeStep]]:
"""Standard DQN preprocessing on Atari."""
# This processor does the following to a sequence of timesteps.
#
# 1. Zeroes discount on loss of life.
# 2. Repeats actions (previous action should be repeated if None is returned).
# 3. Max pools action repeated observations.
# 4. Grayscales observations.
# 5. Resizes observations.
# 6. Stacks observations.
# 7. Clips rewards.
# 8. Applies an additional discount.
#
# For more detail see the annotations in the processors below.
# The FixedPaddedBuffer, ConditionallySubsample, none_to_zero_pad, stack and
# max_pool on the observation collectively does this (step types: F = FIRST,
# M = MID, L = LAST, ~ is None):
#
# Type: F | M M M M | M M L | F |
# Frames: A | B C D E | F G H | I |
# Output: max[0A]| ~ ~ ~ max[DE]| ~ ~ max[H0]|max[0I]|
return Sequential(
# When the number of lives decreases, set discount to 0.
ZeroDiscountOnLifeLoss() if zero_discount_on_life_loss else identity,
# Select the RGB observation as the main observation, dropping lives.
select_rgb_observation,
# obs: 1, 2, 3, 4, 5, 6, 7, 8, 9, ...
# Write timesteps into a fixed-sized buffer padded with None.
FixedPaddedBuffer(length=num_action_repeats, initial_index=-1),
# obs: ~~~1, 2~~~, 23~~, 234~, 2345, 6~~~, 67~~, 678~, 6789, ...
# Periodically return the deque of timesteps, when the current timestep is
# FIRST, after that every 4 steps, and when the current timestep is LAST.
ConditionallySubsample(TimestepBufferCondition(num_action_repeats)),
# obs: ~~~1, ~, ~, ~, 2345, ~, ~, ~, 6789, ...
# If None pass through, otherwise apply the processor.
Maybe(
Sequential(
# Replace Nones with zero padding in each buffer.
none_to_zero_pad,
# obs: 0001, ~, ~, ~, 2345, ~, ~, ~, 6789, ...
# Convert sequence of nests into a nest of sequences.
named_tuple_sequence_stack,
# Choose representative step type from an array of step types.
ApplyToNamedTupleField('step_type', reduce_step_type),
# Rewards: sum then clip.
ApplyToNamedTupleField(
'reward',
aggregate_rewards,
clip_reward(max_abs_reward) if max_abs_reward else identity,
),
# Discounts: take product and scale by an additional discount.
ApplyToNamedTupleField(
'discount',
aggregate_discounts,
apply_additional_discount(additional_discount),
),
# Observations: max pool, grayscale, resize, and stack.
ApplyToNamedTupleField(
'observation',
lambda obs: np.stack(obs[-num_pooled_frames:], axis=0),
lambda obs: np.max(obs, axis=0),
# obs: max[01], ~, ~, ~, max[45], ~, ~, ~, max[89], ...
# obs: A, ~, ~, ~, B, ~, ~, ~, C, ...
rgb2y if grayscaling else identity,
resize(resize_shape) if resize_shape else identity,
Deque(max_length=num_stacked_frames),
# obs: A, ~, ~, ~, AB, ~, ~, ~, ABC, ~, ~, ~, ABCD, ~, ~, ~,
# BCDE, ~, ~, ~, CDEF, ...
list,
trailing_zero_pad(length=num_stacked_frames),
# obs: A000, ~, ~, ~, AB00, ~, ~, ~, ABC0, ~, ~, ~, ABCD,
# ~, ~, ~, BCDE, ...
lambda obs: np.stack(obs, axis=-1),
),
)
),
)
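# A minimal usage sketch (illustrative only; `env` is assumed to be a
# GymAtari-style environment emitting (rgb, lives) observations, and `action`
# an integer action):
#
#   processor = atari()
#   timestep = processor(env.reset())  # Processed FIRST timestep.
#   while timestep is None or not timestep.last():
#     timestep = processor(env.step(action))
#     # `None` means no output yet: the previous action should be repeated.
#
# With the default settings above, each non-`None` timestep carries an
# (84, 84, 4) uint8 observation.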
class AtariEnvironmentWrapper(dm_env.Environment):
"""Python environment wrapper that provides DQN Atari preprocessing.
This is a thin wrapper around the Atari processor.
Expects underlying Atari environment to have interleaved pixels (HWC) and
zero-indexed actions.
"""
def __init__(
self,
environment: dm_env.Environment,
additional_discount: float = 0.99,
max_abs_reward: Optional[float] = 1.0,
resize_shape: Optional[Tuple[int, int]] = (84, 84),
num_action_repeats: int = 4,
num_pooled_frames: int = 2,
zero_discount_on_life_loss: bool = True,
num_stacked_frames: int = 4,
grayscaling: bool = True,
):
rgb_spec, unused_lives_spec = environment.observation_spec()
if rgb_spec.shape[2] != 3:
raise ValueError(
'This wrapper assumes interleaved pixel observations with shape '
'(height, width, channels).'
)
if int(environment.action_spec().minimum) != 0:
raise ValueError('This wrapper assumes zero-indexed actions.')
self._environment = environment
self._processor = atari(
additional_discount=additional_discount,
max_abs_reward=max_abs_reward,
resize_shape=resize_shape,
num_action_repeats=num_action_repeats,
num_pooled_frames=num_pooled_frames,
zero_discount_on_life_loss=zero_discount_on_life_loss,
num_stacked_frames=num_stacked_frames,
grayscaling=grayscaling,
)
if grayscaling:
self._observation_shape = resize_shape + (num_stacked_frames,)
self._observation_spec_name = 'grayscale'
else:
self._observation_shape = resize_shape + (3, num_stacked_frames)
self._observation_spec_name = 'RGB'
self._reset_next_step = True
def reset(self) -> dm_env.TimeStep:
"""Resets environment and provides the first processed timestep."""
reset(self._processor)
timestep = self._environment.reset()
processed_timestep = self._processor(timestep)
assert processed_timestep is not None
self._reset_next_step = False
return processed_timestep
def step(self, action: int) -> dm_env.TimeStep:
"""Steps up to `num_action_repeat` times, returns a processed timestep."""
# This implements the action repeat by repeatedly passing in the last action
# until an actual timestep is returned by the processor.
if self._reset_next_step:
return self.reset() # Ignore action.
processed_timestep = None
while processed_timestep is None:
timestep = self._environment.step(action)
processed_timestep = self._processor(timestep)
if timestep.last():
self._reset_next_step = True
assert processed_timestep is not None
return processed_timestep
def action_spec(self) -> specs.DiscreteArray:
return self._environment.action_spec()
def observation_spec(self) -> specs.Array:
return specs.Array(
shape=self._observation_shape,
dtype=np.uint8,
name=self._observation_spec_name,
)
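# A minimal usage sketch (illustrative only):
#
#   from dqn_zoo import gym_atari
#   env = AtariEnvironmentWrapper(gym_atari.GymAtari('pong', seed=1))
#   env.observation_spec().shape  # (84, 84, 4) with the default settings.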
| dqn_zoo-master | dqn_zoo/processors.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for networks."""
# pylint: disable=g-bad-import-order
import chex
import haiku as hk
import jax
from jax.config import config
import jax.numpy as jnp
import numpy as np
import tree
from dqn_zoo import networks
from absl.testing import absltest
def _sample_input(input_shape):
return jnp.zeros((1,) + input_shape, dtype=jnp.float32)
class SimpleLayersTest(absltest.TestCase):
def test_linear(self):
layer = hk.transform(networks.linear(4))
params = layer.init(jax.random.PRNGKey(1), _sample_input((3,)))
self.assertCountEqual(['linear'], params)
lin_params = params['linear']
self.assertCountEqual(['w', 'b'], lin_params)
chex.assert_shape(lin_params['w'], (3, 4))
chex.assert_shape(lin_params['b'], (4,))
def test_conv(self):
layer = hk.transform(networks.conv(4, (3, 3), 2))
params = layer.init(jax.random.PRNGKey(1), _sample_input((7, 7, 3)))
self.assertCountEqual(['conv2_d'], params)
conv_params = params['conv2_d']
self.assertCountEqual(['w', 'b'], conv_params)
chex.assert_shape(conv_params['w'], (3, 3, 3, 4))
chex.assert_shape(conv_params['b'], (4,))
class LinearWithSharedBiasTest(absltest.TestCase):
def setUp(self):
super().setUp()
rng_key = jax.random.PRNGKey(1)
self.init_rng_key, self.apply_rng_key = jax.random.split(rng_key)
self.input_shape = (4,)
self.output_shape = (3,)
self.weights_shape = (self.input_shape[0], self.output_shape[0])
network_fn = networks.linear_with_shared_bias(self.output_shape[0])
self.network = hk.transform(network_fn)
def test_bias_parameter_shape(self):
params = self.network.init(
self.init_rng_key, _sample_input(self.input_shape)
)
self.assertLen(tree.flatten(params), 2)
def check_params(path, param):
if path[-1] == 'b':
self.assertNotEqual(self.output_shape, param.shape)
chex.assert_shape(param, (1,))
elif path[-1] == 'w':
chex.assert_shape(param, self.weights_shape)
else:
self.fail('Unexpected parameter %s.' % path)
tree.map_structure_with_path(check_params, params)
def test_output_shares_bias(self):
bias = 1.23
params = self.network.init(
self.init_rng_key, _sample_input(self.input_shape)
)
def replace_params(path, param):
if path[-1] == 'b':
return jnp.ones_like(param) * bias
else:
return jnp.zeros_like(param)
params = tree.map_structure_with_path(replace_params, params)
output = self.network.apply(
params, self.apply_rng_key, jnp.zeros((1,) + self.input_shape)
)
chex.assert_shape(output, (1,) + self.output_shape)
np.testing.assert_allclose([bias] * self.output_shape[0], list(output[0]))
class NoisyLinearTest(absltest.TestCase):
def setUp(self):
super().setUp()
rng_key = jax.random.PRNGKey(1)
self.init_rng_key, self.apply_rng_key = jax.random.split(rng_key)
self.input_shape = (4,)
self.output_shape = (3,)
self.network_fn = networks.noisy_linear(self.output_shape[0], 0.1)
self.network = hk.transform(self.network_fn)
self.params = self.network.init(
self.init_rng_key, _sample_input(self.input_shape)
)
self.inputs = jnp.zeros((2,) + self.input_shape)
def test_basic(self):
self.network.apply(self.params, self.apply_rng_key, self.inputs)
def test_error_raised_if_rng_is_not_passed_in(self):
with self.assertRaisesRegex(ValueError, 'must be called with an RNG'):
self.network.apply(self.params, self.inputs)
def test_error_raised_if_transformed_without_rng_1(self):
network = hk.without_apply_rng(hk.transform(self.network_fn))
with self.assertRaisesRegex(ValueError, 'PRNGKey'):
network.apply(self.params, self.inputs)
def test_error_raised_if_transformed_without_rng_2(self):
network = hk.without_apply_rng(hk.transform(self.network_fn))
with self.assertRaisesRegex(TypeError, 'positional argument'):
network.apply(self.params, self.apply_rng_key, self.inputs)
def test_same_rng_produces_same_outputs(self):
outputs_1 = self.network.apply(self.params, self.apply_rng_key, self.inputs)
outputs_2 = self.network.apply(self.params, self.apply_rng_key, self.inputs)
np.testing.assert_allclose(outputs_1, outputs_2)
def test_different_rngs_produce_different_outputs(self):
rng_1, rng_2 = jax.random.split(jax.random.PRNGKey(1))
outputs_1 = self.network.apply(self.params, rng_1, self.inputs)
outputs_2 = self.network.apply(self.params, rng_2, self.inputs)
self.assertFalse(np.allclose(outputs_1, outputs_2))
def test_number_of_params_with_bias_correct(self):
net_fn = networks.noisy_linear(self.output_shape[0], 0.1, with_bias=True)
network = hk.transform(net_fn)
params = network.init(self.init_rng_key, _sample_input(self.input_shape))
self.assertCountEqual(['mu', 'sigma'], params)
self.assertCountEqual(['b', 'w'], params['mu'])
self.assertCountEqual(['b', 'w'], params['sigma'])
def test_number_of_params_without_bias_correct(self):
net_fn = networks.noisy_linear(self.output_shape[0], 0.1, with_bias=False)
network = hk.transform(net_fn)
params = network.init(self.init_rng_key, _sample_input(self.input_shape))
self.assertCountEqual(['mu', 'sigma'], params)
self.assertCountEqual(['w'], params['mu'])
self.assertCountEqual(['b', 'w'], params['sigma'])
def test_sigma_params_are_constant(self):
self.assertCountEqual(['mu', 'sigma'], self.params)
sigma_params = self.params['sigma']
sigma_w_values = np.unique(sigma_params['w'])
sigma_b_values = np.unique(sigma_params['b'])
self.assertLen(sigma_w_values, 1)
self.assertLen(sigma_b_values, 1)
value = 0.1 / np.sqrt(self.input_shape[0])
self.assertAlmostEqual(value, sigma_w_values)
self.assertAlmostEqual(value, sigma_b_values)
if __name__ == '__main__':
config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| dqn_zoo-master | dqn_zoo/networks_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Double DQN (tuned) agent class."""
# pylint: disable=g-bad-import-order
from typing import Any, Callable, Mapping, Text
from absl import logging
import chex
import distrax
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
# Batch variant of double_q_learning.
_batch_double_q_learning = jax.vmap(rlax.double_q_learning)
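# For reference, double Q-learning computes the per-transition TD error as
#   td_error = r_t + discount_t * q_target_t[argmax_a q_t[a]] - q_tm1[a_tm1]
# where the online network selects the bootstrap action and the target network
# evaluates it, reducing the overestimation bias of standard Q-learning. The
# `jax.vmap` above maps this per-transition rule over a batch.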
class DoubleDqn(parts.Agent):
"""Double DQN (tuned) agent."""
def __init__(
self,
preprocessor: processors.Processor,
sample_network_input: jnp.ndarray,
network: parts.Network,
optimizer: optax.GradientTransformation,
transition_accumulator: Any,
replay: replay_lib.TransitionReplay,
batch_size: int,
exploration_epsilon: Callable[[int], float],
min_replay_capacity_fraction: float,
learn_period: int,
target_network_update_period: int,
grad_error_bound: float,
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._replay = replay
self._transition_accumulator = transition_accumulator
self._batch_size = batch_size
self._exploration_epsilon = exploration_epsilon
self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
self._learn_period = learn_period
self._target_network_update_period = target_network_update_period
# Initialize network parameters and optimizer.
self._rng_key, network_rng_key = jax.random.split(rng_key)
self._online_params = network.init(
network_rng_key, sample_network_input[None, ...]
)
self._target_params = self._online_params
self._opt_state = optimizer.init(self._online_params)
# Other agent state: last action, frame count, etc.
self._action = None
self._frame_t = -1 # Current frame index.
self._statistics = {'state_value': np.nan}
# Define jitted loss, update, and policy functions here instead of as
# class methods, to emphasize that these are meant to be pure functions
# and should not access the agent object's state via `self`.
def loss_fn(online_params, target_params, transitions, rng_key):
"""Calculates loss given network parameters and transitions."""
_, *apply_keys = jax.random.split(rng_key, 4)
q_tm1 = network.apply(
online_params, apply_keys[0], transitions.s_tm1
).q_values
q_t = network.apply(
online_params, apply_keys[1], transitions.s_t
).q_values
q_target_t = network.apply(
target_params, apply_keys[2], transitions.s_t
).q_values
td_errors = _batch_double_q_learning(
q_tm1,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
q_target_t,
q_t,
)
td_errors = rlax.clip_gradient(
td_errors, -grad_error_bound, grad_error_bound
)
losses = rlax.l2_loss(td_errors)
chex.assert_shape(losses, (self._batch_size,))
loss = jnp.mean(losses)
return loss
def update(rng_key, opt_state, online_params, target_params, transitions):
"""Computes learning update from batch of replay transitions."""
rng_key, update_key = jax.random.split(rng_key)
d_loss_d_params = jax.grad(loss_fn)(
online_params, target_params, transitions, update_key
)
updates, new_opt_state = optimizer.update(d_loss_d_params, opt_state)
new_online_params = optax.apply_updates(online_params, updates)
return rng_key, new_opt_state, new_online_params
self._update = jax.jit(update)
def select_action(rng_key, network_params, s_t, exploration_epsilon):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
a_t = distrax.EpsilonGreedy(q_t, exploration_epsilon).sample(
seed=policy_key
)
v_t = jnp.max(q_t, axis=-1)
return rng_key, a_t, v_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given timestep and potentially learns."""
self._frame_t += 1
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
action = self._action
else:
action = self._action = self._act(timestep)
for transition in self._transition_accumulator.step(timestep, action):
self._replay.add(transition)
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
if self._frame_t % self._target_network_update_period == 0:
self._target_params = self._online_params
return action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
self._transition_accumulator.reset()
processors.reset(self._preprocessor)
self._action = None
def _act(self, timestep) -> parts.Action:
"""Selects action given timestep, according to epsilon-greedy policy."""
s_t = timestep.observation
self._rng_key, a_t, v_t = self._select_action(
self._rng_key, self._online_params, s_t, self.exploration_epsilon
)
a_t, v_t = jax.device_get((a_t, v_t))
self._statistics['state_value'] = v_t
return parts.Action(a_t)
def _learn(self) -> None:
"""Samples a batch of transitions from replay and learns from it."""
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions = self._replay.sample(self._batch_size)
self._rng_key, self._opt_state, self._online_params = self._update(
self._rng_key,
self._opt_state,
self._online_params,
self._target_params,
transitions,
)
@property
def online_params(self) -> parts.NetworkParams:
"""Returns current parameters of Q-network."""
return self._online_params
@property
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
# Check for DeviceArrays in values as this can be very slow.
assert all(
not isinstance(x, jnp.DeviceArray) for x in self._statistics.values()
)
return self._statistics
@property
def exploration_epsilon(self) -> float:
"""Returns epsilon value currently used by (eps-greedy) behavior policy."""
return self._exploration_epsilon(self._frame_t)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
state = {
'rng_key': self._rng_key,
'frame_t': self._frame_t,
'opt_state': self._opt_state,
'online_params': self._online_params,
'target_params': self._target_params,
'replay': self._replay.get_state(),
}
return state
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self._frame_t = state['frame_t']
self._opt_state = jax.device_put(state['opt_state'])
self._online_params = jax.device_put(state['online_params'])
self._target_params = jax.device_put(state['target_params'])
self._replay.set_state(state['replay'])
| dqn_zoo-master | dqn_zoo/double_q/agent.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Double DQN."""
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
from jax.config import config
from dqn_zoo.double_q import run_atari
from absl.testing import absltest
FLAGS = flags.FLAGS
class RunAtariTest(absltest.TestCase):
@flagsaver.flagsaver
def test_can_run_agent(self):
FLAGS.environment_name = 'pong'
FLAGS.replay_capacity = 1000
FLAGS.exploration_epsilon_decay_frame_fraction = 0.1
FLAGS.target_network_update_period = 12
FLAGS.num_train_frames = 100
FLAGS.num_eval_frames = 50
FLAGS.num_iterations = 2
FLAGS.batch_size = 10
FLAGS.learn_period = 2
run_atari.main(None)
if __name__ == '__main__':
config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| dqn_zoo-master | dqn_zoo/double_q/run_atari_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Double DQN (tuned) agent training on Atari.
From the paper "Deep Reinforcement Learning with Double Q-learning"
http://arxiv.org/abs/1509.06461.
This is DQN with:
* Double Q-learning.
* Shared bias in final layer of the network.
* Tuned parameters:
* Smaller final training exploration epsilon.
* Smaller evaluation exploration epsilon.
* Larger target network update period.
"""
# pylint: disable=g-bad-import-order
import collections
import itertools
import sys
import typing
from absl import app
from absl import flags
from absl import logging
import chex
import dm_env
import haiku as hk
import jax
from jax.config import config
import numpy as np
import optax
from dqn_zoo import atari_data
from dqn_zoo import gym_atari
from dqn_zoo import networks
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
from dqn_zoo.double_q import agent
# Relevant flag values are expressed in terms of environment frames.
FLAGS = flags.FLAGS
_ENVIRONMENT_NAME = flags.DEFINE_string('environment_name', 'pong', '')
_ENVIRONMENT_HEIGHT = flags.DEFINE_integer('environment_height', 84, '')
_ENVIRONMENT_WIDTH = flags.DEFINE_integer('environment_width', 84, '')
_REPLAY_CAPACITY = flags.DEFINE_integer('replay_capacity', int(1e6), '')
_COMPRESS_STATE = flags.DEFINE_bool('compress_state', True, '')
_MIN_REPLAY_CAPACITY_FRACTION = flags.DEFINE_float(
'min_replay_capacity_fraction', 0.05, ''
)
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 32, '')
_MAX_FRAMES_PER_EPISODE = flags.DEFINE_integer(
'max_frames_per_episode', 108000, ''
) # 30 mins.
_NUM_ACTION_REPEATS = flags.DEFINE_integer('num_action_repeats', 4, '')
_NUM_STACKED_FRAMES = flags.DEFINE_integer('num_stacked_frames', 4, '')
_EXPLORATION_EPSILON_BEGIN_VALUE = flags.DEFINE_float(
'exploration_epsilon_begin_value', 1.0, ''
)
_EXPLORATION_EPSILON_END_VALUE = flags.DEFINE_float(
'exploration_epsilon_end_value', 0.01, ''
)
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION = flags.DEFINE_float(
'exploration_epsilon_decay_frame_fraction', 0.02, ''
)
_EVAL_EXPLORATION_EPSILON = flags.DEFINE_float(
'eval_exploration_epsilon', 0.01, ''
)
_TARGET_NETWORK_UPDATE_PERIOD = flags.DEFINE_integer(
'target_network_update_period', int(1.2e5), ''
)
_GRAD_ERROR_BOUND = flags.DEFINE_float('grad_error_bound', 1.0 / 32, '')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 0.00025, '')
_OPTIMIZER_EPSILON = flags.DEFINE_float('optimizer_epsilon', 0.01 / 32**2, '')
_ADDITIONAL_DISCOUNT = flags.DEFINE_float('additional_discount', 0.99, '')
_MAX_ABS_REWARD = flags.DEFINE_float('max_abs_reward', 1.0, '')
_SEED = flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism.
_NUM_ITERATIONS = flags.DEFINE_integer('num_iterations', 200, '')
_NUM_TRAIN_FRAMES = flags.DEFINE_integer(
'num_train_frames', int(1e6), ''
) # Per iteration.
_NUM_EVAL_FRAMES = flags.DEFINE_integer(
'num_eval_frames', int(5e5), ''
) # Per iteration.
_LEARN_PERIOD = flags.DEFINE_integer('learn_period', 16, '')
_RESULTS_CSV_PATH = flags.DEFINE_string(
'results_csv_path', '/tmp/results.csv', ''
)
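# Illustrative arithmetic (not part of the original file): with the default
# flag values above, exploration epsilon starts decaying after
#   0.05 * 1e6 * 4 = 200,000 environment frames (the replay warm-up)
# and decays linearly over
#   0.02 * 200 * 1e6 = 4,000,000 environment frames,
# matching how `exploration_epsilon_schedule` is built in `main()` below.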
def main(argv):
"""Trains Double DQN agent on Atari."""
del argv
logging.info(
'Double DQN on Atari on %s.', jax.lib.xla_bridge.get_backend().platform
)
random_state = np.random.RandomState(_SEED.value)
rng_key = jax.random.PRNGKey(
random_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64)
)
if _RESULTS_CSV_PATH.value:
writer = parts.CsvWriter(_RESULTS_CSV_PATH.value)
else:
writer = parts.NullWriter()
def environment_builder():
"""Creates Atari environment."""
env = gym_atari.GymAtari(
_ENVIRONMENT_NAME.value, seed=random_state.randint(1, 2**32)
)
return gym_atari.RandomNoopsEnvironmentWrapper(
env,
min_noop_steps=1,
max_noop_steps=30,
seed=random_state.randint(1, 2**32),
)
env = environment_builder()
logging.info('Environment: %s', _ENVIRONMENT_NAME.value)
logging.info('Action spec: %s', env.action_spec())
logging.info('Observation spec: %s', env.observation_spec())
num_actions = env.action_spec().num_values
network_fn = networks.double_dqn_atari_network(num_actions)
network = hk.transform(network_fn)
def preprocessor_builder():
return processors.atari(
additional_discount=_ADDITIONAL_DISCOUNT.value,
max_abs_reward=_MAX_ABS_REWARD.value,
resize_shape=(_ENVIRONMENT_HEIGHT.value, _ENVIRONMENT_WIDTH.value),
num_action_repeats=_NUM_ACTION_REPEATS.value,
num_pooled_frames=2,
zero_discount_on_life_loss=True,
num_stacked_frames=_NUM_STACKED_FRAMES.value,
grayscaling=True,
)
# Create sample network input from sample preprocessor output.
sample_processed_timestep = preprocessor_builder()(env.reset())
sample_processed_timestep = typing.cast(
dm_env.TimeStep, sample_processed_timestep
)
sample_network_input = sample_processed_timestep.observation
chex.assert_shape(
sample_network_input,
(
_ENVIRONMENT_HEIGHT.value,
_ENVIRONMENT_WIDTH.value,
_NUM_STACKED_FRAMES.value,
),
)
exploration_epsilon_schedule = parts.LinearSchedule(
begin_t=int(
_MIN_REPLAY_CAPACITY_FRACTION.value
* _REPLAY_CAPACITY.value
* _NUM_ACTION_REPEATS.value
),
decay_steps=int(
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION.value
* _NUM_ITERATIONS.value
* _NUM_TRAIN_FRAMES.value
),
begin_value=_EXPLORATION_EPSILON_BEGIN_VALUE.value,
end_value=_EXPLORATION_EPSILON_END_VALUE.value,
)
if _COMPRESS_STATE.value:
def encoder(transition):
return transition._replace(
s_tm1=replay_lib.compress_array(transition.s_tm1),
s_t=replay_lib.compress_array(transition.s_t),
)
def decoder(transition):
return transition._replace(
s_tm1=replay_lib.uncompress_array(transition.s_tm1),
s_t=replay_lib.uncompress_array(transition.s_t),
)
else:
encoder = None
decoder = None
replay_structure = replay_lib.Transition(
s_tm1=None,
a_tm1=None,
r_t=None,
discount_t=None,
s_t=None,
)
replay = replay_lib.TransitionReplay(
_REPLAY_CAPACITY.value, replay_structure, random_state, encoder, decoder
)
optimizer = optax.rmsprop(
learning_rate=_LEARNING_RATE.value,
decay=0.95,
eps=_OPTIMIZER_EPSILON.value,
centered=True,
)
train_rng_key, eval_rng_key = jax.random.split(rng_key)
train_agent = agent.DoubleDqn(
preprocessor=preprocessor_builder(),
sample_network_input=sample_network_input,
network=network,
optimizer=optimizer,
transition_accumulator=replay_lib.TransitionAccumulator(),
replay=replay,
batch_size=_BATCH_SIZE.value,
exploration_epsilon=exploration_epsilon_schedule,
min_replay_capacity_fraction=_MIN_REPLAY_CAPACITY_FRACTION.value,
learn_period=_LEARN_PERIOD.value,
target_network_update_period=_TARGET_NETWORK_UPDATE_PERIOD.value,
grad_error_bound=_GRAD_ERROR_BOUND.value,
rng_key=train_rng_key,
)
eval_agent = parts.EpsilonGreedyActor(
preprocessor=preprocessor_builder(),
network=network,
exploration_epsilon=_EVAL_EXPLORATION_EPSILON.value,
rng_key=eval_rng_key,
)
# Set up checkpointing.
checkpoint = parts.NullCheckpoint()
state = checkpoint.state
state.iteration = 0
state.train_agent = train_agent
state.eval_agent = eval_agent
state.random_state = random_state
state.writer = writer
if checkpoint.can_be_restored():
checkpoint.restore()
while state.iteration <= _NUM_ITERATIONS.value:
# New environment for each iteration to allow for determinism if preempted.
env = environment_builder()
logging.info('Training iteration %d.', state.iteration)
train_seq = parts.run_loop(train_agent, env, _MAX_FRAMES_PER_EPISODE.value)
num_train_frames = 0 if state.iteration == 0 else _NUM_TRAIN_FRAMES.value
train_seq_truncated = itertools.islice(train_seq, num_train_frames)
train_trackers = parts.make_default_trackers(train_agent)
train_stats = parts.generate_statistics(train_trackers, train_seq_truncated)
logging.info('Evaluation iteration %d.', state.iteration)
eval_agent.network_params = train_agent.online_params
eval_seq = parts.run_loop(eval_agent, env, _MAX_FRAMES_PER_EPISODE.value)
eval_seq_truncated = itertools.islice(eval_seq, _NUM_EVAL_FRAMES.value)
eval_trackers = parts.make_default_trackers(eval_agent)
eval_stats = parts.generate_statistics(eval_trackers, eval_seq_truncated)
# Logging and checkpointing.
human_normalized_score = atari_data.get_human_normalized_score(
_ENVIRONMENT_NAME.value, eval_stats['episode_return']
)
capped_human_normalized_score = np.amin([1.0, human_normalized_score])
log_output = [
('iteration', state.iteration, '%3d'),
('frame', state.iteration * _NUM_TRAIN_FRAMES.value, '%5d'),
('eval_episode_return', eval_stats['episode_return'], '% 2.2f'),
('train_episode_return', train_stats['episode_return'], '% 2.2f'),
('eval_num_episodes', eval_stats['num_episodes'], '%3d'),
('train_num_episodes', train_stats['num_episodes'], '%3d'),
('eval_frame_rate', eval_stats['step_rate'], '%4.0f'),
('train_frame_rate', train_stats['step_rate'], '%4.0f'),
('train_exploration_epsilon', train_agent.exploration_epsilon, '%.3f'),
('train_state_value', train_stats['state_value'], '%.3f'),
('normalized_return', human_normalized_score, '%.3f'),
('capped_normalized_return', capped_human_normalized_score, '%.3f'),
('human_gap', 1.0 - capped_human_normalized_score, '%.3f'),
]
log_output_str = ', '.join(('%s: ' + f) % (n, v) for n, v, f in log_output)
logging.info(log_output_str)
writer.write(collections.OrderedDict((n, v) for n, v, _ in log_output))
state.iteration += 1
checkpoint.save()
writer.close()
if __name__ == '__main__':
config.update('jax_platform_name', 'gpu') # Default to GPU.
config.update('jax_numpy_rank_promotion', 'raise')
config.config_with_absl()
app.run(main)
| dqn_zoo-master | dqn_zoo/double_q/run_atari.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""QR-DQN agent class."""
# pylint: disable=g-bad-import-order
from typing import Any, Callable, Mapping, Text
from absl import logging
import chex
import distrax
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
# Batch variant of quantile_q_learning with fixed tau input across batch.
_batch_quantile_q_learning = jax.vmap(
rlax.quantile_q_learning, in_axes=(0, None, 0, 0, 0, 0, 0, None)
)
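# Note: assuming rlax.quantile_q_learning takes
# (dist_q_tm1, tau_q_tm1, a_tm1, r_t, discount_t, dist_q_t_selector,
#  dist_q_t, huber_param), the in_axes above batch every per-transition
# argument while the quantile midpoints (tau) and the Huber parameter are
# shared across the batch; this matches the call in loss_fn below, where
# `quantiles` and `huber_param` are passed unbatched.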
class QrDqn(parts.Agent):
"""Quantile Regression DQN agent."""
def __init__(
self,
preprocessor: processors.Processor,
sample_network_input: jnp.ndarray,
network: parts.Network,
quantiles: jnp.ndarray,
optimizer: optax.GradientTransformation,
transition_accumulator: Any,
replay: replay_lib.TransitionReplay,
batch_size: int,
exploration_epsilon: Callable[[int], float],
min_replay_capacity_fraction: float,
learn_period: int,
target_network_update_period: int,
huber_param: float,
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._replay = replay
self._transition_accumulator = transition_accumulator
self._batch_size = batch_size
self._exploration_epsilon = exploration_epsilon
self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
self._learn_period = learn_period
self._target_network_update_period = target_network_update_period
# Initialize network parameters and optimizer.
self._rng_key, network_rng_key = jax.random.split(rng_key)
self._online_params = network.init(
network_rng_key, sample_network_input[None, ...]
)
self._target_params = self._online_params
self._opt_state = optimizer.init(self._online_params)
# Other agent state: last action, frame count, etc.
self._action = None
self._frame_t = -1 # Current frame index.
self._statistics = {'state_value': np.nan}
# Define jitted loss, update, and policy functions here instead of as
# class methods, to emphasize that these are meant to be pure functions
# and should not access the agent object's state via `self`.
def loss_fn(online_params, target_params, transitions, rng_key):
"""Calculates loss given network parameters and transitions."""
# Compute Q value distributions.
_, online_key, target_key = jax.random.split(rng_key, 3)
dist_q_tm1 = network.apply(
online_params, online_key, transitions.s_tm1
).q_dist
dist_q_target_t = network.apply(
target_params, target_key, transitions.s_t
).q_dist
losses = _batch_quantile_q_learning(
dist_q_tm1,
quantiles,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
dist_q_target_t, # No double Q-learning here.
dist_q_target_t,
huber_param,
)
chex.assert_shape(losses, (self._batch_size,))
loss = jnp.mean(losses)
return loss
def update(rng_key, opt_state, online_params, target_params, transitions):
"""Computes learning update from batch of replay transitions."""
rng_key, update_key = jax.random.split(rng_key)
d_loss_d_params = jax.grad(loss_fn)(
online_params, target_params, transitions, update_key
)
updates, new_opt_state = optimizer.update(d_loss_d_params, opt_state)
new_online_params = optax.apply_updates(online_params, updates)
return rng_key, new_opt_state, new_online_params
self._update = jax.jit(update)
def select_action(rng_key, network_params, s_t, exploration_epsilon):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
a_t = distrax.EpsilonGreedy(q_t, exploration_epsilon).sample(
seed=policy_key
)
v_t = jnp.max(q_t, axis=-1)
return rng_key, a_t, v_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given timestep and potentially learns."""
self._frame_t += 1
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
action = self._action
else:
action = self._action = self._act(timestep)
for transition in self._transition_accumulator.step(timestep, action):
self._replay.add(transition)
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
if self._frame_t % self._target_network_update_period == 0:
self._target_params = self._online_params
return action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
self._transition_accumulator.reset()
processors.reset(self._preprocessor)
self._action = None
def _act(self, timestep) -> parts.Action:
"""Selects action given timestep, according to epsilon-greedy policy."""
s_t = timestep.observation
self._rng_key, a_t, v_t = self._select_action(
self._rng_key, self._online_params, s_t, self.exploration_epsilon
)
a_t, v_t = jax.device_get((a_t, v_t))
self._statistics['state_value'] = v_t
return parts.Action(a_t)
def _learn(self) -> None:
"""Samples a batch of transitions from replay and learns from it."""
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions = self._replay.sample(self._batch_size)
self._rng_key, self._opt_state, self._online_params = self._update(
self._rng_key,
self._opt_state,
self._online_params,
self._target_params,
transitions,
)
@property
def online_params(self) -> parts.NetworkParams:
"""Returns current parameters of Q-network."""
return self._online_params
@property
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
    # Check that no values are DeviceArrays, as repeatedly reading them when
    # logging statistics can be very slow.
assert all(
not isinstance(x, jnp.DeviceArray) for x in self._statistics.values()
)
return self._statistics
@property
def exploration_epsilon(self) -> float:
"""Returns epsilon value currently used by (eps-greedy) behavior policy."""
return self._exploration_epsilon(self._frame_t)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
state = {
'rng_key': self._rng_key,
'frame_t': self._frame_t,
'opt_state': self._opt_state,
'online_params': self._online_params,
'target_params': self._target_params,
'replay': self._replay.get_state(),
}
return state
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self._frame_t = state['frame_t']
self._opt_state = jax.device_put(state['opt_state'])
self._online_params = jax.device_put(state['online_params'])
self._target_params = jax.device_put(state['target_params'])
self._replay.set_state(state['replay'])
| dqn_zoo-master | dqn_zoo/qrdqn/agent.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for QR-DQN."""
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
from jax.config import config
from dqn_zoo.qrdqn import run_atari
from absl.testing import absltest
FLAGS = flags.FLAGS
class RunAtariTest(absltest.TestCase):
@flagsaver.flagsaver
def test_can_run_agent(self):
FLAGS.environment_name = 'pong'
FLAGS.replay_capacity = 1000
FLAGS.exploration_epsilon_decay_frame_fraction = 0.1
FLAGS.target_network_update_period = 4
FLAGS.num_train_frames = 100
FLAGS.num_eval_frames = 50
FLAGS.num_iterations = 2
FLAGS.batch_size = 10
FLAGS.learn_period = 4
run_atari.main(None)
if __name__ == '__main__':
config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| dqn_zoo-master | dqn_zoo/qrdqn/run_atari_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A QR-DQN agent training on Atari.
From the paper "Distributional Reinforcement Learning with Quantile Regression"
http://arxiv.org/abs/1710.10044.
"""
# pylint: disable=g-bad-import-order
import collections
import itertools
import sys
import typing
from absl import app
from absl import flags
from absl import logging
import chex
import dm_env
import haiku as hk
import jax
from jax.config import config
import jax.numpy as jnp
import numpy as np
import optax
from dqn_zoo import atari_data
from dqn_zoo import gym_atari
from dqn_zoo import networks
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
from dqn_zoo.qrdqn import agent
# Relevant flag values are expressed in terms of environment frames.
FLAGS = flags.FLAGS
_ENVIRONMENT_NAME = flags.DEFINE_string('environment_name', 'pong', '')
_ENVIRONMENT_HEIGHT = flags.DEFINE_integer('environment_height', 84, '')
_ENVIRONMENT_WIDTH = flags.DEFINE_integer('environment_width', 84, '')
_REPLAY_CAPACITY = flags.DEFINE_integer('replay_capacity', int(1e6), '')
_COMPRESS_STATE = flags.DEFINE_bool('compress_state', True, '')
_MIN_REPLAY_CAPACITY_FRACTION = flags.DEFINE_float(
'min_replay_capacity_fraction', 0.05, ''
)
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 32, '')
_MAX_FRAMES_PER_EPISODE = flags.DEFINE_integer(
'max_frames_per_episode', 108000, ''
) # 30 mins.
_NUM_ACTION_REPEATS = flags.DEFINE_integer('num_action_repeats', 4, '')
_NUM_STACKED_FRAMES = flags.DEFINE_integer('num_stacked_frames', 4, '')
_EXPLORATION_EPSILON_BEGIN_VALUE = flags.DEFINE_float(
'exploration_epsilon_begin_value', 1.0, ''
)
_EXPLORATION_EPSILON_END_VALUE = flags.DEFINE_float(
'exploration_epsilon_end_value', 0.01, ''
)
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION = flags.DEFINE_float(
'exploration_epsilon_decay_frame_fraction', 0.02, ''
)
_EVAL_EXPLORATION_EPSILON = flags.DEFINE_float(
'eval_exploration_epsilon', 0.001, ''
)
_TARGET_NETWORK_UPDATE_PERIOD = flags.DEFINE_integer(
'target_network_update_period', int(4e4), ''
)
_HUBER_PARAM = flags.DEFINE_float('huber_param', 1.0, '')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 0.00005, '')
_OPTIMIZER_EPSILON = flags.DEFINE_float('optimizer_epsilon', 0.01 / 32, '')
_ADDITIONAL_DISCOUNT = flags.DEFINE_float('additional_discount', 0.99, '')
_MAX_ABS_REWARD = flags.DEFINE_float('max_abs_reward', 1.0, '')
_MAX_GLOBAL_GRAD_NORM = flags.DEFINE_float('max_global_grad_norm', 10.0, '')
_SEED = flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism.
_NUM_ITERATIONS = flags.DEFINE_integer('num_iterations', 200, '')
_NUM_TRAIN_FRAMES = flags.DEFINE_integer(
'num_train_frames', int(1e6), ''
) # Per iteration.
_NUM_EVAL_FRAMES = flags.DEFINE_integer(
'num_eval_frames', int(5e5), ''
) # Per iteration.
_LEARN_PERIOD = flags.DEFINE_integer('learn_period', 16, '')
_RESULTS_CSV_PATH = flags.DEFINE_string(
'results_csv_path', '/tmp/results.csv', ''
)
_NUM_QUANTILES = flags.DEFINE_integer('num_quantiles', 201, '')
def main(argv):
"""Trains QR-DQN agent on Atari."""
del argv
logging.info(
'QR-DQN on Atari on %s.', jax.lib.xla_bridge.get_backend().platform
)
random_state = np.random.RandomState(_SEED.value)
rng_key = jax.random.PRNGKey(
random_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64)
)
if _RESULTS_CSV_PATH.value:
writer = parts.CsvWriter(_RESULTS_CSV_PATH.value)
else:
writer = parts.NullWriter()
def environment_builder():
"""Creates Atari environment."""
env = gym_atari.GymAtari(
_ENVIRONMENT_NAME.value, seed=random_state.randint(1, 2**32)
)
return gym_atari.RandomNoopsEnvironmentWrapper(
env,
min_noop_steps=1,
max_noop_steps=30,
seed=random_state.randint(1, 2**32),
)
env = environment_builder()
logging.info('Environment: %s', _ENVIRONMENT_NAME.value)
logging.info('Action spec: %s', env.action_spec())
logging.info('Observation spec: %s', env.observation_spec())
num_actions = env.action_spec().num_values
num_quantiles = _NUM_QUANTILES.value
quantiles = (jnp.arange(0, num_quantiles) + 0.5) / float(num_quantiles)
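  # For example, num_quantiles=4 gives (0.5, 1.5, 2.5, 3.5) / 4 =
  # [0.125, 0.375, 0.625, 0.875], i.e. the midpoints of 4 equal-probability
  # bins; the default of 201 quantiles follows the same pattern.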
network_fn = networks.qr_atari_network(num_actions, quantiles)
network = hk.transform(network_fn)
def preprocessor_builder():
return processors.atari(
additional_discount=_ADDITIONAL_DISCOUNT.value,
max_abs_reward=_MAX_ABS_REWARD.value,
resize_shape=(_ENVIRONMENT_HEIGHT.value, _ENVIRONMENT_WIDTH.value),
num_action_repeats=_NUM_ACTION_REPEATS.value,
num_pooled_frames=2,
zero_discount_on_life_loss=True,
num_stacked_frames=_NUM_STACKED_FRAMES.value,
grayscaling=True,
)
# Create sample network input from sample preprocessor output.
sample_processed_timestep = preprocessor_builder()(env.reset())
sample_processed_timestep = typing.cast(
dm_env.TimeStep, sample_processed_timestep
)
sample_network_input = sample_processed_timestep.observation
chex.assert_shape(
sample_network_input,
(
_ENVIRONMENT_HEIGHT.value,
_ENVIRONMENT_WIDTH.value,
_NUM_STACKED_FRAMES.value,
),
)
exploration_epsilon_schedule = parts.LinearSchedule(
begin_t=int(
_MIN_REPLAY_CAPACITY_FRACTION.value
* _REPLAY_CAPACITY.value
* _NUM_ACTION_REPEATS.value
),
decay_steps=int(
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION.value
* _NUM_ITERATIONS.value
* _NUM_TRAIN_FRAMES.value
),
begin_value=_EXPLORATION_EPSILON_BEGIN_VALUE.value,
end_value=_EXPLORATION_EPSILON_END_VALUE.value,
)
if _COMPRESS_STATE.value:
def encoder(transition):
return transition._replace(
s_tm1=replay_lib.compress_array(transition.s_tm1),
s_t=replay_lib.compress_array(transition.s_t),
)
def decoder(transition):
return transition._replace(
s_tm1=replay_lib.uncompress_array(transition.s_tm1),
s_t=replay_lib.uncompress_array(transition.s_t),
)
else:
encoder = None
decoder = None
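  # When --compress_state is enabled, the encoder/decoder pair above
  # compresses the stacked uint8 observation frames before they are stored in
  # replay and decompresses them when sampled, trading a little CPU time for a
  # much smaller replay memory footprint.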
replay_structure = replay_lib.Transition(
s_tm1=None,
a_tm1=None,
r_t=None,
discount_t=None,
s_t=None,
)
replay = replay_lib.TransitionReplay(
_REPLAY_CAPACITY.value, replay_structure, random_state, encoder, decoder
)
optimizer = optax.adam(
learning_rate=_LEARNING_RATE.value, eps=_OPTIMIZER_EPSILON.value
)
if _MAX_GLOBAL_GRAD_NORM.value > 0:
optimizer = optax.chain(
optax.clip_by_global_norm(_MAX_GLOBAL_GRAD_NORM.value), optimizer
)
train_rng_key, eval_rng_key = jax.random.split(rng_key)
train_agent = agent.QrDqn(
preprocessor=preprocessor_builder(),
sample_network_input=sample_network_input,
network=network,
quantiles=quantiles,
optimizer=optimizer,
transition_accumulator=replay_lib.TransitionAccumulator(),
replay=replay,
batch_size=_BATCH_SIZE.value,
exploration_epsilon=exploration_epsilon_schedule,
min_replay_capacity_fraction=_MIN_REPLAY_CAPACITY_FRACTION.value,
learn_period=_LEARN_PERIOD.value,
target_network_update_period=_TARGET_NETWORK_UPDATE_PERIOD.value,
huber_param=_HUBER_PARAM.value,
rng_key=train_rng_key,
)
eval_agent = parts.EpsilonGreedyActor(
preprocessor=preprocessor_builder(),
network=network,
exploration_epsilon=_EVAL_EXPLORATION_EPSILON.value,
rng_key=eval_rng_key,
)
# Set up checkpointing.
checkpoint = parts.NullCheckpoint()
state = checkpoint.state
state.iteration = 0
state.train_agent = train_agent
state.eval_agent = eval_agent
state.random_state = random_state
state.writer = writer
if checkpoint.can_be_restored():
checkpoint.restore()
while state.iteration <= _NUM_ITERATIONS.value:
# New environment for each iteration to allow for determinism if preempted.
env = environment_builder()
logging.info('Training iteration %d.', state.iteration)
train_seq = parts.run_loop(train_agent, env, _MAX_FRAMES_PER_EPISODE.value)
num_train_frames = 0 if state.iteration == 0 else _NUM_TRAIN_FRAMES.value
train_seq_truncated = itertools.islice(train_seq, num_train_frames)
train_trackers = parts.make_default_trackers(train_agent)
train_stats = parts.generate_statistics(train_trackers, train_seq_truncated)
logging.info('Evaluation iteration %d.', state.iteration)
eval_agent.network_params = train_agent.online_params
eval_seq = parts.run_loop(eval_agent, env, _MAX_FRAMES_PER_EPISODE.value)
eval_seq_truncated = itertools.islice(eval_seq, _NUM_EVAL_FRAMES.value)
eval_trackers = parts.make_default_trackers(eval_agent)
eval_stats = parts.generate_statistics(eval_trackers, eval_seq_truncated)
# Logging and checkpointing.
human_normalized_score = atari_data.get_human_normalized_score(
_ENVIRONMENT_NAME.value, eval_stats['episode_return']
)
capped_human_normalized_score = np.amin([1.0, human_normalized_score])
log_output = [
('iteration', state.iteration, '%3d'),
('frame', state.iteration * _NUM_TRAIN_FRAMES.value, '%5d'),
('eval_episode_return', eval_stats['episode_return'], '% 2.2f'),
('train_episode_return', train_stats['episode_return'], '% 2.2f'),
('eval_num_episodes', eval_stats['num_episodes'], '%3d'),
('train_num_episodes', train_stats['num_episodes'], '%3d'),
('eval_frame_rate', eval_stats['step_rate'], '%4.0f'),
('train_frame_rate', train_stats['step_rate'], '%4.0f'),
('train_exploration_epsilon', train_agent.exploration_epsilon, '%.3f'),
('train_state_value', train_stats['state_value'], '%.3f'),
('normalized_return', human_normalized_score, '%.3f'),
('capped_normalized_return', capped_human_normalized_score, '%.3f'),
('human_gap', 1.0 - capped_human_normalized_score, '%.3f'),
]
log_output_str = ', '.join(('%s: ' + f) % (n, v) for n, v, f in log_output)
logging.info(log_output_str)
writer.write(collections.OrderedDict((n, v) for n, v, _ in log_output))
state.iteration += 1
checkpoint.save()
writer.close()
if __name__ == '__main__':
config.update('jax_platform_name', 'gpu') # Default to GPU.
config.update('jax_numpy_rank_promotion', 'raise')
config.config_with_absl()
app.run(main)
| dqn_zoo-master | dqn_zoo/qrdqn/run_atari.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prioritized Experience Replay (proportional variant) DQN agent class."""
# pylint: disable=g-bad-import-order
from typing import Any, Callable, Mapping, Text
from absl import logging
import chex
import distrax
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
# Batch variant of double_q_learning.
_batch_double_q_learning = jax.vmap(rlax.double_q_learning)
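# Note: rlax.double_q_learning computes the Double Q-learning TD error,
# roughly
#   td = r_t + discount_t * q_t_value[argmax_a q_t_selector[a]] - q_tm1[a_tm1],
# so action selection and action evaluation use different value estimates; in
# loss_fn below the online network's q_t is the selector and the target
# network's q_target_t is the evaluator.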
class PrioritizedDqn(parts.Agent):
"""Prioritized Experience Replay (proportional variant) DQN agent."""
def __init__(
self,
preprocessor: processors.Processor,
sample_network_input: jnp.ndarray,
network: parts.Network,
optimizer: optax.GradientTransformation,
transition_accumulator: replay_lib.TransitionAccumulator,
replay: replay_lib.PrioritizedTransitionReplay,
batch_size: int,
exploration_epsilon: Callable[[int], float],
min_replay_capacity_fraction: float,
learn_period: int,
target_network_update_period: int,
grad_error_bound: float,
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._replay = replay
self._transition_accumulator = transition_accumulator
self._batch_size = batch_size
self._exploration_epsilon = exploration_epsilon
self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
self._learn_period = learn_period
self._target_network_update_period = target_network_update_period
# Initialize network parameters and optimizer.
self._rng_key, network_rng_key = jax.random.split(rng_key)
self._online_params = network.init(
network_rng_key, sample_network_input[None, ...]
)
self._target_params = self._online_params
self._opt_state = optimizer.init(self._online_params)
# Other agent state: last action, frame count, etc.
self._action = None
self._frame_t = -1 # Current frame index.
self._statistics = {'state_value': np.nan}
self._max_seen_priority = 1.0
# Define jitted loss, update, and policy functions here instead of as
# class methods, to emphasize that these are meant to be pure functions
# and should not access the agent object's state via `self`.
def loss_fn(online_params, target_params, transitions, weights, rng_key):
"""Calculates loss given network parameters and transitions."""
_, *apply_keys = jax.random.split(rng_key, 4)
q_tm1 = network.apply(
online_params, apply_keys[0], transitions.s_tm1
).q_values
q_t = network.apply(
online_params, apply_keys[1], transitions.s_t
).q_values
q_target_t = network.apply(
target_params, apply_keys[2], transitions.s_t
).q_values
td_errors = _batch_double_q_learning(
q_tm1,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
q_target_t,
q_t,
)
td_errors = rlax.clip_gradient(
td_errors, -grad_error_bound, grad_error_bound
)
losses = rlax.l2_loss(td_errors)
chex.assert_shape((losses, weights), (self._batch_size,))
# This is not the same as using a huber loss and multiplying by weights.
loss = jnp.mean(losses * weights)
return loss, td_errors
def update(
rng_key, opt_state, online_params, target_params, transitions, weights
):
"""Computes learning update from batch of replay transitions."""
rng_key, update_key = jax.random.split(rng_key)
d_loss_d_params, td_errors = jax.grad(loss_fn, has_aux=True)(
online_params, target_params, transitions, weights, update_key
)
updates, new_opt_state = optimizer.update(d_loss_d_params, opt_state)
new_online_params = optax.apply_updates(online_params, updates)
return rng_key, new_opt_state, new_online_params, td_errors
self._update = jax.jit(update)
def select_action(rng_key, network_params, s_t, exploration_epsilon):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
a_t = distrax.EpsilonGreedy(q_t, exploration_epsilon).sample(
seed=policy_key
)
v_t = jnp.max(q_t, axis=-1)
return rng_key, a_t, v_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given timestep and potentially learns."""
self._frame_t += 1
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
action = self._action
else:
action = self._action = self._act(timestep)
for transition in self._transition_accumulator.step(timestep, action):
self._replay.add(transition, priority=self._max_seen_priority)
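    # The transition above is added with the maximum priority seen so far, so
    # that new experience is likely to be sampled, and its priority corrected
    # from its TD error, soon after insertion.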
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
if self._frame_t % self._target_network_update_period == 0:
self._target_params = self._online_params
return action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
self._transition_accumulator.reset()
processors.reset(self._preprocessor)
self._action = None
def _act(self, timestep) -> parts.Action:
"""Selects action given timestep, according to epsilon-greedy policy."""
s_t = timestep.observation
self._rng_key, a_t, v_t = self._select_action(
self._rng_key, self._online_params, s_t, self.exploration_epsilon
)
a_t, v_t = jax.device_get((a_t, v_t))
self._statistics['state_value'] = v_t
return parts.Action(a_t)
def _learn(self) -> None:
"""Samples a batch of transitions from replay and learns from it."""
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions, indices, weights = self._replay.sample(self._batch_size)
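    # `indices` identify the sampled transitions so their priorities can be
    # updated below from the new TD errors; `weights` are the importance
    # sampling corrections (roughly (1 / sample probability)^beta, optionally
    # normalized) that compensate for the non-uniform sampling.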
self._rng_key, self._opt_state, self._online_params, td_errors = (
self._update(
self._rng_key,
self._opt_state,
self._online_params,
self._target_params,
transitions,
weights,
)
)
chex.assert_equal_shape((weights, td_errors))
priorities = jnp.abs(td_errors)
priorities = jax.device_get(priorities)
max_priority = priorities.max()
self._max_seen_priority = np.max([self._max_seen_priority, max_priority])
self._replay.update_priorities(indices, priorities)
@property
def online_params(self) -> parts.NetworkParams:
"""Returns current parameters of Q-network."""
return self._online_params
@property
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
    # Check that no values are DeviceArrays, as repeatedly reading them when
    # logging statistics can be very slow.
assert all(
not isinstance(x, jnp.DeviceArray) for x in self._statistics.values()
)
return self._statistics
@property
def exploration_epsilon(self) -> float:
"""Returns epsilon value currently used by (eps-greedy) behavior policy."""
return self._exploration_epsilon(self._frame_t)
@property
def importance_sampling_exponent(self) -> float:
"""Returns current importance sampling exponent of prioritized replay."""
return self._replay.importance_sampling_exponent
@property
def max_seen_priority(self) -> float:
"""Returns maximum seen replay priority up until this time."""
return self._max_seen_priority
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
state = {
'rng_key': self._rng_key,
'frame_t': self._frame_t,
'opt_state': self._opt_state,
'online_params': self._online_params,
'target_params': self._target_params,
'replay': self._replay.get_state(),
'max_seen_priority': self._max_seen_priority,
}
return state
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self._frame_t = state['frame_t']
self._opt_state = jax.device_put(state['opt_state'])
self._online_params = jax.device_put(state['online_params'])
self._target_params = jax.device_put(state['target_params'])
self._replay.set_state(state['replay'])
self._max_seen_priority = state['max_seen_priority']
| dqn_zoo-master | dqn_zoo/prioritized/agent.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Prioritized DQN."""
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
from jax.config import config
from dqn_zoo.prioritized import run_atari
from absl.testing import absltest
FLAGS = flags.FLAGS
class RunAtariTest(absltest.TestCase):
@flagsaver.flagsaver
def test_can_run_agent(self):
FLAGS.environment_name = 'pong'
FLAGS.replay_capacity = 1000
FLAGS.exploration_epsilon_decay_frame_fraction = 0.1
FLAGS.target_network_update_period = 12
FLAGS.num_train_frames = 100
FLAGS.num_eval_frames = 50
FLAGS.num_iterations = 3
FLAGS.batch_size = 10
FLAGS.learn_period = 2
run_atari.main(None)
if __name__ == '__main__':
config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| dqn_zoo-master | dqn_zoo/prioritized/run_atari_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Prioritized DQN agent training on Atari.
From the paper "Prioritized Experience Replay" http://arxiv.org/abs/1511.05952.
This is Double DQN with:
* Proportional prioritized sampling and importance sampling correction.
* Smaller learning rate, but with the same effective maximum learning rate
(controlled by the optimizer epsilon parameter).
"""
# pylint: disable=g-bad-import-order
import collections
import itertools
import sys
import typing
from absl import app
from absl import flags
from absl import logging
import chex
import dm_env
import haiku as hk
import jax
from jax.config import config
import numpy as np
import optax
from dqn_zoo import atari_data
from dqn_zoo import gym_atari
from dqn_zoo import networks
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
from dqn_zoo.prioritized import agent
# Relevant flag values are expressed in terms of environment frames.
FLAGS = flags.FLAGS
_ENVIRONMENT_NAME = flags.DEFINE_string('environment_name', 'pong', '')
_ENVIRONMENT_HEIGHT = flags.DEFINE_integer('environment_height', 84, '')
_ENVIRONMENT_WIDTH = flags.DEFINE_integer('environment_width', 84, '')
_REPLAY_CAPACITY = flags.DEFINE_integer('replay_capacity', int(1e6), '')
_COMPRESS_STATE = flags.DEFINE_bool('compress_state', True, '')
_MIN_REPLAY_CAPACITY_FRACTION = flags.DEFINE_float(
'min_replay_capacity_fraction', 0.05, ''
)
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 32, '')
_MAX_FRAMES_PER_EPISODE = flags.DEFINE_integer(
'max_frames_per_episode', 108000, ''
) # 30 mins.
_NUM_ACTION_REPEATS = flags.DEFINE_integer('num_action_repeats', 4, '')
_NUM_STACKED_FRAMES = flags.DEFINE_integer('num_stacked_frames', 4, '')
_EXPLORATION_EPSILON_BEGIN_VALUE = flags.DEFINE_float(
'exploration_epsilon_begin_value', 1.0, ''
)
_EXPLORATION_EPSILON_END_VALUE = flags.DEFINE_float(
'exploration_epsilon_end_value', 0.01, ''
)
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION = flags.DEFINE_float(
'exploration_epsilon_decay_frame_fraction', 0.02, ''
)
_EVAL_EXPLORATION_EPSILON = flags.DEFINE_float(
'eval_exploration_epsilon', 0.01, ''
)
_TARGET_NETWORK_UPDATE_PERIOD = flags.DEFINE_integer(
'target_network_update_period', int(1.2e5), ''
)
_GRAD_ERROR_BOUND = flags.DEFINE_float('grad_error_bound', 1.0 / 32, '')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 0.00025 / 4, '')
_OPTIMIZER_EPSILON = flags.DEFINE_float(
'optimizer_epsilon', (0.01 / 32**2) * (1.0 / 4) ** 2, ''
)
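# Note: for RMSProp the largest effective per-parameter step is roughly
# learning_rate / sqrt(epsilon), so scaling the learning rate by 1/4 and
# epsilon by (1/4)**2 relative to the DQN defaults (0.00025 and 0.01 / 32**2)
# keeps that maximum step unchanged, as described in the module docstring.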
_ADDITIONAL_DISCOUNT = flags.DEFINE_float('additional_discount', 0.99, '')
_MAX_ABS_REWARD = flags.DEFINE_float('max_abs_reward', 1.0, '')
_SEED = flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism.
_NUM_ITERATIONS = flags.DEFINE_integer('num_iterations', 200, '')
_NUM_TRAIN_FRAMES = flags.DEFINE_integer(
'num_train_frames', int(1e6), ''
) # Per iteration.
_NUM_EVAL_FRAMES = flags.DEFINE_integer(
'num_eval_frames', int(5e5), ''
) # Per iteration.
_LEARN_PERIOD = flags.DEFINE_integer('learn_period', 16, '')
_RESULTS_CSV_PATH = flags.DEFINE_string(
'results_csv_path', '/tmp/results.csv', ''
)
_PRIORITY_EXPONENT = flags.DEFINE_float('priority_exponent', 0.6, '')
_IMPORTANCE_SAMPLING_EXPONENT_BEGIN_VALUE = flags.DEFINE_float(
'importance_sampling_exponent_begin_value', 0.4, ''
)
_IMPORTANCE_SAMPLING_EXPONENT_END_VALUE = flags.DEFINE_float(
'importance_sampling_exponent_end_value', 1.0, ''
)
_UNIFORM_SAMPLE_PROBABILITY = flags.DEFINE_float(
'uniform_sample_probability', 1e-3, ''
)
_NORMALIZE_WEIGHTS = flags.DEFINE_bool('normalize_weights', True, '')
def main(argv):
"""Trains Prioritized DQN agent on Atari."""
del argv
logging.info(
'Prioritized DQN on Atari on %s.',
jax.lib.xla_bridge.get_backend().platform,
)
random_state = np.random.RandomState(_SEED.value)
rng_key = jax.random.PRNGKey(
random_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64)
)
if _RESULTS_CSV_PATH.value:
writer = parts.CsvWriter(_RESULTS_CSV_PATH.value)
else:
writer = parts.NullWriter()
def environment_builder():
"""Creates Atari environment."""
env = gym_atari.GymAtari(
_ENVIRONMENT_NAME.value, seed=random_state.randint(1, 2**32)
)
return gym_atari.RandomNoopsEnvironmentWrapper(
env,
min_noop_steps=1,
max_noop_steps=30,
seed=random_state.randint(1, 2**32),
)
env = environment_builder()
logging.info('Environment: %s', _ENVIRONMENT_NAME.value)
logging.info('Action spec: %s', env.action_spec())
logging.info('Observation spec: %s', env.observation_spec())
num_actions = env.action_spec().num_values
network_fn = networks.double_dqn_atari_network(num_actions)
network = hk.transform(network_fn)
def preprocessor_builder():
return processors.atari(
additional_discount=_ADDITIONAL_DISCOUNT.value,
max_abs_reward=_MAX_ABS_REWARD.value,
resize_shape=(_ENVIRONMENT_HEIGHT.value, _ENVIRONMENT_WIDTH.value),
num_action_repeats=_NUM_ACTION_REPEATS.value,
num_pooled_frames=2,
zero_discount_on_life_loss=True,
num_stacked_frames=_NUM_STACKED_FRAMES.value,
grayscaling=True,
)
# Create sample network input from sample preprocessor output.
sample_processed_timestep = preprocessor_builder()(env.reset())
sample_processed_timestep = typing.cast(
dm_env.TimeStep, sample_processed_timestep
)
sample_network_input = sample_processed_timestep.observation
chex.assert_shape(
sample_network_input,
(
_ENVIRONMENT_HEIGHT.value,
_ENVIRONMENT_WIDTH.value,
_NUM_STACKED_FRAMES.value,
),
)
exploration_epsilon_schedule = parts.LinearSchedule(
begin_t=int(
_MIN_REPLAY_CAPACITY_FRACTION.value
* _REPLAY_CAPACITY.value
* _NUM_ACTION_REPEATS.value
),
decay_steps=int(
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION.value
* _NUM_ITERATIONS.value
* _NUM_TRAIN_FRAMES.value
),
begin_value=_EXPLORATION_EPSILON_BEGIN_VALUE.value,
end_value=_EXPLORATION_EPSILON_END_VALUE.value,
)
# Note the t in the replay is not exactly aligned with the agent t.
importance_sampling_exponent_schedule = parts.LinearSchedule(
begin_t=int(_MIN_REPLAY_CAPACITY_FRACTION.value * _REPLAY_CAPACITY.value),
end_t=(
_NUM_ITERATIONS.value
* int(_NUM_TRAIN_FRAMES.value / _NUM_ACTION_REPEATS.value)
),
begin_value=_IMPORTANCE_SAMPLING_EXPONENT_BEGIN_VALUE.value,
end_value=_IMPORTANCE_SAMPLING_EXPONENT_END_VALUE.value,
)
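  # Under the default flags this anneals the importance sampling exponent from
  # 0.4 towards 1.0 (full bias correction) over training; end_t is expressed in
  # (approximately) agent steps rather than frames, hence the division by
  # num_action_repeats above.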
if _COMPRESS_STATE.value:
def encoder(transition):
return transition._replace(
s_tm1=replay_lib.compress_array(transition.s_tm1),
s_t=replay_lib.compress_array(transition.s_t),
)
def decoder(transition):
return transition._replace(
s_tm1=replay_lib.uncompress_array(transition.s_tm1),
s_t=replay_lib.uncompress_array(transition.s_t),
)
else:
encoder = None
decoder = None
replay_structure = replay_lib.Transition(
s_tm1=None,
a_tm1=None,
r_t=None,
discount_t=None,
s_t=None,
)
replay = replay_lib.PrioritizedTransitionReplay(
_REPLAY_CAPACITY.value,
replay_structure,
_PRIORITY_EXPONENT.value,
importance_sampling_exponent_schedule,
_UNIFORM_SAMPLE_PROBABILITY.value,
_NORMALIZE_WEIGHTS.value,
random_state,
encoder,
decoder,
)
optimizer = optax.rmsprop(
learning_rate=_LEARNING_RATE.value,
decay=0.95,
eps=_OPTIMIZER_EPSILON.value,
centered=True,
)
train_rng_key, eval_rng_key = jax.random.split(rng_key)
train_agent = agent.PrioritizedDqn(
preprocessor=preprocessor_builder(),
sample_network_input=sample_network_input,
network=network,
optimizer=optimizer,
transition_accumulator=replay_lib.TransitionAccumulator(),
replay=replay,
batch_size=_BATCH_SIZE.value,
exploration_epsilon=exploration_epsilon_schedule,
min_replay_capacity_fraction=_MIN_REPLAY_CAPACITY_FRACTION.value,
learn_period=_LEARN_PERIOD.value,
target_network_update_period=_TARGET_NETWORK_UPDATE_PERIOD.value,
grad_error_bound=_GRAD_ERROR_BOUND.value,
rng_key=train_rng_key,
)
eval_agent = parts.EpsilonGreedyActor(
preprocessor=preprocessor_builder(),
network=network,
exploration_epsilon=_EVAL_EXPLORATION_EPSILON.value,
rng_key=eval_rng_key,
)
# Set up checkpointing.
checkpoint = parts.NullCheckpoint()
state = checkpoint.state
state.iteration = 0
state.train_agent = train_agent
state.eval_agent = eval_agent
state.random_state = random_state
state.writer = writer
if checkpoint.can_be_restored():
checkpoint.restore()
while state.iteration <= _NUM_ITERATIONS.value:
# New environment for each iteration to allow for determinism if preempted.
env = environment_builder()
logging.info('Training iteration %d.', state.iteration)
train_seq = parts.run_loop(train_agent, env, _MAX_FRAMES_PER_EPISODE.value)
num_train_frames = 0 if state.iteration == 0 else _NUM_TRAIN_FRAMES.value
train_seq_truncated = itertools.islice(train_seq, num_train_frames)
train_trackers = parts.make_default_trackers(train_agent)
train_stats = parts.generate_statistics(train_trackers, train_seq_truncated)
logging.info('Evaluation iteration %d.', state.iteration)
eval_agent.network_params = train_agent.online_params
eval_seq = parts.run_loop(eval_agent, env, _MAX_FRAMES_PER_EPISODE.value)
eval_seq_truncated = itertools.islice(eval_seq, _NUM_EVAL_FRAMES.value)
eval_trackers = parts.make_default_trackers(eval_agent)
eval_stats = parts.generate_statistics(eval_trackers, eval_seq_truncated)
# Logging and checkpointing.
human_normalized_score = atari_data.get_human_normalized_score(
_ENVIRONMENT_NAME.value, eval_stats['episode_return']
)
capped_human_normalized_score = np.amin([1.0, human_normalized_score])
log_output = [
('iteration', state.iteration, '%3d'),
('frame', state.iteration * _NUM_TRAIN_FRAMES.value, '%5d'),
('eval_episode_return', eval_stats['episode_return'], '% 2.2f'),
('train_episode_return', train_stats['episode_return'], '% 2.2f'),
('eval_num_episodes', eval_stats['num_episodes'], '%3d'),
('train_num_episodes', train_stats['num_episodes'], '%3d'),
('eval_frame_rate', eval_stats['step_rate'], '%4.0f'),
('train_frame_rate', train_stats['step_rate'], '%4.0f'),
('train_exploration_epsilon', train_agent.exploration_epsilon, '%.3f'),
('train_state_value', train_stats['state_value'], '%.3f'),
(
'importance_sampling_exponent',
train_agent.importance_sampling_exponent,
'%.3f',
),
('max_seen_priority', train_agent.max_seen_priority, '%.3f'),
('normalized_return', human_normalized_score, '%.3f'),
('capped_normalized_return', capped_human_normalized_score, '%.3f'),
('human_gap', 1.0 - capped_human_normalized_score, '%.3f'),
]
log_output_str = ', '.join(('%s: ' + f) % (n, v) for n, v, f in log_output)
logging.info(log_output_str)
writer.write(collections.OrderedDict((n, v) for n, v, _ in log_output))
state.iteration += 1
checkpoint.save()
writer.close()
if __name__ == '__main__':
config.update('jax_platform_name', 'gpu') # Default to GPU.
config.update('jax_numpy_rank_promotion', 'raise')
config.config_with_absl()
app.run(main)
| dqn_zoo-master | dqn_zoo/prioritized/run_atari.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DQN agent class."""
# pylint: disable=g-bad-import-order
from typing import Any, Callable, Mapping, Text
from absl import logging
import chex
import distrax
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
# Batch variant of q_learning.
_batch_q_learning = jax.vmap(rlax.q_learning)
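# Note: rlax.q_learning computes the one-step Q-learning TD error, roughly
#   td = r_t + discount_t * max_a q_target_t[a] - q_tm1[a_tm1],
# which loss_fn below turns into a (gradient-clipped) squared-error loss.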
class Dqn(parts.Agent):
"""Deep Q-Network agent."""
def __init__(
self,
preprocessor: processors.Processor,
sample_network_input: jnp.ndarray,
network: parts.Network,
optimizer: optax.GradientTransformation,
transition_accumulator: Any,
replay: replay_lib.TransitionReplay,
batch_size: int,
exploration_epsilon: Callable[[int], float],
min_replay_capacity_fraction: float,
learn_period: int,
target_network_update_period: int,
grad_error_bound: float,
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._replay = replay
self._transition_accumulator = transition_accumulator
self._batch_size = batch_size
self._exploration_epsilon = exploration_epsilon
self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
self._learn_period = learn_period
self._target_network_update_period = target_network_update_period
# Initialize network parameters and optimizer.
self._rng_key, network_rng_key = jax.random.split(rng_key)
self._online_params = network.init(
network_rng_key, sample_network_input[None, ...]
)
self._target_params = self._online_params
self._opt_state = optimizer.init(self._online_params)
# Other agent state: last action, frame count, etc.
self._action = None
self._frame_t = -1 # Current frame index.
self._statistics = {'state_value': np.nan}
# Define jitted loss, update, and policy functions here instead of as
# class methods, to emphasize that these are meant to be pure functions
# and should not access the agent object's state via `self`.
def loss_fn(online_params, target_params, transitions, rng_key):
"""Calculates loss given network parameters and transitions."""
_, online_key, target_key = jax.random.split(rng_key, 3)
q_tm1 = network.apply(
online_params, online_key, transitions.s_tm1
).q_values
q_target_t = network.apply(
target_params, target_key, transitions.s_t
).q_values
td_errors = _batch_q_learning(
q_tm1,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
q_target_t,
)
td_errors = rlax.clip_gradient(
td_errors, -grad_error_bound, grad_error_bound
)
losses = rlax.l2_loss(td_errors)
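      # Note: clipping the gradient of the TD error to +/- grad_error_bound
      # and then applying an l2 loss yields gradients equivalent to those of a
      # Huber loss with delta = grad_error_bound; the forward value is still a
      # plain squared error.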
chex.assert_shape(losses, (self._batch_size,))
loss = jnp.mean(losses)
return loss
def update(rng_key, opt_state, online_params, target_params, transitions):
"""Computes learning update from batch of replay transitions."""
rng_key, update_key = jax.random.split(rng_key)
d_loss_d_params = jax.grad(loss_fn)(
online_params, target_params, transitions, update_key
)
updates, new_opt_state = optimizer.update(d_loss_d_params, opt_state)
new_online_params = optax.apply_updates(online_params, updates)
return rng_key, new_opt_state, new_online_params
self._update = jax.jit(update)
def select_action(rng_key, network_params, s_t, exploration_epsilon):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
a_t = distrax.EpsilonGreedy(q_t, exploration_epsilon).sample(
seed=policy_key
)
v_t = jnp.max(q_t, axis=-1)
return rng_key, a_t, v_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given timestep and potentially learns."""
self._frame_t += 1
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
action = self._action
else:
action = self._action = self._act(timestep)
for transition in self._transition_accumulator.step(timestep, action):
self._replay.add(transition)
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
if self._frame_t % self._target_network_update_period == 0:
self._target_params = self._online_params
return action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
self._transition_accumulator.reset()
processors.reset(self._preprocessor)
self._action = None
def _act(self, timestep) -> parts.Action:
"""Selects action given timestep, according to epsilon-greedy policy."""
s_t = timestep.observation
self._rng_key, a_t, v_t = self._select_action(
self._rng_key, self._online_params, s_t, self.exploration_epsilon
)
a_t, v_t = jax.device_get((a_t, v_t))
self._statistics['state_value'] = v_t
return parts.Action(a_t)
def _learn(self) -> None:
"""Samples a batch of transitions from replay and learns from it."""
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions = self._replay.sample(self._batch_size)
self._rng_key, self._opt_state, self._online_params = self._update(
self._rng_key,
self._opt_state,
self._online_params,
self._target_params,
transitions,
)
@property
def online_params(self) -> parts.NetworkParams:
"""Returns current parameters of Q-network."""
return self._online_params
@property
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
    # Check that no values are DeviceArrays, as repeatedly reading them when
    # logging statistics can be very slow.
assert all(
not isinstance(x, jnp.DeviceArray) for x in self._statistics.values()
)
return self._statistics
@property
def exploration_epsilon(self) -> float:
"""Returns epsilon value currently used by (eps-greedy) behavior policy."""
return self._exploration_epsilon(self._frame_t)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
state = {
'rng_key': self._rng_key,
'frame_t': self._frame_t,
'opt_state': self._opt_state,
'online_params': self._online_params,
'target_params': self._target_params,
'replay': self._replay.get_state(),
}
return state
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self._frame_t = state['frame_t']
self._opt_state = jax.device_put(state['opt_state'])
self._online_params = jax.device_put(state['online_params'])
self._target_params = jax.device_put(state['target_params'])
self._replay.set_state(state['replay'])
| dqn_zoo-master | dqn_zoo/dqn/agent.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DQN."""
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
from jax.config import config
from dqn_zoo.dqn import run_atari
from absl.testing import absltest
FLAGS = flags.FLAGS
class RunAtariTest(absltest.TestCase):
@flagsaver.flagsaver
def test_can_run_agent(self):
FLAGS.environment_name = 'pong'
FLAGS.replay_capacity = 1000
FLAGS.exploration_epsilon_decay_frame_fraction = 0.1
FLAGS.target_network_update_period = 4
FLAGS.num_train_frames = 100
FLAGS.num_eval_frames = 50
FLAGS.num_iterations = 2
FLAGS.batch_size = 10
FLAGS.learn_period = 2
run_atari.main(None)
if __name__ == '__main__':
config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| dqn_zoo-master | dqn_zoo/dqn/run_atari_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A DQN agent training on Atari.
From the paper "Human-level control through deep reinforcement learning"
http://www.nature.com/articles/nature14236.
"""
# pylint: disable=g-bad-import-order
import collections
import itertools
import sys
import typing
from absl import app
from absl import flags
from absl import logging
import chex
import dm_env
import haiku as hk
import jax
from jax.config import config
import numpy as np
import optax
from dqn_zoo import atari_data
from dqn_zoo import gym_atari
from dqn_zoo import networks
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
from dqn_zoo.dqn import agent
# Relevant flag values are expressed in terms of environment frames.
FLAGS = flags.FLAGS
_ENVIRONMENT_NAME = flags.DEFINE_string('environment_name', 'pong', '')
_ENVIRONMENT_HEIGHT = flags.DEFINE_integer('environment_height', 84, '')
_ENVIRONMENT_WIDTH = flags.DEFINE_integer('environment_width', 84, '')
_REPLAY_CAPACITY = flags.DEFINE_integer('replay_capacity', int(1e6), '')
_COMPRESS_STATE = flags.DEFINE_bool('compress_state', True, '')
_MIN_REPLAY_CAPACITY_FRACTION = flags.DEFINE_float(
'min_replay_capacity_fraction', 0.05, ''
)
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 32, '')
_MAX_FRAMES_PER_EPISODE = flags.DEFINE_integer(
'max_frames_per_episode', 108000, ''
) # 30 mins.
_NUM_ACTION_REPEATS = flags.DEFINE_integer('num_action_repeats', 4, '')
_NUM_STACKED_FRAMES = flags.DEFINE_integer('num_stacked_frames', 4, '')
_EXPLORATION_EPSILON_BEGIN_VALUE = flags.DEFINE_float(
'exploration_epsilon_begin_value', 1.0, ''
)
_EXPLORATION_EPSILON_END_VALUE = flags.DEFINE_float(
'exploration_epsilon_end_value', 0.1, ''
)
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION = flags.DEFINE_float(
'exploration_epsilon_decay_frame_fraction', 0.02, ''
)
_EVAL_EXPLORATION_EPSILON = flags.DEFINE_float(
'eval_exploration_epsilon', 0.05, ''
)
_TARGET_NETWORK_UPDATE_PERIOD = flags.DEFINE_integer(
'target_network_update_period', int(4e4), ''
)
_GRAD_ERROR_BOUND = flags.DEFINE_float('grad_error_bound', 1.0 / 32, '')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 0.00025, '')
_OPTIMIZER_EPSILON = flags.DEFINE_float('optimizer_epsilon', 0.01 / 32**2, '')
_ADDITIONAL_DISCOUNT = flags.DEFINE_float('additional_discount', 0.99, '')
_MAX_ABS_REWARD = flags.DEFINE_float('max_abs_reward', 1.0, '')
_SEED = flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism.
_NUM_ITERATIONS = flags.DEFINE_integer('num_iterations', 200, '')
_NUM_TRAIN_FRAMES = flags.DEFINE_integer(
'num_train_frames', int(1e6), ''
) # Per iteration.
_NUM_EVAL_FRAMES = flags.DEFINE_integer(
'num_eval_frames', int(5e5), ''
) # Per iteration.
_LEARN_PERIOD = flags.DEFINE_integer('learn_period', 16, '')
_RESULTS_CSV_PATH = flags.DEFINE_string(
'results_csv_path', '/tmp/results.csv', ''
)
def main(argv):
"""Trains DQN agent on Atari."""
del argv
logging.info('DQN on Atari on %s.', jax.lib.xla_bridge.get_backend().platform)
random_state = np.random.RandomState(_SEED.value)
rng_key = jax.random.PRNGKey(
random_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64)
)
if _RESULTS_CSV_PATH.value:
writer = parts.CsvWriter(_RESULTS_CSV_PATH.value)
else:
writer = parts.NullWriter()
def environment_builder():
"""Creates Atari environment."""
env = gym_atari.GymAtari(
_ENVIRONMENT_NAME.value, seed=random_state.randint(1, 2**32)
)
return gym_atari.RandomNoopsEnvironmentWrapper(
env,
min_noop_steps=1,
max_noop_steps=30,
seed=random_state.randint(1, 2**32),
)
env = environment_builder()
logging.info('Environment: %s', _ENVIRONMENT_NAME.value)
logging.info('Action spec: %s', env.action_spec())
logging.info('Observation spec: %s', env.observation_spec())
num_actions = env.action_spec().num_values
network_fn = networks.dqn_atari_network(num_actions)
network = hk.transform(network_fn)
def preprocessor_builder():
return processors.atari(
additional_discount=_ADDITIONAL_DISCOUNT.value,
max_abs_reward=_MAX_ABS_REWARD.value,
resize_shape=(_ENVIRONMENT_HEIGHT.value, _ENVIRONMENT_WIDTH.value),
num_action_repeats=_NUM_ACTION_REPEATS.value,
num_pooled_frames=2,
zero_discount_on_life_loss=True,
num_stacked_frames=_NUM_STACKED_FRAMES.value,
grayscaling=True,
)
# Create sample network input from sample preprocessor output.
sample_processed_timestep = preprocessor_builder()(env.reset())
sample_processed_timestep = typing.cast(
dm_env.TimeStep, sample_processed_timestep
)
sample_network_input = sample_processed_timestep.observation
chex.assert_shape(
sample_network_input,
(
_ENVIRONMENT_HEIGHT.value,
_ENVIRONMENT_WIDTH.value,
_NUM_STACKED_FRAMES.value,
),
)
exploration_epsilon_schedule = parts.LinearSchedule(
begin_t=int(
_MIN_REPLAY_CAPACITY_FRACTION.value
* _REPLAY_CAPACITY.value
* _NUM_ACTION_REPEATS.value
),
decay_steps=int(
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION.value
* _NUM_ITERATIONS.value
* _NUM_TRAIN_FRAMES.value
),
begin_value=_EXPLORATION_EPSILON_BEGIN_VALUE.value,
end_value=_EXPLORATION_EPSILON_END_VALUE.value,
)
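  # A rough worked example under the default flags (assuming
  # parts.LinearSchedule holds begin_value until begin_t and then interpolates
  # linearly to end_value over decay_steps): epsilon stays at 1.0 for the first
  # 0.05 * 1e6 * 4 = 200,000 frames, then decays to 0.1 over the next
  # 0.02 * 200 * 1e6 = 4,000,000 frames.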
if _COMPRESS_STATE.value:
def encoder(transition):
return transition._replace(
s_tm1=replay_lib.compress_array(transition.s_tm1),
s_t=replay_lib.compress_array(transition.s_t),
)
def decoder(transition):
return transition._replace(
s_tm1=replay_lib.uncompress_array(transition.s_tm1),
s_t=replay_lib.uncompress_array(transition.s_t),
)
else:
encoder = None
decoder = None
replay_structure = replay_lib.Transition(
s_tm1=None,
a_tm1=None,
r_t=None,
discount_t=None,
s_t=None,
)
replay = replay_lib.TransitionReplay(
_REPLAY_CAPACITY.value, replay_structure, random_state, encoder, decoder
)
optimizer = optax.rmsprop(
learning_rate=_LEARNING_RATE.value,
decay=0.95,
eps=_OPTIMIZER_EPSILON.value,
centered=True,
)
train_rng_key, eval_rng_key = jax.random.split(rng_key)
train_agent = agent.Dqn(
preprocessor=preprocessor_builder(),
sample_network_input=sample_network_input,
network=network,
optimizer=optimizer,
transition_accumulator=replay_lib.TransitionAccumulator(),
replay=replay,
batch_size=_BATCH_SIZE.value,
exploration_epsilon=exploration_epsilon_schedule,
min_replay_capacity_fraction=_MIN_REPLAY_CAPACITY_FRACTION.value,
learn_period=_LEARN_PERIOD.value,
target_network_update_period=_TARGET_NETWORK_UPDATE_PERIOD.value,
grad_error_bound=_GRAD_ERROR_BOUND.value,
rng_key=train_rng_key,
)
eval_agent = parts.EpsilonGreedyActor(
preprocessor=preprocessor_builder(),
network=network,
exploration_epsilon=_EVAL_EXPLORATION_EPSILON.value,
rng_key=eval_rng_key,
)
# Set up checkpointing.
checkpoint = parts.NullCheckpoint()
state = checkpoint.state
state.iteration = 0
state.train_agent = train_agent
state.eval_agent = eval_agent
state.random_state = random_state
state.writer = writer
if checkpoint.can_be_restored():
checkpoint.restore()
while state.iteration <= _NUM_ITERATIONS.value:
# New environment for each iteration to allow for determinism if preempted.
env = environment_builder()
logging.info('Training iteration %d.', state.iteration)
train_seq = parts.run_loop(train_agent, env, _MAX_FRAMES_PER_EPISODE.value)
num_train_frames = 0 if state.iteration == 0 else _NUM_TRAIN_FRAMES.value
train_seq_truncated = itertools.islice(train_seq, num_train_frames)
train_trackers = parts.make_default_trackers(train_agent)
train_stats = parts.generate_statistics(train_trackers, train_seq_truncated)
logging.info('Evaluation iteration %d.', state.iteration)
eval_agent.network_params = train_agent.online_params
eval_seq = parts.run_loop(eval_agent, env, _MAX_FRAMES_PER_EPISODE.value)
eval_seq_truncated = itertools.islice(eval_seq, _NUM_EVAL_FRAMES.value)
eval_trackers = parts.make_default_trackers(eval_agent)
eval_stats = parts.generate_statistics(eval_trackers, eval_seq_truncated)
# Logging and checkpointing.
human_normalized_score = atari_data.get_human_normalized_score(
_ENVIRONMENT_NAME.value, eval_stats['episode_return']
)
capped_human_normalized_score = np.amin([1.0, human_normalized_score])
log_output = [
('iteration', state.iteration, '%3d'),
('frame', state.iteration * _NUM_TRAIN_FRAMES.value, '%5d'),
('eval_episode_return', eval_stats['episode_return'], '% 2.2f'),
('train_episode_return', train_stats['episode_return'], '% 2.2f'),
('eval_num_episodes', eval_stats['num_episodes'], '%3d'),
('train_num_episodes', train_stats['num_episodes'], '%3d'),
('eval_frame_rate', eval_stats['step_rate'], '%4.0f'),
('train_frame_rate', train_stats['step_rate'], '%4.0f'),
('train_exploration_epsilon', train_agent.exploration_epsilon, '%.3f'),
('train_state_value', train_stats['state_value'], '%.3f'),
('normalized_return', human_normalized_score, '%.3f'),
('capped_normalized_return', capped_human_normalized_score, '%.3f'),
('human_gap', 1.0 - capped_human_normalized_score, '%.3f'),
]
log_output_str = ', '.join(('%s: ' + f) % (n, v) for n, v, f in log_output)
logging.info(log_output_str)
writer.write(collections.OrderedDict((n, v) for n, v, _ in log_output))
state.iteration += 1
checkpoint.save()
writer.close()
if __name__ == '__main__':
config.update('jax_platform_name', 'gpu') # Default to GPU.
config.update('jax_numpy_rank_promotion', 'raise')
config.config_with_absl()
app.run(main)
| dqn_zoo-master | dqn_zoo/dqn/run_atari.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""C51 agent class."""
# pylint: disable=g-bad-import-order
from typing import Any, Callable, Mapping, Text
from absl import logging
import chex
import distrax
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
# Batch variant of categorical_q_learning with fixed atoms across batch.
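# The two `None` entries in in_axes broadcast the shared atom support across
# the batch, while logits, actions, rewards and discounts are mapped over the
# leading batch axis.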
_batch_categorical_q_learning = jax.vmap(
rlax.categorical_q_learning, in_axes=(None, 0, 0, 0, 0, None, 0)
)
class C51(parts.Agent):
"""C51 agent."""
def __init__(
self,
preprocessor: processors.Processor,
sample_network_input: jnp.ndarray,
network: parts.Network,
support: jnp.ndarray,
optimizer: optax.GradientTransformation,
transition_accumulator: Any,
replay: replay_lib.TransitionReplay,
batch_size: int,
exploration_epsilon: Callable[[int], float],
min_replay_capacity_fraction: float,
learn_period: int,
target_network_update_period: int,
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._replay = replay
self._transition_accumulator = transition_accumulator
self._batch_size = batch_size
self._exploration_epsilon = exploration_epsilon
self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
self._learn_period = learn_period
self._target_network_update_period = target_network_update_period
# Initialize network parameters and optimizer.
self._rng_key, network_rng_key = jax.random.split(rng_key)
self._online_params = network.init(
network_rng_key, sample_network_input[None, ...]
)
self._target_params = self._online_params
self._opt_state = optimizer.init(self._online_params)
# Other agent state: last action, frame count, etc.
self._action = None
self._frame_t = -1 # Current frame index.
self._statistics = {'state_value': np.nan}
# Define jitted loss, update, and policy functions here instead of as
# class methods, to emphasize that these are meant to be pure functions
# and should not access the agent object's state via `self`.
def loss_fn(online_params, target_params, transitions, rng_key):
"""Calculates loss given network parameters and transitions."""
_, online_key, target_key = jax.random.split(rng_key, 3)
logits_q_tm1 = network.apply(
online_params, online_key, transitions.s_tm1
).q_logits
logits_target_q_t = network.apply(
target_params, target_key, transitions.s_t
).q_logits
losses = _batch_categorical_q_learning(
support,
logits_q_tm1,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
support,
logits_target_q_t,
)
chex.assert_shape(losses, (self._batch_size,))
loss = jnp.mean(losses)
return loss
def update(rng_key, opt_state, online_params, target_params, transitions):
"""Computes learning update from batch of replay transitions."""
rng_key, update_key = jax.random.split(rng_key)
d_loss_d_params = jax.grad(loss_fn)(
online_params, target_params, transitions, update_key
)
updates, new_opt_state = optimizer.update(d_loss_d_params, opt_state)
new_online_params = optax.apply_updates(online_params, updates)
return rng_key, new_opt_state, new_online_params
self._update = jax.jit(update)
def select_action(rng_key, network_params, s_t, exploration_epsilon):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
a_t = distrax.EpsilonGreedy(q_t, exploration_epsilon).sample(
seed=policy_key
)
v_t = jnp.max(q_t, axis=-1)
return rng_key, a_t, v_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given timestep and potentially learns."""
self._frame_t += 1
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
action = self._action
else:
action = self._action = self._act(timestep)
for transition in self._transition_accumulator.step(timestep, action):
self._replay.add(transition)
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
if self._frame_t % self._target_network_update_period == 0:
self._target_params = self._online_params
return action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
self._transition_accumulator.reset()
processors.reset(self._preprocessor)
self._action = None
def _act(self, timestep) -> parts.Action:
"""Selects action given timestep, according to epsilon-greedy policy."""
s_t = timestep.observation
self._rng_key, a_t, v_t = self._select_action(
self._rng_key, self._online_params, s_t, self.exploration_epsilon
)
a_t, v_t = jax.device_get((a_t, v_t))
self._statistics['state_value'] = v_t
return parts.Action(a_t)
def _learn(self) -> None:
"""Samples a batch of transitions from replay and learns from it."""
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions = self._replay.sample(self._batch_size)
self._rng_key, self._opt_state, self._online_params = self._update(
self._rng_key,
self._opt_state,
self._online_params,
self._target_params,
transitions,
)
@property
def online_params(self) -> parts.NetworkParams:
"""Returns current parameters of Q-network."""
return self._online_params
@property
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
# Check for DeviceArrays in values as this can be very slow.
assert all(
not isinstance(x, jnp.DeviceArray) for x in self._statistics.values()
)
return self._statistics
@property
def exploration_epsilon(self) -> float:
"""Returns epsilon value currently used by (eps-greedy) behavior policy."""
return self._exploration_epsilon(self._frame_t)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
state = {
'rng_key': self._rng_key,
'frame_t': self._frame_t,
'opt_state': self._opt_state,
'online_params': self._online_params,
'target_params': self._target_params,
'replay': self._replay.get_state(),
}
return state
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self._frame_t = state['frame_t']
self._opt_state = jax.device_put(state['opt_state'])
self._online_params = jax.device_put(state['online_params'])
self._target_params = jax.device_put(state['target_params'])
self._replay.set_state(state['replay'])
| dqn_zoo-master | dqn_zoo/c51/agent.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for C51."""
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
from jax.config import config
from dqn_zoo.c51 import run_atari
from absl.testing import absltest
FLAGS = flags.FLAGS
class RunAtariTest(absltest.TestCase):
@flagsaver.flagsaver
def test_can_run_agent(self):
FLAGS.environment_name = 'pong'
FLAGS.replay_capacity = 1000
FLAGS.exploration_epsilon_decay_frame_fraction = 0.1
FLAGS.target_network_update_period = 4
FLAGS.num_train_frames = 100
FLAGS.num_eval_frames = 50
FLAGS.num_iterations = 2
FLAGS.batch_size = 10
FLAGS.learn_period = 2
run_atari.main(None)
if __name__ == '__main__':
config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| dqn_zoo-master | dqn_zoo/c51/run_atari_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A C51 agent training on Atari.
From the paper "A Distributional Perspective on Reinforcement Learning"
http://arxiv.org/abs/1707.06887.
"""
# pylint: disable=g-bad-import-order
import collections
import itertools
import sys
import typing
from absl import app
from absl import flags
from absl import logging
import chex
import dm_env
import haiku as hk
import jax
from jax.config import config
import jax.numpy as jnp
import numpy as np
import optax
from dqn_zoo import atari_data
from dqn_zoo import gym_atari
from dqn_zoo import networks
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
from dqn_zoo.c51 import agent
# Relevant flag values are expressed in terms of environment frames.
FLAGS = flags.FLAGS
_ENVIRONMENT_NAME = flags.DEFINE_string('environment_name', 'pong', '')
_ENVIRONMENT_HEIGHT = flags.DEFINE_integer('environment_height', 84, '')
_ENVIRONMENT_WIDTH = flags.DEFINE_integer('environment_width', 84, '')
_REPLAY_CAPACITY = flags.DEFINE_integer('replay_capacity', int(1e6), '')
_COMPRESS_STATE = flags.DEFINE_bool('compress_state', True, '')
_MIN_REPLAY_CAPACITY_FRACTION = flags.DEFINE_float(
'min_replay_capacity_fraction', 0.05, ''
)
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 32, '')
_MAX_FRAMES_PER_EPISODE = flags.DEFINE_integer(
'max_frames_per_episode', 108000, ''
) # 30 mins.
_NUM_ACTION_REPEATS = flags.DEFINE_integer('num_action_repeats', 4, '')
_NUM_STACKED_FRAMES = flags.DEFINE_integer('num_stacked_frames', 4, '')
_EXPLORATION_EPSILON_BEGIN_VALUE = flags.DEFINE_float(
'exploration_epsilon_begin_value', 1.0, ''
)
_EXPLORATION_EPSILON_END_VALUE = flags.DEFINE_float(
'exploration_epsilon_end_value', 0.01, ''
)
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION = flags.DEFINE_float(
'exploration_epsilon_decay_frame_fraction', 0.02, ''
)
_EVAL_EXPLORATION_EPSILON = flags.DEFINE_float(
'eval_exploration_epsilon', 0.001, ''
)
_TARGET_NETWORK_UPDATE_PERIOD = flags.DEFINE_integer(
'target_network_update_period', int(4e4), ''
)
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 0.00025, '')
_OPTIMIZER_EPSILON = flags.DEFINE_float('optimizer_epsilon', 0.01 / 32, '')
_ADDITIONAL_DISCOUNT = flags.DEFINE_float('additional_discount', 0.99, '')
_MAX_ABS_REWARD = flags.DEFINE_float('max_abs_reward', 1.0, '')
_MAX_GLOBAL_GRAD_NORM = flags.DEFINE_float('max_global_grad_norm', 10.0, '')
_SEED = flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism.
_NUM_ITERATIONS = flags.DEFINE_integer('num_iterations', 200, '')
_NUM_TRAIN_FRAMES = flags.DEFINE_integer(
'num_train_frames', int(1e6), ''
) # Per iteration.
_NUM_EVAL_FRAMES = flags.DEFINE_integer(
'num_eval_frames', int(5e5), ''
) # Per iteration.
_LEARN_PERIOD = flags.DEFINE_integer('learn_period', 16, '')
_RESULTS_CSV_PATH = flags.DEFINE_string(
'results_csv_path', '/tmp/results.csv', ''
)
_VMAX = flags.DEFINE_float('vmax', 10.0, '')
_NUM_ATOMS = flags.DEFINE_integer('num_atoms', 51, '')
def main(argv):
"""Trains C51 agent on Atari."""
del argv
logging.info('C51 on Atari on %s.', jax.lib.xla_bridge.get_backend().platform)
random_state = np.random.RandomState(_SEED.value)
rng_key = jax.random.PRNGKey(
random_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64)
)
if _RESULTS_CSV_PATH.value:
writer = parts.CsvWriter(_RESULTS_CSV_PATH.value)
else:
writer = parts.NullWriter()
def environment_builder():
"""Creates Atari environment."""
env = gym_atari.GymAtari(
_ENVIRONMENT_NAME.value, seed=random_state.randint(1, 2**32)
)
return gym_atari.RandomNoopsEnvironmentWrapper(
env,
min_noop_steps=1,
max_noop_steps=30,
seed=random_state.randint(1, 2**32),
)
env = environment_builder()
logging.info('Environment: %s', _ENVIRONMENT_NAME.value)
logging.info('Action spec: %s', env.action_spec())
logging.info('Observation spec: %s', env.observation_spec())
num_actions = env.action_spec().num_values
support = jnp.linspace(-_VMAX.value, _VMAX.value, _NUM_ATOMS.value)
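  # With the default flags this gives 51 atoms evenly spaced over [-10, 10]
  # (spacing 2 * vmax / (num_atoms - 1) = 0.4), the fixed support of the
  # categorical return distribution.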
network_fn = networks.c51_atari_network(num_actions, support)
network = hk.transform(network_fn)
def preprocessor_builder():
return processors.atari(
additional_discount=_ADDITIONAL_DISCOUNT.value,
max_abs_reward=_MAX_ABS_REWARD.value,
resize_shape=(_ENVIRONMENT_HEIGHT.value, _ENVIRONMENT_WIDTH.value),
num_action_repeats=_NUM_ACTION_REPEATS.value,
num_pooled_frames=2,
zero_discount_on_life_loss=True,
num_stacked_frames=_NUM_STACKED_FRAMES.value,
grayscaling=True,
)
# Create sample network input from sample preprocessor output.
sample_processed_timestep = preprocessor_builder()(env.reset())
sample_processed_timestep = typing.cast(
dm_env.TimeStep, sample_processed_timestep
)
sample_network_input = sample_processed_timestep.observation
chex.assert_shape(
sample_network_input,
(
_ENVIRONMENT_HEIGHT.value,
_ENVIRONMENT_WIDTH.value,
_NUM_STACKED_FRAMES.value,
),
)
exploration_epsilon_schedule = parts.LinearSchedule(
begin_t=int(
_MIN_REPLAY_CAPACITY_FRACTION.value
* _REPLAY_CAPACITY.value
* _NUM_ACTION_REPEATS.value
),
decay_steps=int(
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION.value
* _NUM_ITERATIONS.value
* _NUM_TRAIN_FRAMES.value
),
begin_value=_EXPLORATION_EPSILON_BEGIN_VALUE.value,
end_value=_EXPLORATION_EPSILON_END_VALUE.value,
)
if _COMPRESS_STATE.value:
def encoder(transition):
return transition._replace(
s_tm1=replay_lib.compress_array(transition.s_tm1),
s_t=replay_lib.compress_array(transition.s_t),
)
def decoder(transition):
return transition._replace(
s_tm1=replay_lib.uncompress_array(transition.s_tm1),
s_t=replay_lib.uncompress_array(transition.s_t),
)
else:
encoder = None
decoder = None
replay_structure = replay_lib.Transition(
s_tm1=None,
a_tm1=None,
r_t=None,
discount_t=None,
s_t=None,
)
replay = replay_lib.TransitionReplay(
_REPLAY_CAPACITY.value, replay_structure, random_state, encoder, decoder
)
optimizer = optax.adam(
learning_rate=_LEARNING_RATE.value, eps=_OPTIMIZER_EPSILON.value
)
if _MAX_GLOBAL_GRAD_NORM.value > 0:
optimizer = optax.chain(
optax.clip_by_global_norm(_MAX_GLOBAL_GRAD_NORM.value), optimizer
)
train_rng_key, eval_rng_key = jax.random.split(rng_key)
train_agent = agent.C51(
preprocessor=preprocessor_builder(),
sample_network_input=sample_network_input,
network=network,
support=support,
optimizer=optimizer,
transition_accumulator=replay_lib.TransitionAccumulator(),
replay=replay,
batch_size=_BATCH_SIZE.value,
exploration_epsilon=exploration_epsilon_schedule,
min_replay_capacity_fraction=_MIN_REPLAY_CAPACITY_FRACTION.value,
learn_period=_LEARN_PERIOD.value,
target_network_update_period=_TARGET_NETWORK_UPDATE_PERIOD.value,
rng_key=train_rng_key,
)
eval_agent = parts.EpsilonGreedyActor(
preprocessor=preprocessor_builder(),
network=network,
exploration_epsilon=_EVAL_EXPLORATION_EPSILON.value,
rng_key=eval_rng_key,
)
# Set up checkpointing.
checkpoint = parts.NullCheckpoint()
state = checkpoint.state
state.iteration = 0
state.train_agent = train_agent
state.eval_agent = eval_agent
state.random_state = random_state
state.writer = writer
if checkpoint.can_be_restored():
checkpoint.restore()
while state.iteration <= _NUM_ITERATIONS.value:
# New environment for each iteration to allow for determinism if preempted.
env = environment_builder()
logging.info('Training iteration %d.', state.iteration)
train_seq = parts.run_loop(train_agent, env, _MAX_FRAMES_PER_EPISODE.value)
num_train_frames = 0 if state.iteration == 0 else _NUM_TRAIN_FRAMES.value
train_seq_truncated = itertools.islice(train_seq, num_train_frames)
train_trackers = parts.make_default_trackers(train_agent)
train_stats = parts.generate_statistics(train_trackers, train_seq_truncated)
logging.info('Evaluation iteration %d.', state.iteration)
eval_agent.network_params = train_agent.online_params
eval_seq = parts.run_loop(eval_agent, env, _MAX_FRAMES_PER_EPISODE.value)
eval_seq_truncated = itertools.islice(eval_seq, _NUM_EVAL_FRAMES.value)
eval_trackers = parts.make_default_trackers(eval_agent)
eval_stats = parts.generate_statistics(eval_trackers, eval_seq_truncated)
# Logging and checkpointing.
human_normalized_score = atari_data.get_human_normalized_score(
_ENVIRONMENT_NAME.value, eval_stats['episode_return']
)
capped_human_normalized_score = np.amin([1.0, human_normalized_score])
log_output = [
('iteration', state.iteration, '%3d'),
('frame', state.iteration * _NUM_TRAIN_FRAMES.value, '%5d'),
('eval_episode_return', eval_stats['episode_return'], '% 2.2f'),
('train_episode_return', train_stats['episode_return'], '% 2.2f'),
('eval_num_episodes', eval_stats['num_episodes'], '%3d'),
('train_num_episodes', train_stats['num_episodes'], '%3d'),
('eval_frame_rate', eval_stats['step_rate'], '%4.0f'),
('train_frame_rate', train_stats['step_rate'], '%4.0f'),
('train_exploration_epsilon', train_agent.exploration_epsilon, '%.3f'),
('train_state_value', train_stats['state_value'], '%.3f'),
('normalized_return', human_normalized_score, '%.3f'),
('capped_normalized_return', capped_human_normalized_score, '%.3f'),
('human_gap', 1.0 - capped_human_normalized_score, '%.3f'),
]
log_output_str = ', '.join(('%s: ' + f) % (n, v) for n, v, f in log_output)
logging.info(log_output_str)
writer.write(collections.OrderedDict((n, v) for n, v, _ in log_output))
state.iteration += 1
checkpoint.save()
writer.close()
if __name__ == '__main__':
config.update('jax_platform_name', 'gpu') # Default to GPU.
config.update('jax_numpy_rank_promotion', 'raise')
config.config_with_absl()
app.run(main)
| dqn_zoo-master | dqn_zoo/c51/run_atari.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Rainbow agent classes."""
# pylint: disable=g-bad-import-order
from typing import Any, Mapping, Text
from absl import logging
import chex
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
# Batch variant of categorical_double_q_learning with fixed atoms across batch.
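# As in C51, the two `None` entries keep the atom support shared across the
# batch; the final batched argument is the online network's Q-values at s_t,
# used only to select the argmax action (double Q-learning).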
_batch_categorical_double_q_learning = jax.vmap(
rlax.categorical_double_q_learning, in_axes=(None, 0, 0, 0, 0, None, 0, 0)
)
class Rainbow(parts.Agent):
"""Rainbow agent."""
def __init__(
self,
preprocessor: processors.Processor,
sample_network_input: jnp.ndarray,
network: parts.Network,
support: jnp.ndarray,
optimizer: optax.GradientTransformation,
transition_accumulator: Any,
replay: replay_lib.PrioritizedTransitionReplay,
batch_size: int,
min_replay_capacity_fraction: float,
learn_period: int,
target_network_update_period: int,
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._replay = replay
self._transition_accumulator = transition_accumulator
self._batch_size = batch_size
self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
self._learn_period = learn_period
self._target_network_update_period = target_network_update_period
# Initialize network parameters and optimizer.
self._rng_key, network_rng_key = jax.random.split(rng_key)
self._online_params = network.init(
network_rng_key, sample_network_input[None, ...]
)
self._target_params = self._online_params
self._opt_state = optimizer.init(self._online_params)
# Other agent state: last action, frame count, etc.
self._action = None
self._frame_t = -1 # Current frame index.
self._statistics = {'state_value': np.nan}
self._max_seen_priority = 1.0
# Define jitted loss, update, and policy functions here instead of as
# class methods, to emphasize that these are meant to be pure functions
# and should not access the agent object's state via `self`.
def loss_fn(online_params, target_params, transitions, weights, rng_key):
"""Calculates loss given network parameters and transitions."""
_, *apply_keys = jax.random.split(rng_key, 4)
logits_q_tm1 = network.apply(
online_params, apply_keys[0], transitions.s_tm1
).q_logits
q_t = network.apply(
online_params, apply_keys[1], transitions.s_t
).q_values
logits_q_target_t = network.apply(
target_params, apply_keys[2], transitions.s_t
).q_logits
losses = _batch_categorical_double_q_learning(
support,
logits_q_tm1,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
support,
logits_q_target_t,
q_t,
)
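      # Scale each per-transition loss by its importance-sampling weight to
      # correct for the non-uniform sampling of prioritized replay; the
      # unweighted losses are also returned so they can serve as new priorities.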
loss = jnp.mean(losses * weights)
chex.assert_shape((losses, weights), (self._batch_size,))
return loss, losses
def update(
rng_key, opt_state, online_params, target_params, transitions, weights
):
"""Computes learning update from batch of replay transitions."""
rng_key, update_key = jax.random.split(rng_key)
d_loss_d_params, losses = jax.grad(loss_fn, has_aux=True)(
online_params, target_params, transitions, weights, update_key
)
updates, new_opt_state = optimizer.update(d_loss_d_params, opt_state)
new_online_params = optax.apply_updates(online_params, updates)
return rng_key, new_opt_state, new_online_params, losses
self._update = jax.jit(update)
def select_action(rng_key, network_params, s_t):
"""Computes greedy (argmax) action wrt Q-values at given state."""
rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
a_t = rlax.greedy().sample(policy_key, q_t)
v_t = jnp.max(q_t, axis=-1)
return rng_key, a_t, v_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given timestep and potentially learns."""
self._frame_t += 1
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
action = self._action
else:
action = self._action = self._act(timestep)
for transition in self._transition_accumulator.step(timestep, action):
self._replay.add(transition, priority=self._max_seen_priority)
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
if self._frame_t % self._target_network_update_period == 0:
self._target_params = self._online_params
return action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
self._transition_accumulator.reset()
processors.reset(self._preprocessor)
self._action = None
def _act(self, timestep) -> parts.Action:
"""Selects action given timestep, according to greedy policy."""
s_t = timestep.observation
self._rng_key, a_t, v_t = self._select_action(
self._rng_key, self._online_params, s_t
)
a_t, v_t = jax.device_get((a_t, v_t))
self._statistics['state_value'] = v_t
return parts.Action(a_t)
def _learn(self) -> None:
"""Samples a batch of transitions from replay and learns from it."""
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions, indices, weights = self._replay.sample(self._batch_size)
self._rng_key, self._opt_state, self._online_params, losses = self._update(
self._rng_key,
self._opt_state,
self._online_params,
self._target_params,
transitions,
weights,
)
chex.assert_equal_shape((losses, weights))
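    # Use the absolute per-transition loss as the new (proportional) priority,
    # clipped so that a single outlier cannot dominate sampling.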
priorities = jnp.clip(jnp.abs(losses), 0.0, 100.0)
priorities = jax.device_get(priorities)
max_priority = priorities.max()
self._max_seen_priority = np.max([self._max_seen_priority, max_priority])
self._replay.update_priorities(indices, priorities)
@property
def online_params(self) -> parts.NetworkParams:
"""Returns current parameters of Q-network."""
return self._online_params
@property
def statistics(self) -> Mapping[Text, float]:
"""Returns current agent statistics as a dictionary."""
# Check for DeviceArrays in values as this can be very slow.
assert all(
not isinstance(x, jnp.DeviceArray) for x in self._statistics.values()
)
return self._statistics
@property
def importance_sampling_exponent(self) -> float:
"""Returns current importance sampling exponent of prioritized replay."""
return self._replay.importance_sampling_exponent
@property
def max_seen_priority(self) -> float:
"""Returns maximum seen replay priority up until this time."""
return self._max_seen_priority
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
state = {
'rng_key': self._rng_key,
'frame_t': self._frame_t,
'opt_state': self._opt_state,
'online_params': self._online_params,
'target_params': self._target_params,
'replay': self._replay.get_state(),
'max_seen_priority': self._max_seen_priority,
}
return state
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self._frame_t = state['frame_t']
self._opt_state = jax.device_put(state['opt_state'])
self._online_params = jax.device_put(state['online_params'])
self._target_params = jax.device_put(state['target_params'])
self._replay.set_state(state['replay'])
self._max_seen_priority = state['max_seen_priority']
| dqn_zoo-master | dqn_zoo/rainbow/agent.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Rainbow."""
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
from dqn_zoo.rainbow import run_atari
from absl.testing import absltest
FLAGS = flags.FLAGS
class RunAtariTest(absltest.TestCase):
@flagsaver.flagsaver
def test_can_run_agent(self):
FLAGS.environment_name = 'pong'
FLAGS.replay_capacity = 1000
FLAGS.target_network_update_period = 3
FLAGS.num_train_frames = 100
FLAGS.num_eval_frames = 50
FLAGS.num_iterations = 3
FLAGS.batch_size = 10
FLAGS.learn_period = 2
run_atari.main(None)
if __name__ == '__main__':
absltest.main()
| dqn_zoo-master | dqn_zoo/rainbow/run_atari_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Rainbow agent training on Atari.
From the paper "Rainbow: Combining Improvements in Deep Reinforcement Learning"
http://arxiv.org/abs/1710.02298.
This agent combines:
* Double Q-learning
* Prioritized experience replay (proportional variant)
* Dueling networks
* Multi-step learning
* Distributional RL (C51)
* Noisy networks
"""
# pylint: disable=g-bad-import-order
import collections
import itertools
import sys
import typing
from absl import app
from absl import flags
from absl import logging
import chex
import dm_env
import haiku as hk
import jax
from jax.config import config
import jax.numpy as jnp
import numpy as np
import optax
from dqn_zoo import atari_data
from dqn_zoo import gym_atari
from dqn_zoo import networks
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
from dqn_zoo.rainbow import agent
# Relevant flag values are expressed in terms of environment frames.
FLAGS = flags.FLAGS
_ENVIRONMENT_NAME = flags.DEFINE_string('environment_name', 'pong', '')
_ENVIRONMENT_HEIGHT = flags.DEFINE_integer('environment_height', 84, '')
_ENVIRONMENT_WIDTH = flags.DEFINE_integer('environment_width', 84, '')
_REPLAY_CAPACITY = flags.DEFINE_integer('replay_capacity', int(1e6), '')
_COMPRESS_STATE = flags.DEFINE_bool('compress_state', True, '')
_MIN_REPLAY_CAPACITY_FRACTION = flags.DEFINE_float(
'min_replay_capacity_fraction', 0.02, ''
)
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 32, '')
_MAX_FRAMES_PER_EPISODE = flags.DEFINE_integer(
'max_frames_per_episode', 108000, ''
) # 30 mins.
_NUM_ACTION_REPEATS = flags.DEFINE_integer('num_action_repeats', 4, '')
_NUM_STACKED_FRAMES = flags.DEFINE_integer('num_stacked_frames', 4, '')
_TARGET_NETWORK_UPDATE_PERIOD = flags.DEFINE_integer(
'target_network_update_period', int(3.2e4), ''
)
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 0.00025 / 4, '')
_OPTIMIZER_EPSILON = flags.DEFINE_float('optimizer_epsilon', 0.005 / 32, '')
_ADDITIONAL_DISCOUNT = flags.DEFINE_float('additional_discount', 0.99, '')
_MAX_ABS_REWARD = flags.DEFINE_float('max_abs_reward', 1.0, '')
_MAX_GLOBAL_GRAD_NORM = flags.DEFINE_float('max_global_grad_norm', 10.0, '')
_SEED = flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism.
_NUM_ITERATIONS = flags.DEFINE_integer('num_iterations', 200, '')
_NUM_TRAIN_FRAMES = flags.DEFINE_integer(
'num_train_frames', int(1e6), ''
) # Per iteration.
_NUM_EVAL_FRAMES = flags.DEFINE_integer(
'num_eval_frames', int(5e5), ''
) # Per iteration.
_LEARN_PERIOD = flags.DEFINE_integer('learn_period', 16, '')
_RESULTS_CSV_PATH = flags.DEFINE_string(
'results_csv_path', '/tmp/results.csv', ''
)
_PRIORITY_EXPONENT = flags.DEFINE_float('priority_exponent', 0.5, '')
_IMPORTANCE_SAMPLING_EXPONENT_BEGIN_VALUE = flags.DEFINE_float(
'importance_sampling_exponent_begin_value', 0.4, ''
)
_IMPORTANCE_SAMPLING_EXPONENT_END_VALUE = flags.DEFINE_float(
'importance_sampling_exponent_end_value', 1.0, ''
)
_UNIFORM_SAMPLE_PROBABILITY = flags.DEFINE_float(
'uniform_sample_probability', 1e-3, ''
)
_NORMALIZE_WEIGHTS = flags.DEFINE_bool('normalize_weights', True, '')
_N_STEPS = flags.DEFINE_integer('n_steps', 3, '')
_VMAX = flags.DEFINE_float('vmax', 10.0, '')
_NUM_ATOMS = flags.DEFINE_integer('num_atoms', 51, '')
_NOISY_WEIGHT_INIT = flags.DEFINE_float('noisy_weight_init', 0.1, '')
def main(argv):
"""Trains Rainbow agent on Atari."""
del argv
logging.info(
'Rainbow on Atari on %s.', jax.lib.xla_bridge.get_backend().platform
)
random_state = np.random.RandomState(_SEED.value)
rng_key = jax.random.PRNGKey(
random_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64)
)
if _RESULTS_CSV_PATH.value:
writer = parts.CsvWriter(_RESULTS_CSV_PATH.value)
else:
writer = parts.NullWriter()
def environment_builder():
"""Creates Atari environment."""
env = gym_atari.GymAtari(
_ENVIRONMENT_NAME.value, seed=random_state.randint(1, 2**32)
)
return gym_atari.RandomNoopsEnvironmentWrapper(
env,
min_noop_steps=1,
max_noop_steps=30,
seed=random_state.randint(1, 2**32),
)
env = environment_builder()
logging.info('Environment: %s', _ENVIRONMENT_NAME.value)
logging.info('Action spec: %s', env.action_spec())
logging.info('Observation spec: %s', env.observation_spec())
num_actions = env.action_spec().num_values
support = jnp.linspace(-_VMAX.value, _VMAX.value, _NUM_ATOMS.value)
network_fn = networks.rainbow_atari_network(
num_actions, support, _NOISY_WEIGHT_INIT.value
)
network = hk.transform(network_fn)
def preprocessor_builder():
return processors.atari(
additional_discount=_ADDITIONAL_DISCOUNT.value,
max_abs_reward=_MAX_ABS_REWARD.value,
resize_shape=(_ENVIRONMENT_HEIGHT.value, _ENVIRONMENT_WIDTH.value),
num_action_repeats=_NUM_ACTION_REPEATS.value,
num_pooled_frames=2,
zero_discount_on_life_loss=True,
num_stacked_frames=_NUM_STACKED_FRAMES.value,
grayscaling=True,
)
# Create sample network input from sample preprocessor output.
sample_processed_timestep = preprocessor_builder()(env.reset())
sample_processed_timestep = typing.cast(
dm_env.TimeStep, sample_processed_timestep
)
sample_network_input = sample_processed_timestep.observation
chex.assert_shape(
sample_network_input,
(
_ENVIRONMENT_HEIGHT.value,
_ENVIRONMENT_WIDTH.value,
_NUM_STACKED_FRAMES.value,
),
)
# Note the t in the replay is not exactly aligned with the agent t.
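  # The exponent is annealed towards 1.0 so that the importance-sampling
  # correction becomes fully unbiased by the end of training, following the
  # prioritized experience replay paper.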
importance_sampling_exponent_schedule = parts.LinearSchedule(
begin_t=int(_MIN_REPLAY_CAPACITY_FRACTION.value * _REPLAY_CAPACITY.value),
end_t=(
_NUM_ITERATIONS.value
* int(_NUM_TRAIN_FRAMES.value / _NUM_ACTION_REPEATS.value)
),
begin_value=_IMPORTANCE_SAMPLING_EXPONENT_BEGIN_VALUE.value,
end_value=_IMPORTANCE_SAMPLING_EXPONENT_END_VALUE.value,
)
if _COMPRESS_STATE.value:
def encoder(transition):
return transition._replace(
s_tm1=replay_lib.compress_array(transition.s_tm1),
s_t=replay_lib.compress_array(transition.s_t),
)
def decoder(transition):
return transition._replace(
s_tm1=replay_lib.uncompress_array(transition.s_tm1),
s_t=replay_lib.uncompress_array(transition.s_t),
)
else:
encoder = None
decoder = None
replay_structure = replay_lib.Transition(
s_tm1=None,
a_tm1=None,
r_t=None,
discount_t=None,
s_t=None,
)
transition_accumulator = replay_lib.NStepTransitionAccumulator(_N_STEPS.value)
replay = replay_lib.PrioritizedTransitionReplay(
_REPLAY_CAPACITY.value,
replay_structure,
_PRIORITY_EXPONENT.value,
importance_sampling_exponent_schedule,
_UNIFORM_SAMPLE_PROBABILITY.value,
_NORMALIZE_WEIGHTS.value,
random_state,
encoder,
decoder,
)
optimizer = optax.adam(
learning_rate=_LEARNING_RATE.value, eps=_OPTIMIZER_EPSILON.value
)
if _MAX_GLOBAL_GRAD_NORM.value > 0:
optimizer = optax.chain(
optax.clip_by_global_norm(_MAX_GLOBAL_GRAD_NORM.value), optimizer
)
train_rng_key, eval_rng_key = jax.random.split(rng_key)
train_agent = agent.Rainbow(
preprocessor=preprocessor_builder(),
sample_network_input=sample_network_input,
network=network,
support=support,
optimizer=optimizer,
transition_accumulator=transition_accumulator,
replay=replay,
batch_size=_BATCH_SIZE.value,
min_replay_capacity_fraction=_MIN_REPLAY_CAPACITY_FRACTION.value,
learn_period=_LEARN_PERIOD.value,
target_network_update_period=_TARGET_NETWORK_UPDATE_PERIOD.value,
rng_key=train_rng_key,
)
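  # Rainbow explores via noisy network parameters rather than epsilon-greedy,
  # so the evaluation actor (like the training policy) acts greedily with
  # epsilon set to 0.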
eval_agent = parts.EpsilonGreedyActor(
preprocessor=preprocessor_builder(),
network=network,
exploration_epsilon=0,
rng_key=eval_rng_key,
)
# Set up checkpointing.
checkpoint = parts.NullCheckpoint()
state = checkpoint.state
state.iteration = 0
state.train_agent = train_agent
state.eval_agent = eval_agent
state.random_state = random_state
state.writer = writer
if checkpoint.can_be_restored():
checkpoint.restore()
while state.iteration <= _NUM_ITERATIONS.value:
# New environment for each iteration to allow for determinism if preempted.
env = environment_builder()
logging.info('Training iteration %d.', state.iteration)
train_seq = parts.run_loop(train_agent, env, _MAX_FRAMES_PER_EPISODE.value)
num_train_frames = 0 if state.iteration == 0 else _NUM_TRAIN_FRAMES.value
train_seq_truncated = itertools.islice(train_seq, num_train_frames)
train_trackers = parts.make_default_trackers(train_agent)
train_stats = parts.generate_statistics(train_trackers, train_seq_truncated)
logging.info('Evaluation iteration %d.', state.iteration)
eval_agent.network_params = train_agent.online_params
eval_seq = parts.run_loop(eval_agent, env, _MAX_FRAMES_PER_EPISODE.value)
eval_seq_truncated = itertools.islice(eval_seq, _NUM_EVAL_FRAMES.value)
eval_trackers = parts.make_default_trackers(eval_agent)
eval_stats = parts.generate_statistics(eval_trackers, eval_seq_truncated)
# Logging and checkpointing.
human_normalized_score = atari_data.get_human_normalized_score(
_ENVIRONMENT_NAME.value, eval_stats['episode_return']
)
capped_human_normalized_score = np.amin([1.0, human_normalized_score])
log_output = [
('iteration', state.iteration, '%3d'),
('frame', state.iteration * _NUM_TRAIN_FRAMES.value, '%5d'),
('eval_episode_return', eval_stats['episode_return'], '% 2.2f'),
('train_episode_return', train_stats['episode_return'], '% 2.2f'),
('eval_num_episodes', eval_stats['num_episodes'], '%3d'),
('train_num_episodes', train_stats['num_episodes'], '%3d'),
('eval_frame_rate', eval_stats['step_rate'], '%4.0f'),
('train_frame_rate', train_stats['step_rate'], '%4.0f'),
('train_state_value', train_stats['state_value'], '%.3f'),
(
'importance_sampling_exponent',
train_agent.importance_sampling_exponent,
'%.3f',
),
('max_seen_priority', train_agent.max_seen_priority, '%.3f'),
('normalized_return', human_normalized_score, '%.3f'),
('capped_normalized_return', capped_human_normalized_score, '%.3f'),
('human_gap', 1.0 - capped_human_normalized_score, '%.3f'),
]
log_output_str = ', '.join(('%s: ' + f) % (n, v) for n, v, f in log_output)
logging.info(log_output_str)
writer.write(collections.OrderedDict((n, v) for n, v, _ in log_output))
state.iteration += 1
checkpoint.save()
writer.close()
if __name__ == '__main__':
config.update('jax_platform_name', 'gpu') # Default to GPU.
config.update('jax_numpy_rank_promotion', 'raise')
config.config_with_absl()
app.run(main)
| dqn_zoo-master | dqn_zoo/rainbow/run_atari.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IQN agent classes."""
# pylint: disable=g-bad-import-order
from typing import Any, Callable, Mapping, Text, Tuple
from absl import logging
import chex
import distrax
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from dqn_zoo import networks
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
# Batch variant of quantile_q_learning.
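# All per-transition arguments (quantile values, taus, actions, rewards,
# discounts) are batched; only the Huber parameter, the trailing `None` in
# in_axes, is shared across the batch.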
_batch_quantile_q_learning = jax.vmap(
rlax.quantile_q_learning, in_axes=(0, 0, 0, 0, 0, 0, 0, None)
)
IqnInputs = networks.IqnInputs
def _sample_tau(
rng_key: parts.PRNGKey,
shape: Tuple[int, ...],
) -> jnp.ndarray:
"""Samples tau values uniformly between 0 and 1."""
return jax.random.uniform(rng_key, shape=shape)
class IqnEpsilonGreedyActor(parts.Agent):
"""Agent that acts with a given set of IQN-network parameters and epsilon.
Network parameters are set on the actor. The actor can be checkpointed for
determinism.
"""
def __init__(
self,
preprocessor: processors.Processor,
network: parts.Network,
exploration_epsilon: float,
tau_samples: int,
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._rng_key = rng_key
self._action = None
self.network_params = None # Nest of arrays (haiku.Params), set externally.
def select_action(rng_key, network_params, s_t):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, tau_key, apply_key, policy_key = jax.random.split(rng_key, 4)
tau_t = _sample_tau(tau_key, (1, tau_samples))
q_t = network.apply(
network_params, apply_key, IqnInputs(s_t[None, ...], tau_t)
).q_values[0]
a_t = distrax.EpsilonGreedy(q_t, exploration_epsilon).sample(
seed=policy_key
)
return rng_key, a_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given a timestep."""
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
return self._action
s_t = timestep.observation
self._rng_key, a_t = self._select_action(
self._rng_key, self.network_params, s_t
)
self._action = parts.Action(jax.device_get(a_t))
return self._action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
processors.reset(self._preprocessor)
self._action = None
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
# State contains network params to make agent easy to run from a checkpoint.
return {
'rng_key': self._rng_key,
'network_params': self.network_params,
}
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self.network_params = state['network_params']
@property
def statistics(self) -> Mapping[Text, float]:
return {}
class Iqn(parts.Agent):
"""Implicit Quantile Network agent."""
def __init__(
self,
preprocessor: processors.Processor,
sample_network_input: IqnInputs,
network: parts.Network,
optimizer: optax.GradientTransformation,
transition_accumulator: Any,
replay: replay_lib.TransitionReplay,
batch_size: int,
exploration_epsilon: Callable[[int], float],
min_replay_capacity_fraction: float,
learn_period: int,
target_network_update_period: int,
huber_param: float,
tau_samples_policy: int,
tau_samples_s_tm1: int,
tau_samples_s_t: int,
rng_key: parts.PRNGKey,
):
self._preprocessor = preprocessor
self._replay = replay
self._transition_accumulator = transition_accumulator
self._batch_size = batch_size
self._exploration_epsilon = exploration_epsilon
self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
self._learn_period = learn_period
self._target_network_update_period = target_network_update_period
# Initialize network parameters and optimizer.
self._rng_key, network_rng_key = jax.random.split(rng_key)
self._online_params = network.init(
network_rng_key,
jax.tree_map(lambda x: x[None, ...], sample_network_input),
)
self._target_params = self._online_params
self._opt_state = optimizer.init(self._online_params)
# Other agent state: last action, frame count, etc.
self._action = None
self._frame_t = -1 # Current frame index.
self._statistics = {'state_value': np.nan}
# Define jitted loss, update, and policy functions here instead of as
# class methods, to emphasize that these are meant to be pure functions
# and should not access the agent object's state via `self`.
def loss_fn(online_params, target_params, transitions, rng_key):
"""Calculates loss given network parameters and transitions."""
# Sample tau values for q_tm1, q_t_selector, q_t.
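      # Three independent sets of taus are drawn: for the online quantile
      # estimates at s_tm1, for the target distribution used to select the
      # greedy action at s_t, and for the target evaluation distribution.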
batch_size = self._batch_size
rng_key, *sample_keys = jax.random.split(rng_key, 4)
tau_tm1 = _sample_tau(sample_keys[0], (batch_size, tau_samples_s_tm1))
tau_t_selector = _sample_tau(
sample_keys[1], (batch_size, tau_samples_policy)
)
tau_t = _sample_tau(sample_keys[2], (batch_size, tau_samples_s_t))
# Compute Q value distributions.
_, *apply_keys = jax.random.split(rng_key, 4)
dist_q_tm1 = network.apply(
online_params, apply_keys[0], IqnInputs(transitions.s_tm1, tau_tm1)
).q_dist # pytype: disable=wrong-arg-types # jax-ndarray
dist_q_t_selector = network.apply(
target_params,
apply_keys[1],
IqnInputs(transitions.s_t, tau_t_selector),
).q_dist # pytype: disable=wrong-arg-types # jax-ndarray
dist_q_target_t = network.apply(
target_params, apply_keys[2], IqnInputs(transitions.s_t, tau_t)
).q_dist # pytype: disable=wrong-arg-types # jax-ndarray
losses = _batch_quantile_q_learning(
dist_q_tm1,
tau_tm1,
transitions.a_tm1,
transitions.r_t,
transitions.discount_t,
dist_q_t_selector,
dist_q_target_t,
huber_param,
)
chex.assert_shape(losses, (self._batch_size,))
loss = jnp.mean(losses)
return loss
def update(rng_key, opt_state, online_params, target_params, transitions):
"""Computes learning update from batch of replay transitions."""
rng_key, update_key = jax.random.split(rng_key)
d_loss_d_params = jax.grad(loss_fn)(
online_params, target_params, transitions, update_key
)
updates, new_opt_state = optimizer.update(d_loss_d_params, opt_state)
new_online_params = optax.apply_updates(online_params, updates)
return rng_key, new_opt_state, new_online_params
self._update = jax.jit(update)
def select_action(rng_key, network_params, s_t, exploration_epsilon):
"""Samples action from eps-greedy policy wrt Q-values at given state."""
rng_key, sample_key, apply_key, policy_key = jax.random.split(rng_key, 4)
tau_t = _sample_tau(sample_key, (1, tau_samples_policy))
q_t = network.apply(
network_params, apply_key, IqnInputs(s_t[None, ...], tau_t)
      ).q_values[0]  # pytype: disable=wrong-arg-types  # jax-ndarray
a_t = distrax.EpsilonGreedy(q_t, exploration_epsilon).sample(
seed=policy_key
)
v_t = jnp.max(q_t, axis=-1)
return rng_key, a_t, v_t
self._select_action = jax.jit(select_action)
def step(self, timestep: dm_env.TimeStep) -> parts.Action:
"""Selects action given timestep and potentially learns."""
self._frame_t += 1
timestep = self._preprocessor(timestep)
if timestep is None: # Repeat action.
action = self._action
else:
action = self._action = self._act(timestep)
for transition in self._transition_accumulator.step(timestep, action):
self._replay.add(transition)
if self._replay.size < self._min_replay_capacity:
return action
if self._frame_t % self._learn_period == 0:
self._learn()
if self._frame_t % self._target_network_update_period == 0:
self._target_params = self._online_params
return action
def reset(self) -> None:
"""Resets the agent's episodic state such as frame stack and action repeat.
This method should be called at the beginning of every episode.
"""
self._transition_accumulator.reset()
processors.reset(self._preprocessor)
self._action = None
def _act(self, timestep) -> parts.Action:
"""Selects action given timestep, according to epsilon-greedy policy."""
s_t = timestep.observation
self._rng_key, a_t, v_t = self._select_action(
self._rng_key, self._online_params, s_t, self.exploration_epsilon
)
a_t, v_t = jax.device_get((a_t, v_t))
self._statistics['state_value'] = v_t
return parts.Action(a_t)
def _learn(self) -> None:
"""Samples a batch of transitions from replay and learns from it."""
logging.log_first_n(logging.INFO, 'Begin learning', 1)
transitions = self._replay.sample(self._batch_size)
self._rng_key, self._opt_state, self._online_params = self._update(
self._rng_key,
self._opt_state,
self._online_params,
self._target_params,
transitions,
)
@property
def online_params(self) -> parts.NetworkParams:
"""Returns current parameters of Q-network."""
return self._online_params
@property
def statistics(self) -> Mapping[Text, float]:
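    """Returns current agent statistics as a dictionary."""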
# Check for DeviceArrays in values as this can be very slow.
assert all(
not isinstance(x, jnp.DeviceArray) for x in self._statistics.values()
)
return self._statistics
@property
def exploration_epsilon(self) -> float:
"""Returns epsilon value currently used by (eps-greedy) behavior policy."""
return self._exploration_epsilon(self._frame_t)
def get_state(self) -> Mapping[Text, Any]:
"""Retrieves agent state as a dictionary (e.g. for serialization)."""
state = {
'rng_key': self._rng_key,
'frame_t': self._frame_t,
'opt_state': self._opt_state,
'online_params': self._online_params,
'target_params': self._target_params,
'replay': self._replay.get_state(),
}
return state
def set_state(self, state: Mapping[Text, Any]) -> None:
"""Sets agent state from a (potentially de-serialized) dictionary."""
self._rng_key = state['rng_key']
self._frame_t = state['frame_t']
self._opt_state = jax.device_put(state['opt_state'])
self._online_params = jax.device_put(state['online_params'])
self._target_params = jax.device_put(state['target_params'])
self._replay.set_state(state['replay'])
| dqn_zoo-master | dqn_zoo/iqn/agent.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for IQN."""
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
from jax.config import config
from dqn_zoo.iqn import run_atari
from absl.testing import absltest
FLAGS = flags.FLAGS
class RunAtariTest(absltest.TestCase):
@flagsaver.flagsaver
def test_can_run_agent(self):
FLAGS.environment_name = 'pong'
FLAGS.replay_capacity = 1000
FLAGS.exploration_epsilon_decay_frame_fraction = 0.1
FLAGS.target_network_update_period = 4
FLAGS.num_train_frames = 100
FLAGS.num_eval_frames = 50
FLAGS.num_iterations = 2
FLAGS.batch_size = 10
FLAGS.learn_period = 2
FLAGS.tau_latent_dim = 5
FLAGS.tau_samples_policy = 7
FLAGS.tau_samples_s_tm1 = 9
FLAGS.tau_samples_s_t = 11
run_atari.main(None)
if __name__ == '__main__':
config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| dqn_zoo-master | dqn_zoo/iqn/run_atari_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An IQN agent training on Atari.
From the paper "Implicit Quantile Networks for Distributional Reinforcement
Learning" http://arxiv.org/abs/1806.06923.
"""
# pylint: disable=g-bad-import-order
import collections
import itertools
import sys
import typing
from absl import app
from absl import flags
from absl import logging
import chex
import dm_env
import haiku as hk
import jax
from jax.config import config
import numpy as np
import optax
from dqn_zoo import atari_data
from dqn_zoo import gym_atari
from dqn_zoo import networks
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
from dqn_zoo.iqn import agent
# Relevant flag values are expressed in terms of environment frames.
FLAGS = flags.FLAGS
_ENVIRONMENT_NAME = flags.DEFINE_string('environment_name', 'pong', '')
_ENVIRONMENT_HEIGHT = flags.DEFINE_integer('environment_height', 84, '')
_ENVIRONMENT_WIDTH = flags.DEFINE_integer('environment_width', 84, '')
_REPLAY_CAPACITY = flags.DEFINE_integer('replay_capacity', int(1e6), '')
_COMPRESS_STATE = flags.DEFINE_bool('compress_state', True, '')
_MIN_REPLAY_CAPACITY_FRACTION = flags.DEFINE_float(
'min_replay_capacity_fraction', 0.02, ''
)
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 32, '')
_MAX_FRAMES_PER_EPISODE = flags.DEFINE_integer(
'max_frames_per_episode', 108000, ''
) # 30 mins.
_NUM_ACTION_REPEATS = flags.DEFINE_integer('num_action_repeats', 4, '')
_NUM_STACKED_FRAMES = flags.DEFINE_integer('num_stacked_frames', 4, '')
_EXPLORATION_EPSILON_BEGIN_VALUE = flags.DEFINE_float(
'exploration_epsilon_begin_value', 1.0, ''
)
_EXPLORATION_EPSILON_END_VALUE = flags.DEFINE_float(
'exploration_epsilon_end_value', 0.01, ''
)
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION = flags.DEFINE_float(
'exploration_epsilon_decay_frame_fraction', 0.02, ''
)
_EVAL_EXPLORATION_EPSILON = flags.DEFINE_float(
'eval_exploration_epsilon', 0.001, ''
)
_TARGET_NETWORK_UPDATE_PERIOD = flags.DEFINE_integer(
'target_network_update_period', int(4e4), ''
)
_HUBER_PARAM = flags.DEFINE_float('huber_param', 1.0, '')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 0.00005, '')
_OPTIMIZER_EPSILON = flags.DEFINE_float('optimizer_epsilon', 0.01 / 32, '')
_ADDITIONAL_DISCOUNT = flags.DEFINE_float('additional_discount', 0.99, '')
_MAX_ABS_REWARD = flags.DEFINE_float('max_abs_reward', 1.0, '')
_SEED = flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism.
_NUM_ITERATIONS = flags.DEFINE_integer('num_iterations', 200, '')
_NUM_TRAIN_FRAMES = flags.DEFINE_integer(
'num_train_frames', int(1e6), ''
) # Per iteration.
_NUM_EVAL_FRAMES = flags.DEFINE_integer(
'num_eval_frames', int(5e5), ''
) # Per iteration.
_LEARN_PERIOD = flags.DEFINE_integer('learn_period', 16, '')
_RESULTS_CSV_PATH = flags.DEFINE_string(
'results_csv_path', '/tmp/results.csv', ''
)
_TAU_LATENT_DIM = flags.DEFINE_integer('tau_latent_dim', 64, '')
_TAU_SAMPLES_POLICY = flags.DEFINE_integer('tau_samples_policy', 64, '')
_TAU_SAMPLES_S_TM1 = flags.DEFINE_integer('tau_samples_s_tm1', 64, '')
_TAU_SAMPLES_S_T = flags.DEFINE_integer('tau_samples_s_t', 64, '')
def main(argv):
"""Trains IQN agent on Atari."""
del argv
logging.info('IQN on Atari on %s.', jax.lib.xla_bridge.get_backend().platform)
random_state = np.random.RandomState(_SEED.value)
rng_key = jax.random.PRNGKey(
random_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64)
)
if _RESULTS_CSV_PATH.value:
writer = parts.CsvWriter(_RESULTS_CSV_PATH.value)
else:
writer = parts.NullWriter()
def environment_builder():
"""Creates Atari environment."""
env = gym_atari.GymAtari(
_ENVIRONMENT_NAME.value, seed=random_state.randint(1, 2**32)
)
return gym_atari.RandomNoopsEnvironmentWrapper(
env,
min_noop_steps=1,
max_noop_steps=30,
seed=random_state.randint(1, 2**32),
)
env = environment_builder()
logging.info('Environment: %s', _ENVIRONMENT_NAME.value)
logging.info('Action spec: %s', env.action_spec())
logging.info('Observation spec: %s', env.observation_spec())
num_actions = env.action_spec().num_values
network_fn = networks.iqn_atari_network(num_actions, _TAU_LATENT_DIM.value)
network = hk.transform(network_fn)
def preprocessor_builder():
return processors.atari(
additional_discount=_ADDITIONAL_DISCOUNT.value,
max_abs_reward=_MAX_ABS_REWARD.value,
resize_shape=(_ENVIRONMENT_HEIGHT.value, _ENVIRONMENT_WIDTH.value),
num_action_repeats=_NUM_ACTION_REPEATS.value,
num_pooled_frames=2,
zero_discount_on_life_loss=True,
num_stacked_frames=_NUM_STACKED_FRAMES.value,
grayscaling=True,
)
# Create sample network input from sample preprocessor output.
sample_processed_timestep = preprocessor_builder()(env.reset())
sample_processed_timestep = typing.cast(
dm_env.TimeStep, sample_processed_timestep
)
sample_network_input = agent.IqnInputs(
state=sample_processed_timestep.observation,
taus=np.zeros(1, dtype=np.float32),
)
chex.assert_shape(
sample_network_input.state,
(
_ENVIRONMENT_HEIGHT.value,
_ENVIRONMENT_WIDTH.value,
_NUM_STACKED_FRAMES.value,
),
)
exploration_epsilon_schedule = parts.LinearSchedule(
begin_t=int(
_MIN_REPLAY_CAPACITY_FRACTION.value
* _REPLAY_CAPACITY.value
* _NUM_ACTION_REPEATS.value
),
decay_steps=int(
_EXPLORATION_EPSILON_DECAY_FRAME_FRACTION.value
* _NUM_ITERATIONS.value
* _NUM_TRAIN_FRAMES.value
),
begin_value=_EXPLORATION_EPSILON_BEGIN_VALUE.value,
end_value=_EXPLORATION_EPSILON_END_VALUE.value,
)
if _COMPRESS_STATE.value:
def encoder(transition):
return transition._replace(
s_tm1=replay_lib.compress_array(transition.s_tm1),
s_t=replay_lib.compress_array(transition.s_t),
)
def decoder(transition):
return transition._replace(
s_tm1=replay_lib.uncompress_array(transition.s_tm1),
s_t=replay_lib.uncompress_array(transition.s_t),
)
else:
encoder = None
decoder = None
replay_structure = replay_lib.Transition(
s_tm1=None,
a_tm1=None,
r_t=None,
discount_t=None,
s_t=None,
)
replay = replay_lib.TransitionReplay(
_REPLAY_CAPACITY.value, replay_structure, random_state, encoder, decoder
)
optimizer = optax.adam(
learning_rate=_LEARNING_RATE.value, eps=_OPTIMIZER_EPSILON.value
)
train_rng_key, eval_rng_key = jax.random.split(rng_key)
train_agent = agent.Iqn(
preprocessor=preprocessor_builder(),
sample_network_input=sample_network_input,
network=network,
optimizer=optimizer,
transition_accumulator=replay_lib.TransitionAccumulator(),
replay=replay,
batch_size=_BATCH_SIZE.value,
exploration_epsilon=exploration_epsilon_schedule,
min_replay_capacity_fraction=_MIN_REPLAY_CAPACITY_FRACTION.value,
learn_period=_LEARN_PERIOD.value,
target_network_update_period=_TARGET_NETWORK_UPDATE_PERIOD.value,
huber_param=_HUBER_PARAM.value,
tau_samples_policy=_TAU_SAMPLES_POLICY.value,
tau_samples_s_tm1=_TAU_SAMPLES_S_TM1.value,
tau_samples_s_t=_TAU_SAMPLES_S_T.value,
rng_key=train_rng_key,
)
eval_agent = agent.IqnEpsilonGreedyActor(
preprocessor=preprocessor_builder(),
network=network,
exploration_epsilon=_EVAL_EXPLORATION_EPSILON.value,
tau_samples=_TAU_SAMPLES_POLICY.value,
rng_key=eval_rng_key,
)
# Set up checkpointing.
checkpoint = parts.NullCheckpoint()
state = checkpoint.state
state.iteration = 0
state.train_agent = train_agent
state.eval_agent = eval_agent
state.random_state = random_state
state.writer = writer
if checkpoint.can_be_restored():
checkpoint.restore()
while state.iteration <= _NUM_ITERATIONS.value:
# New environment for each iteration to allow for determinism if preempted.
env = environment_builder()
logging.info('Training iteration %d.', state.iteration)
train_seq = parts.run_loop(train_agent, env, _MAX_FRAMES_PER_EPISODE.value)
num_train_frames = 0 if state.iteration == 0 else _NUM_TRAIN_FRAMES.value
train_seq_truncated = itertools.islice(train_seq, num_train_frames)
train_trackers = parts.make_default_trackers(train_agent)
train_stats = parts.generate_statistics(train_trackers, train_seq_truncated)
logging.info('Evaluation iteration %d.', state.iteration)
eval_agent.network_params = train_agent.online_params
eval_seq = parts.run_loop(eval_agent, env, _MAX_FRAMES_PER_EPISODE.value)
eval_seq_truncated = itertools.islice(eval_seq, _NUM_EVAL_FRAMES.value)
eval_trackers = parts.make_default_trackers(eval_agent)
eval_stats = parts.generate_statistics(eval_trackers, eval_seq_truncated)
# Logging and checkpointing.
human_normalized_score = atari_data.get_human_normalized_score(
_ENVIRONMENT_NAME.value, eval_stats['episode_return']
)
capped_human_normalized_score = np.amin([1.0, human_normalized_score])
log_output = [
('iteration', state.iteration, '%3d'),
('frame', state.iteration * _NUM_TRAIN_FRAMES.value, '%5d'),
('eval_episode_return', eval_stats['episode_return'], '% 2.2f'),
('train_episode_return', train_stats['episode_return'], '% 2.2f'),
('eval_num_episodes', eval_stats['num_episodes'], '%3d'),
('train_num_episodes', train_stats['num_episodes'], '%3d'),
('eval_frame_rate', eval_stats['step_rate'], '%4.0f'),
('train_frame_rate', train_stats['step_rate'], '%4.0f'),
('train_exploration_epsilon', train_agent.exploration_epsilon, '%.3f'),
('train_state_value', train_stats['state_value'], '%.3f'),
('normalized_return', human_normalized_score, '%.3f'),
('capped_normalized_return', capped_human_normalized_score, '%.3f'),
('human_gap', 1.0 - capped_human_normalized_score, '%.3f'),
]
log_output_str = ', '.join(('%s: ' + f) % (n, v) for n, v, f in log_output)
logging.info(log_output_str)
writer.write(collections.OrderedDict((n, v) for n, v, _ in log_output))
state.iteration += 1
checkpoint.save()
writer.close()
if __name__ == '__main__':
config.update('jax_platform_name', 'gpu') # Default to GPU.
config.update('jax_numpy_rank_promotion', 'raise')
config.config_with_absl()
app.run(main)
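# Illustrative sketch (not part of the original script): a short debug
# invocation. The module path and flag values below are assumptions chosen
# for demonstration only; all flags shown are defined at the top of this file.
#
#   python -m dqn_zoo.iqn.run_atari \
#     --environment_name=pong \
#     --num_iterations=2 \
#     --num_train_frames=1000 \
#     --num_eval_frames=500 \
#     --results_csv_path=/tmp/results.csv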
| dqn_zoo-master | dqn_zoo/iqn/run_atari.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EnvLogger Tests."""
import concurrent.futures
import glob
import os
import pickle
import tempfile
import threading
from typing import List, Optional
from unittest import mock
import uuid
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
from dm_env import specs
from envlogger import environment_logger
from envlogger import reader
from envlogger import step_data
from envlogger.backends import backend_type
from envlogger.backends import in_memory_backend
from envlogger.backends import schedulers
from envlogger.converters import codec
from envlogger.converters import spec_codec
from envlogger.proto import storage_pb2
from envlogger.testing import catch_env
import numpy as np
import riegeli
class CustomSpecsEnvironment(dm_env.Environment):
"""An Environment that allows us to customize its specs."""
def __init__(self,
observation_spec,
action_spec,
reward_spec,
discount_spec,
episode_length=10):
self._observation_spec = observation_spec
self._action_spec = action_spec
self._reward_spec = reward_spec
self._discount_spec = discount_spec
self._episode_length = episode_length
self._step_counter = 0
def reset(self):
self._step_counter = 0
return dm_env.restart(123) # Return whatever, we won't check it.
def step(self, actions):
self._step_counter += 1
if self._step_counter >= self._episode_length:
return dm_env.termination(-1.0, 987)
return dm_env.transition(1.0, 321) # Return whatever, we won't check it.
def discount_spec(self):
return self._discount_spec
def reward_spec(self):
return self._reward_spec
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._action_spec
class RandomDataEnvironment(dm_env.Environment):
"""An Environment that produces random data of a particular shape."""
def __init__(self, data_size=1000, prob_episode_end=0.01):
self._data_size = data_size
self._prob_episode_end = prob_episode_end
def reset(self):
return dm_env.restart(self._obs())
def step(self, actions):
if np.random.rand() < self._prob_episode_end:
return dm_env.termination(1.0, self._obs())
return dm_env.transition(1.0, self._obs())
def observation_spec(self):
return specs.Array(shape=(self._data_size,), dtype=np.float32)
def action_spec(self):
return specs.Array(shape=(), dtype=np.int32)
def _obs(self):
return np.random.rand(self._data_size,)
def _train(env: dm_env.Environment,
num_episodes: int) -> List[step_data.StepData]:
logging.info('Training a random agent for %r episodes...', num_episodes)
num_actions = 3
episodes_data = []
for _ in range(num_episodes):
timestep = env.reset()
episodes_data.append(step_data.StepData(timestep, None, None))
while not timestep.last():
action = np.random.choice(num_actions)
timestep = env.step(action)
episodes_data.append(step_data.StepData(timestep, action, None))
logging.info('Done training a random agent for %r episodes.', num_episodes)
env.close()
return episodes_data
class EnvLoggerTest(parameterized.TestCase):
def setUp(self):
super(EnvLoggerTest, self).setUp()
self._temp_dir = tempfile.TemporaryDirectory(
dir=absltest.get_default_test_tmpdir())
self.dataset_path = os.path.join(self._temp_dir.name,
'environment_logger_test')
os.makedirs(self.dataset_path, exist_ok=True)
def tearDown(self):
self._temp_dir.cleanup()
super(EnvLoggerTest, self).tearDown()
@parameterized.named_parameters(
('bare_spec', specs.Array(shape=(1, 2, 3), dtype=np.int8)),
('bounded_spec',
specs.BoundedArray(shape=(4, 5), dtype=np.int8, minimum=10, maximum=50)),
('discrete_array_spec', specs.DiscreteArray(num_values=73)),
('list_spec', [
specs.Array(shape=(1, 2, 3), dtype=np.int8),
specs.Array(shape=(), dtype=np.float64)
]), ('tuple_spec',
(specs.Array(shape=(1,), dtype=np.float32),
specs.Array(shape=(1, 2, 3), dtype=np.int8))), ('dict_spec', {
'my_float': specs.Array(shape=(1,), dtype=np.float32),
'integers': specs.Array(shape=(1, 2, 3), dtype=np.int8),
}))
def test_specs_of_different_types_are_supported(self, spec):
"""Ensures that different spec types are supported."""
env = CustomSpecsEnvironment(
observation_spec=spec,
action_spec=spec,
reward_spec=spec,
discount_spec=spec)
env = environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
backend=backend_type.BackendType.RIEGELI)
_train(env, num_episodes=1)
with reader.Reader(self.dataset_path) as data_reader:
self.assertEqual(data_reader.observation_spec(), spec)
self.assertEqual(type(data_reader.observation_spec()), type(spec))
self.assertEqual(data_reader.action_spec(), spec)
self.assertEqual(type(data_reader.action_spec()), type(spec))
self.assertEqual(data_reader.reward_spec(), spec)
self.assertEqual(type(data_reader.reward_spec()), type(spec))
self.assertEqual(data_reader.discount_spec(), spec)
self.assertEqual(type(data_reader.discount_spec()), type(spec))
def test_different_specs_are_actually_different(self):
"""Ensures that different spec types are maintained."""
spec1 = specs.Array(shape=(1, 2, 3), dtype=np.int8)
spec2 = specs.Array(shape=(1,), dtype=np.int64)
spec3 = specs.BoundedArray(
shape=(4, 5, 6), dtype=np.float32, minimum=10.0, maximum=11.0)
spec4 = specs.DiscreteArray(num_values=321, dtype=np.int16)
env = CustomSpecsEnvironment(
observation_spec=spec1,
action_spec=spec2,
reward_spec=spec3,
discount_spec=spec4)
env = environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
backend=backend_type.BackendType.RIEGELI)
_train(env, num_episodes=1)
with reader.Reader(self.dataset_path) as data_reader:
self.assertEqual(data_reader.observation_spec(), spec1)
self.assertEqual(data_reader.action_spec(), spec2)
self.assertEqual(data_reader.reward_spec(), spec3)
self.assertEqual(data_reader.discount_spec(), spec4)
self.assertNotEqual(data_reader.observation_spec(),
data_reader.action_spec())
self.assertNotEqual(data_reader.observation_spec(),
data_reader.reward_spec())
self.assertNotEqual(data_reader.observation_spec(),
data_reader.discount_spec())
self.assertNotEqual(data_reader.action_spec(), data_reader.reward_spec())
self.assertNotEqual(data_reader.action_spec(),
data_reader.discount_spec())
self.assertNotEqual(data_reader.reward_spec(),
data_reader.discount_spec())
def test_metadata_is_available(self):
"""Ensures that if `metadata` is passed, it can be read."""
env = catch_env.Catch()
env = environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
metadata={'do_not_forget_me': 'i am important!'},
max_episodes_per_file=973,
writer_options='transpose,brotli:1,chunk_size:50M',
backend=backend_type.BackendType.RIEGELI)
_train(env, num_episodes=1)
with reader.Reader(data_directory=self.dataset_path) as data_reader:
metadata = data_reader.metadata()
environment_specs = metadata.pop('environment_specs')
for k, v in spec_codec.encode_environment_specs(env).items():
for spec_name, spec_value in v.items():
if isinstance(spec_value, np.ndarray):
np.testing.assert_array_equal(
environment_specs[k][spec_name], spec_value)
else:
self.assertEqual(environment_specs[k][spec_name], spec_value)
self.assertDictEqual(data_reader.metadata(),
{'do_not_forget_me': 'i am important!'})
def test_data_reader_get_timestep(self):
"""Ensures that we can fetch single timesteps from a Reader."""
num_episodes = 13
num_steps_per_episode = 10
num_steps = num_episodes * num_steps_per_episode
env = catch_env.Catch()
backend = in_memory_backend.InMemoryBackendWriter()
env = environment_logger.EnvLogger(
env, data_directory=self.dataset_path, backend=backend)
expected_data = _train(env, num_episodes=num_episodes)
self.assertLen(
expected_data,
num_steps,
msg=(f'We expect {num_steps} steps when running an actor for '
f'{num_episodes} episodes of {num_steps_per_episode} steps each.'))
data_reader = in_memory_backend.InMemoryBackendReader(backend)
self.assertLen(
data_reader.steps,
num_steps,
msg=(f'We expect {num_steps} steps when running an actor for '
f'{num_episodes} episodes of {num_steps_per_episode} steps each.'))
# All 130 steps should be accessible with __getitem__().
for i in range(num_steps):
np.testing.assert_equal(data_reader.steps[i], expected_data[i])
# All 130 steps should be accessible with __iter__().
step_index = 0
for step_index, (actual, expected) in enumerate(
zip(data_reader.steps, expected_data)):
np.testing.assert_equal(actual, expected)
self.assertEqual(step_index, num_steps - 1) # step_index is 0-based
def test_step_fn(self):
"""Checks that `step_fn` produces expected custom data."""
v = np.random.randint(1000)
expected_custom_data = list(range(v + 1, v + 1 + 20))
def increment_fn(unused_timestep, unused_action, unused_env):
"""A function that increments `v` then returns it."""
nonlocal v
v += 1
return v
env = catch_env.Catch()
env = environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
step_fn=increment_fn,
backend=backend_type.BackendType.RIEGELI)
_train(env, num_episodes=2)
actual_data = []
with reader.Reader(self.dataset_path) as data_reader:
tag_data = list(data_reader.steps)
actual_data += tag_data
self.assertLen(
data_reader.steps,
20,
msg='Expected 20 steps in total from an actor running 2 episodes '
'of 10 steps each.')
self.assertLen(
data_reader.episodes,
2,
msg='Expected 2 episodes in total from an actor running 2 '
'episodes.')
np.testing.assert_equal([x.custom_data for x in actual_data],
expected_custom_data)
def test_episode_fn(self):
"""Checks that `episode_fn` produces expected custom data."""
v = 100
def increment_fn(timestep, unused_action, unused_env) -> Optional[int]:
"""Increments `v` on the last timestep and returns it in even episodes."""
nonlocal v
if timestep.first():
v += 1
return np.int32(v) if v % 2 == 0 else None
env = catch_env.Catch()
env = environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
episode_fn=increment_fn,
backend=backend_type.BackendType.RIEGELI)
_train(env, num_episodes=11)
actual_metadata = []
with reader.Reader(self.dataset_path) as data_reader:
for metadata in data_reader.episode_metadata():
actual_metadata.append(metadata)
self.assertEqual(
actual_metadata,
[None, 102, None, 104, None, 106, None, 108, None, 110, None])
def test_truncated_trajectory(self):
"""Ensures that the reader handles a truncated trajectory."""
env = catch_env.Catch()
env = environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
max_episodes_per_file=2,
backend=backend_type.BackendType.RIEGELI)
expected_data = _train(env, num_episodes=5)
# Remove the last 10 steps from the last episode to simulate a truncated
# trajectory.
expected_data = expected_data[:-10]
# Truncate the last timestamp dir of the first (and only) actor.
first_actor = self.dataset_path
dir_contents = os.listdir(first_actor)
dir_contents = [
d for d in dir_contents if os.path.isdir(os.path.join(first_actor, d))
]
dir_contents = sorted(dir_contents)
last_timestamp_dir = os.path.join(first_actor, dir_contents[-1])
for fname in [
'steps.riegeli', 'step_offsets.riegeli', 'episode_metadata.riegeli',
'episode_index.riegeli'
]:
with open(os.path.join(last_timestamp_dir, fname), 'w') as f:
f.truncate()
actual_data = []
with reader.Reader(first_actor) as data_reader:
tag_data = list(data_reader.steps)
actual_data += tag_data
self.assertLen(
data_reader.steps,
40,
msg='Expected 40 steps in total from an actor running 4 episodes '
'of 10 steps each (last shard should not be included).')
self.assertLen(
data_reader.episodes,
4,
msg='Expected 4 episodes in total from an actor running 4 '
'episodes (last shard should not be included).')
np.testing.assert_equal(actual_data, expected_data)
def test_episode_starts_monotonically_increasing(self):
"""Ensures that all episode starts form an increasing sequence."""
env = RandomDataEnvironment(data_size=100, prob_episode_end=0.01)
env = environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
max_episodes_per_file=10_000_000_000,
flush_scheduler=schedulers.BernoulliStepScheduler(1.0 / 13),
backend=backend_type.BackendType.RIEGELI)
_train(env, num_episodes=100)
actor = self.dataset_path
dir_contents = os.listdir(actor)
dir_contents = [
d for d in dir_contents if os.path.isdir(os.path.join(actor, d))
]
for d in dir_contents:
timestamp_dir = os.path.join(actor, d)
episode_index_file = os.path.join(timestamp_dir,
'episode_index.riegeli')
with riegeli.RecordReader(open(episode_index_file,
'rb')) as riegeli_reader:
        previous = None
        for record in riegeli_reader.read_messages(storage_pb2.Datum):
          decoded = codec.decode_datum(record)
          for episode_start, _ in decoded:
            if previous is not None:
              self.assertGreater(episode_start, previous)
            previous = episode_start
with reader.Reader(actor) as tagreader:
for episode in tagreader.episodes:
self.assertGreaterEqual(len(episode), 0)
for episode_metadata in tagreader.episode_metadata():
self.assertIsNone(episode_metadata)
def test_logger_close(self):
"""Ensures that `.close()` is idempotent and can be called multiple times."""
env = catch_env.Catch()
env = environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
backend=backend_type.BackendType.RIEGELI,
)
_train(env, num_episodes=1)
for _ in range(10):
env.close() # Can be called multiple times.
def test_logger_as_context(self):
"""Ensures that EnvLogger can be used as a context."""
env = catch_env.Catch()
with environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
backend=backend_type.BackendType.RIEGELI) as env:
_train(env, num_episodes=1)
def test_scheduler_skips_first_timestep(self):
"""Checks that a scheduler that skips the first timestep raises an exception."""
def no_first_step(data: step_data.StepData) -> bool:
return not data.timestep.first()
with environment_logger.EnvLogger(
catch_env.Catch(),
data_directory=self.dataset_path,
scheduler=no_first_step,
backend=backend_type.BackendType.RIEGELI) as env:
_ = env.reset()
# On a subsequent step, it'll try to write to an episode that has not been
# initialized so it should raise an exception.
self.assertRaises(RuntimeError, env.step, 1)
def test_only_episodic_metadata(self):
"""Episodic metadata can be stored by storing only first steps."""
def only_first_step(data: step_data.StepData) -> bool:
return data.timestep.first()
def my_episode_fn(timestep, unused_action, unused_env):
if timestep.last():
my_episode_fn.x += 1
return my_episode_fn.x
my_episode_fn.x = 0
with environment_logger.EnvLogger(
catch_env.Catch(),
data_directory=self.dataset_path,
episode_fn=my_episode_fn,
scheduler=only_first_step,
backend=backend_type.BackendType.RIEGELI) as env:
for _ in range(3):
ts = env.reset()
while not ts.last():
ts = env.step(1)
with reader.Reader(self.dataset_path) as data_reader:
self.assertLen(data_reader.episodes, 3)
self.assertLen(data_reader.episode_metadata(), 3)
self.assertEqual(list(data_reader.episode_metadata()), [1, 2, 3])
def test_parallel_readers(self):
"""Ensures that multiple Readers can be used simultaneously."""
# Record a trajectory.
env = catch_env.Catch()
with environment_logger.EnvLogger(
env,
data_directory=self.dataset_path,
backend=backend_type.BackendType.RIEGELI) as env:
_train(env, num_episodes=10)
# Read data and create `n` Reader copies.
n = 50
    with reader.Reader(self.dataset_path) as original_reader:
steps = list(original_reader.steps)
copies = [original_reader.copy() for _ in range(n)]
# Check the data in parallel with `n` workers.
def _check_data(r: reader.Reader):
np.testing.assert_equal(list(r.steps), steps)
futures = []
with concurrent.futures.ThreadPoolExecutor(max_workers=n) as executor:
for copy in copies:
futures.append(executor.submit(lambda c=copy: _check_data(c)))
for f in futures:
f.result(timeout=5) # Wait for up to 5 seconds.
# Close the copies.
for copy in copies:
copy.close()
def test_envlogger_pickling(self):
"""Checks EnvLogger pickling support."""
env = catch_env.Catch()
env = environment_logger.EnvLogger(
env,
backend=backend_type.BackendType.RIEGELI,
data_directory=self.dataset_path)
_ = _train(env, num_episodes=7)
serialized = pickle.dumps(env)
another_env = pickle.loads(serialized)
# The env should also work as usual.
another_data = _train(another_env, num_episodes=11)
self.assertNotEmpty(another_data)
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/environment_logger_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data type that's written to and returned from storage.
"""
from typing import Any, NamedTuple
import dm_env
class StepData(NamedTuple):
"""Payload that's written at every dm_env.Environment.step() call.
`StepData` contains the data that's written to logs (i.e. to disk somewhere).
Attributes:
timestep: The dm_env.TimeStep generated by the environment.
    action: The action that generated `timestep`.
    custom_data: Any client-specific data to be written alongside `timestep`
and `action`. It must be supported by converters/codec.py.
"""
timestep: dm_env.TimeStep
action: Any
custom_data: Any = None
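# Illustrative sketch (not part of the original module): constructing a
# `StepData` for a single transition. The observation, action and custom
# payload below are made-up values used purely for demonstration.
#
#   import dm_env
#   import numpy as np
#
#   timestep = dm_env.transition(reward=1.0, observation=np.zeros(3))
#   data = StepData(timestep=timestep, action=np.int32(2),
#                   custom_data={'frame_id': 7})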
| envlogger-main | envlogger/step_data.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A one-stop import for commonly used modules in EnvLogger."""
from envlogger import environment_logger
from envlogger import reader
from envlogger import step_data
from envlogger.backends import backend_type
from envlogger.backends import riegeli_backend_writer
from envlogger.backends import schedulers
from envlogger.proto import storage_pb2
EnvLogger = environment_logger.EnvLogger
Reader = reader.Reader
BackendType = backend_type.BackendType
StepData = step_data.StepData
Scheduler = schedulers.Scheduler
RiegeliBackendWriter = riegeli_backend_writer.RiegeliBackendWriter
Data = storage_pb2.Data
Datum = storage_pb2.Datum
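# Illustrative sketch (not part of the original module): with the aliases
# above, typical client code only needs the top-level `envlogger` import.
# `my_env` stands in for any `dm_env.Environment`, and the data directory is
# a made-up path that is assumed to exist.
#
#   import envlogger
#
#   with envlogger.EnvLogger(my_env, data_directory='/tmp/my_data') as env:
#     timestep = env.reset()
#     while not timestep.last():
#       timestep = env.step(0)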
| envlogger-main | envlogger/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reader of EnvironmentLogger data."""
import copy
from typing import Any, Optional, Sequence, Union
from absl import logging
from envlogger import step_data
from envlogger.backends import backend_reader
from envlogger.backends import backend_type
from envlogger.backends import in_memory_backend
from envlogger.backends import riegeli_backend_reader
from envlogger.converters import spec_codec
class Reader:
"""Reader of trajectories generated by EnvLogger."""
def __init__(self,
*backend_args,
backend: Union[
backend_reader.BackendReader,
backend_type.BackendType] = backend_type.BackendType.RIEGELI,
**backend_kwargs):
logging.info('backend: %r', backend)
logging.info('backend_args: %r', backend_args)
logging.info('backend_kwargs: %r', backend_kwargs)
# Set backend.
if isinstance(backend, backend_reader.BackendReader):
self._backend = backend
elif isinstance(backend, backend_type.BackendType):
self._backend = {
backend_type.BackendType.RIEGELI:
riegeli_backend_reader.RiegeliBackendReader,
backend_type.BackendType.IN_MEMORY:
in_memory_backend.InMemoryBackendReader,
}[backend](*backend_args, **backend_kwargs)
else:
raise TypeError(f'Unsupported backend: {backend}')
self._set_specs()
def copy(self):
c = copy.copy(self)
c._backend = self._backend.copy()
    c._observation_spec = self._observation_spec
c._action_spec = self._action_spec
c._reward_spec = self._reward_spec
c._discount_spec = self._discount_spec
return c
def close(self):
self._backend.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def __del__(self):
self.close()
def metadata(self):
return self._backend.metadata()
@property
def steps(self) -> Sequence[step_data.StepData]:
return self._backend.steps
@property
def episodes(self) -> Sequence[Sequence[step_data.StepData]]:
return self._backend.episodes
def episode_metadata(self) -> Sequence[Optional[Any]]:
return self._backend.episode_metadata()
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._action_spec
def reward_spec(self):
return self._reward_spec
def discount_spec(self):
return self._discount_spec
def _set_specs(self) -> None:
"""Extracts and decodes environment specs from the logged data."""
metadata = self._backend.metadata() or {}
env_specs = spec_codec.decode_environment_specs(
metadata.get('environment_specs', {}))
self._observation_spec = env_specs['observation_spec']
self._action_spec = env_specs['action_spec']
self._reward_spec = env_specs['reward_spec']
self._discount_spec = env_specs['discount_spec']
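# Illustrative sketch (not part of the original module): reading back a
# trajectory previously recorded with `EnvLogger`. '/tmp/my_data' is a
# made-up directory assumed to contain Riegeli-backed trajectories.
#
#   with Reader(data_directory='/tmp/my_data') as r:
#     print(r.metadata())
#     for episode in r.episodes:
#       for step in episode:
#         print(step.timestep.observation, step.action)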
| envlogger-main | envlogger/reader.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install script for setuptools."""
import os
import posixpath
import shutil
import pkg_resources
import setuptools
from setuptools.command import build_ext
from setuptools.command import build_py
PROJECT_NAME = 'envlogger'
__version__ = '1.1'
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
_ENVLOGGER_PROTOS = (
'proto/storage.proto',
)
class _GenerateProtoFiles(setuptools.Command):
"""Command to generate protobuf bindings for EnvLogger protos."""
  description = 'Generates Python protobuf bindings for EnvLogger protos.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# We have to import grpc_tools here, after setuptools has installed
# setup_requires dependencies.
from grpc_tools import protoc
grpc_protos_include = pkg_resources.resource_filename(
'grpc_tools', '_proto')
for proto_path in _ENVLOGGER_PROTOS:
proto_args = [
'grpc_tools.protoc',
'--proto_path={}'.format(grpc_protos_include),
'--proto_path={}'.format(_ROOT_DIR),
'--python_out={}'.format(_ROOT_DIR),
'--grpc_python_out={}'.format(_ROOT_DIR),
os.path.join(_ROOT_DIR, proto_path),
]
if protoc.main(proto_args) != 0:
raise RuntimeError('ERROR: {}'.format(proto_args))
class _BuildPy(build_py.build_py):
"""Generate protobuf bindings in build_py stage."""
def run(self):
self.run_command('generate_protos')
build_py.build_py.run(self)
class BazelExtension(setuptools.Extension):
"""A C/C++ extension that is defined as a Bazel BUILD target."""
def __init__(self, bazel_target):
self.bazel_target = bazel_target
self.relpath, self.target_name = (
posixpath.relpath(bazel_target, '//').split(':'))
ext_name = os.path.join(
self.relpath.replace(posixpath.sep, os.path.sep), self.target_name)
super().__init__(ext_name, sources=[])
class _BuildExt(build_ext.build_ext):
"""A command that runs Bazel to build a C/C++ extension."""
def run(self):
self.run_command('generate_protos')
self.bazel_build()
build_ext.build_ext.run(self)
def bazel_build(self):
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
bazel_argv = [
'bazel',
'build',
'...',
'--symlink_prefix=' + os.path.join(self.build_temp, 'bazel-'),
'--compilation_mode=' + ('dbg' if self.debug else 'opt'),
'--verbose_failures',
]
self.spawn(bazel_argv)
for ext in self.extensions:
ext_bazel_bin_path = os.path.join(
self.build_temp, 'bazel-bin',
ext.relpath, ext.target_name + '.so')
ext_name = ext.name
ext_dest_path = self.get_ext_fullpath(ext_name)
ext_dest_dir = os.path.dirname(ext_dest_path)
if not os.path.exists(ext_dest_dir):
os.makedirs(ext_dest_dir)
shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
# Copy things from /external to their own libs
# E.g. /external/some_repo/some_lib --> /some_lib
if ext_name.startswith('external/'):
split_path = ext_name.split('/')
ext_name = '/'.join(split_path[2:])
ext_dest_path = self.get_ext_fullpath(ext_name)
ext_dest_dir = os.path.dirname(ext_dest_path)
if not os.path.exists(ext_dest_dir):
os.makedirs(ext_dest_dir)
shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
setuptools.setup(
name=PROJECT_NAME,
version=__version__,
description='EnvLogger: A tool for recording trajectories.',
author='DeepMind',
license='Apache 2.0',
ext_modules=[
BazelExtension('//envlogger/backends/python:episode_info'),
BazelExtension('//envlogger/backends/python:riegeli_dataset_reader'),
BazelExtension('//envlogger/backends/python:riegeli_dataset_writer'),
],
cmdclass={
'build_ext': _BuildExt,
'build_py': _BuildPy,
'generate_protos': _GenerateProtoFiles,
},
packages=setuptools.find_packages(),
setup_requires=[
# Some software packages have problems with older versions already
# installed by pip. In particular DeepMind Acme uses grpcio-tools 1.45.0
# (as of 2022-04-20) so we use the same version here.
'grpcio-tools>=1.45.0',
],
install_requires=[
'absl-py',
'dm_env',
'numpy',
'protobuf>=3.14',
'setuptools!=50.0.0', # https://github.com/pypa/setuptools/issues/2350
],
extras_require={
'tfds': [
'tensorflow',
'tfds-nightly',
],
})
| envlogger-main | envlogger/setup.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for implementing environment wrappers.."""
import pickle
import dm_env
class EnvironmentWrapper(dm_env.Environment):
"""An Environment which delegates calls to another environment.
Subclasses should override one or more methods to modify the behavior of the
backing environment as desired per the Decorator Pattern.
This exposes the wrapped environment to subclasses with the `._environment`
property and also defines `__getattr__` so that attributes are invisibly
forwarded to the wrapped environment (and hence enabling duck-typing).
"""
def __init__(self, environment: dm_env.Environment):
self._environment = environment
def __getattr__(self, name):
return getattr(self._environment, name)
def __getstate__(self):
return pickle.dumps(self._environment)
def __setstate__(self, state):
self._environment = pickle.loads(state)
def step(self, action) -> dm_env.TimeStep:
return self._environment.step(action)
def reset(self) -> dm_env.TimeStep:
return self._environment.reset()
def action_spec(self):
return self._environment.action_spec()
def discount_spec(self):
return self._environment.discount_spec()
def observation_spec(self):
return self._environment.observation_spec()
def reward_spec(self):
return self._environment.reward_spec()
def close(self):
return self._environment.close()
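# Illustrative sketch (not part of the original module): a minimal wrapper
# following the delegation pattern described above. `RewardScalingWrapper`
# and its `scale` parameter are made up for demonstration.
#
#   class RewardScalingWrapper(EnvironmentWrapper):
#
#     def __init__(self, environment: dm_env.Environment, scale: float = 0.1):
#       super().__init__(environment)
#       self._scale = scale
#
#     def step(self, action) -> dm_env.TimeStep:
#       timestep = self._environment.step(action)
#       return timestep._replace(reward=timestep.reward * self._scale)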
| envlogger-main | envlogger/environment_wrapper.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper that logs `TimeStep`s and actions, metadata and episodes.
Data can be read back using:
- The Python reader API (envlogger.reader).
"""
import pickle
from typing import Any, Callable, Dict, Optional, Union
import dm_env
from envlogger import environment_wrapper
from envlogger import step_data
from envlogger.backends import backend_type
from envlogger.backends import backend_writer
from envlogger.backends import in_memory_backend
from envlogger.backends import riegeli_backend_writer
from envlogger.converters import spec_codec
_DEFAULT_BACKEND = backend_type.BackendType.RIEGELI
class EnvLogger(environment_wrapper.EnvironmentWrapper):
"""Wrapper that logs timestep and actions."""
def __init__(
self,
env: dm_env.Environment,
step_fn: Optional[Callable[[dm_env.TimeStep, Any, dm_env.Environment],
Any]] = None,
episode_fn: Optional[Callable[[dm_env.TimeStep, Any, dm_env.Environment],
Any]] = None,
metadata: Optional[Dict[str, Any]] = None,
backend: Union[backend_writer.BackendWriter, backend_type.BackendType,
Callable[...,
backend_writer.BackendWriter]] = _DEFAULT_BACKEND,
**backend_kwargs):
"""Constructor.
Usage:
my_env = MyDmEnvironment()
with EnvLogger(my_env, data_directory='/some/path/', ...) as env:
# Use `env` just like `my_env`.
# `.close()` is automatically called when the context is over.
Calling `close()` will flush the trajectories and the index to disk and will
ensure that they can be read later on. If it isn't called, there is a large
risk of losing data. This is particularly common in some RL frameworks that
do not clean up their environments. If the environment runs for a very long
time, this can happen only to the last shard, but if the instance is
short-lived, then a large portion of the trajectories can disappear.
Args:
env: The wrapped environment.
step_fn: A function that takes the current timestep, current action, the
environment itself and returns custom data that's written at every
step() if it's not None.
episode_fn: A function that takes the current timestep, current action,
the environment itself and returns custom episodic data that's written
when the current episode is over. If it is None or if it returns None,
nothing is written. This function is called at every step during the
course of an episode, but only the last value it returns will actually
be stored (all intermediate return values are ignored).
metadata: Any dataset-level custom data to be written.
backend: One of the following:
        * A `BackendWriter` instance: `EnvLogger` will simply use this
          instance as is.
        * A `BackendType` enum indicating the backend to use: `EnvLogger` will
          construct a `BackendWriter` from a list of predefined backends,
          passing `backend_kwargs`.
        * A `Callable`: `EnvLogger` will call the given function passing
          `backend_kwargs`. The function _must_ return a `BackendWriter`
          instance.
**backend_kwargs: Extra arguments use to construct the backend. These will
be handed to `backend` without any modification.
"""
super().__init__(env)
self._step_fn = step_fn
self._episode_fn = episode_fn
self._reset_next_step = True
metadata = metadata or {}
metadata['environment_specs'] = spec_codec.encode_environment_specs(env)
backend_kwargs['metadata'] = metadata
# Set backend.
if isinstance(backend, backend_writer.BackendWriter):
self._backend = backend
elif isinstance(backend, backend_type.BackendType):
self._backend = {
backend_type.BackendType.RIEGELI:
riegeli_backend_writer.RiegeliBackendWriter,
backend_type.BackendType.IN_MEMORY:
in_memory_backend.InMemoryBackendWriter,
}[backend](**backend_kwargs)
else:
self._backend = backend(**backend_kwargs)
def _transform_step(self,
timestep: dm_env.TimeStep,
action: Optional[Any] = None) -> step_data.StepData:
"""Puts all data into a StepData named tuple."""
custom_data = None
if self._step_fn is not None:
custom_data = self._step_fn(timestep, action, self._environment)
return step_data.StepData(timestep, action, custom_data)
def reset(self):
self._reset_next_step = False
timestep = self._environment.reset()
data = self._transform_step(timestep, None)
self._backend.record_step(data, is_new_episode=True)
if self._episode_fn is not None:
episode_metadata = self._episode_fn(timestep, None, self._environment)
if episode_metadata is not None:
self._backend.set_episode_metadata(episode_metadata)
return timestep
def step(self, action):
if self._reset_next_step:
return self.reset()
timestep = self._environment.step(action)
self._reset_next_step = timestep.last()
data = self._transform_step(timestep, action)
self._backend.record_step(data, is_new_episode=False)
if self._episode_fn is not None:
episode_metadata = self._episode_fn(timestep, action, self._environment)
if episode_metadata is not None:
self._backend.set_episode_metadata(episode_metadata)
return timestep
def close(self):
self._environment.close()
self._backend.close()
def __getstate__(self):
return {
'env': pickle.dumps(self._environment),
'step_fn': pickle.dumps(self._step_fn),
'episode_fn': pickle.dumps(self._episode_fn),
'reset_next_step': pickle.dumps(self._reset_next_step),
'backend': pickle.dumps(self._backend),
}
def __setstate__(self, state):
self._environment = pickle.loads(state['env'])
self._step_fn = pickle.loads(state['step_fn'])
self._episode_fn = pickle.loads(state['episode_fn'])
self._reset_next_step = pickle.loads(state['reset_next_step'])
self._backend = pickle.loads(state['backend'])
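# Illustrative sketch (not part of the original module): recording per-step
# custom data with `step_fn`. `MyDmEnvironment` is a placeholder for any
# `dm_env.Environment`, and the data directory is a made-up path.
#
#   def log_raw_reward(timestep, action, env):
#     del action, env
#     return {'raw_reward': timestep.reward}
#
#   with EnvLogger(MyDmEnvironment(),
#                  data_directory='/tmp/trajectories',
#                  step_fn=log_raw_reward) as env:
#     timestep = env.reset()
#     while not timestep.last():
#       timestep = env.step(0)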
| envlogger-main | envlogger/environment_logger.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| envlogger-main | envlogger/proto/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils to convert Envlogger data into RLDS."""
from typing import Any, Dict, Optional
from absl import logging
from envlogger import step_data
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
Step = Dict[str, Any]
def to_rlds_step(prev_step: step_data.StepData,
step: Optional[step_data.StepData]) -> Step:
"""Builds an RLDS step from two Envlogger steps.
Steps follow the RLDS convention from https://github.com/google-research/rlds.
Args:
prev_step: previous step.
step: current step. If None, it builds the last step (where the observation
is the last one, and the action, reward and discount are undefined).
Returns:
RLDS Step.
"""
metadata = {}
if isinstance(prev_step.custom_data, dict):
metadata = prev_step.custom_data
return {
'action':
step.action if step else tf.nest.map_structure(
np.zeros_like, prev_step.action),
'discount':
step.timestep.discount if step else tf.nest.map_structure(
np.zeros_like, prev_step.timestep.discount),
'is_first':
prev_step.timestep.first(),
'is_last':
prev_step.timestep.last(),
'is_terminal': (prev_step.timestep.last() and
prev_step.timestep.discount == 0.0),
'observation':
prev_step.timestep.observation,
'reward':
step.timestep.reward if step else tf.nest.map_structure(
np.zeros_like, prev_step.timestep.reward),
**metadata,
}
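# Illustrative sketch (not part of the original module): converting two
# consecutive EnvLogger steps into one RLDS step. All values are made up.
#
#   import dm_env
#   prev = step_data.StepData(
#       timestep=dm_env.restart(observation=0), action=None)
#   curr = step_data.StepData(
#       timestep=dm_env.transition(reward=1.0, observation=1), action=2)
#   rlds_step = to_rlds_step(prev, curr)
#   # rlds_step['observation'] is taken from `prev` (0), while 'action',
#   # 'reward' and 'discount' come from `curr` (2, 1.0 and 1.0).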
def _find_extra_shard(split_info: tfds.core.SplitInfo) -> Optional[Any]:
"""Returns the filename of the extra shard, or None if all shards are in the metadata."""
filepath = split_info.filename_template.sharded_filepath(
shard_index=split_info.num_shards, num_shards=split_info.num_shards + 1)
if tf.io.gfile.exists(filepath):
# There is one extra shard for which we don't have metadata.
return filepath
return None
def maybe_recover_last_shard(builder: tfds.core.DatasetBuilder):
"""Goes through the splits and recovers the incomplete shards.
It checks if the last shard is missing. If that is the case, it rewrites the
metadata. This requires to read the full shard so it may take some time.
We assume that only the last shard can be unaccounted for in the
metadata because the logger generates shards sequentially and it updates the
metadata once a shard is done and before starting the new shard.
Args:
builder: TFDS builder of the dataset that may have incomplete shards.
Returns:
A builder with the new split information.
"""
split_infos = builder.info.splits
splits_to_update = 0
for _, split_info in split_infos.items():
extra_shard = _find_extra_shard(split_info)
if extra_shard is None:
continue
logging.info('Recovering data for shard %s.', extra_shard)
splits_to_update += 1
ds = tf.data.TFRecordDataset(extra_shard)
num_examples = 0
num_bytes = 0
for ex in ds:
num_examples += 1
num_bytes += len(ex.numpy())
new_split_info = split_info.replace(
shard_lengths=split_info.shard_lengths + [num_examples],
num_bytes=split_info.num_bytes + num_bytes)
old_splits = [
v for k, v in builder.info.splits.items() if k != new_split_info.name
]
builder.info.set_splits(tfds.core.SplitDict(old_splits + [new_split_info]))
if splits_to_update > 0:
builder.info.write_to_directory(builder.data_dir)
return builder
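# Illustrative sketch (not part of the original module): recovering the
# metadata of a dataset whose last shard was interrupted mid-write.
# '/tmp/my_tfds_dataset' is a made-up directory holding a TFDS dataset.
#
#   builder = tfds.builder_from_directory('/tmp/my_tfds_dataset')
#   builder = maybe_recover_last_shard(builder)
#   ds = builder.as_dataset(split='train')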
| envlogger-main | envlogger/backends/rlds_utils.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract trajectory logging interface."""
import abc
from typing import Any, Dict, Optional
from envlogger import step_data
from envlogger.backends import schedulers
class BackendWriter(metaclass=abc.ABCMeta):
"""Abstract trajectory logging interface."""
def __init__(self,
metadata: Optional[Dict[str, Any]] = None,
scheduler: Optional[schedulers.Scheduler] = None):
"""BackendWriter base class.
Args:
metadata: Any dataset-level custom data to be written.
      scheduler: A callable that takes the current `StepData` and returns True
        if the current step should be logged, False otherwise. If it returns
        False, `_record_step()` will not be called at all. NOTE: This
        scheduler should NEVER skip the first timestep in the episode,
        otherwise `EnvLogger` will not know that such episode really exists.
"""
self._scheduler = scheduler
self._metadata = metadata
def record_step(self, data: step_data.StepData, is_new_episode: bool) -> None:
if (self._scheduler is not None and not self._scheduler(data)):
return
self._record_step(data, is_new_episode)
@abc.abstractmethod
def set_episode_metadata(self, data: Any) -> None:
pass
@abc.abstractmethod
def _record_step(self, data: step_data.StepData,
is_new_episode: bool) -> None:
pass
@abc.abstractmethod
def close(self) -> None:
pass
def __del__(self):
self.close()
def metadata(self):
return self._metadata
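# Illustrative sketch (not part of the original module): a scheduler that is
# compatible with the NOTE above because it never skips the first timestep of
# an episode. The 10% sampling rate is an arbitrary, made-up choice.
#
#   import numpy as np
#
#   def keep_firsts_and_some(data: step_data.StepData) -> bool:
#     return data.timestep.first() or np.random.rand() < 0.1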
| envlogger-main | envlogger/backends/backend_writer.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract interface for reading trajectories."""
import abc
from typing import Any, Callable, Dict, Generic, Iterator, List, Optional, Sequence, TypeVar, Union
from absl import logging
from envlogger import step_data
from envlogger.backends.python import episode_info
from envlogger.converters import codec
T = TypeVar('T')
class _SequenceAdapter(Generic[T], Sequence[T]):
"""Convenient visitor for episodes/steps."""
def __init__(self, count: int, get_nth_item: Callable[[int], T]):
"""Constructor.
Args:
count: Total number of items.
get_nth_item: Function to get the nth item.
"""
self._count = count
self._index = 0
self._get_nth_item = get_nth_item
def __getitem__(self, index: Union[int, slice]) -> Union[T, List[T]]:
"""Retrieves items from this sequence.
Args:
index: item index or slice of indices.
Returns:
The item at `index` if index is of type `int`, or a list of items if
`index` is a slice. If `index` is a negative integer, then it is
equivalent to index + len(self).
Raises:
IndexError: if index is an integer outside of the bounds [-length,
length - 1].
"""
if isinstance(index, slice):
indices = index.indices(len(self))
return [self._get_nth_item(i) for i in range(*indices)]
if index >= self._count or index < -self._count:
raise IndexError(f'`index`=={index} is out of the range [{-self._count}, '
f'{self._count - 1}].')
index = index if index >= 0 else index + self._count
return self._get_nth_item(index)
def __len__(self) -> int:
return self._count
def __iter__(self) -> Iterator[T]:
while self._index < len(self):
yield self[self._index]
self._index += 1
self._index = 0
def __next__(self) -> T:
if self._index < len(self):
index = self._index
self._index += 1
return self[index]
else:
raise StopIteration()
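# Illustrative sketch (not part of the original module): `_SequenceAdapter`
# behaves like a read-only list backed by a getter function.
#
#   squares = _SequenceAdapter(count=5, get_nth_item=lambda i: i * i)
#   squares[2]     # == 4
#   squares[-1]    # == 16 (negative indices wrap around)
#   squares[1:3]   # == [1, 4]
#   list(squares)  # == [0, 1, 4, 9, 16]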
class BackendReader(metaclass=abc.ABCMeta):
"""Base class for trajectory readers."""
def __init__(self):
self._init_visitors()
def copy(self) -> 'BackendReader':
"""Returns a copy of self."""
c = self._copy()
c._init_visitors()
return c
@abc.abstractmethod
def _copy(self) -> 'BackendReader':
"""Implementation-specific copy behavior."""
def _init_visitors(self):
"""Initializes visitors."""
logging.info('Creating visitors.')
self._steps = _SequenceAdapter(
count=self._get_num_steps(), get_nth_item=self._get_nth_step)
self._episodes = _SequenceAdapter(
count=self._get_num_episodes(), get_nth_item=self._get_nth_episode)
self._episode_metadata = _SequenceAdapter(
count=self._get_num_episodes(),
get_nth_item=self._get_nth_episode_metadata)
logging.info('Done creating visitors.')
@abc.abstractmethod
def _get_nth_step(self, i: int) -> step_data.StepData:
pass
@abc.abstractmethod
def _get_num_steps(self) -> int:
pass
@abc.abstractmethod
def _get_num_episodes(self) -> int:
pass
@abc.abstractmethod
def _get_nth_episode_info(self,
i: int,
include_metadata: bool = False
) -> episode_info.EpisodeInfo:
pass
def _get_nth_episode(self, i: int) -> Sequence[step_data.StepData]:
"""Yields timesteps for episode `i` (0-based)."""
episode = self._get_nth_episode_info(i, include_metadata=False)
def get_nth_step_from_episode(j: int):
return self._get_nth_step(episode.start + j)
return _SequenceAdapter(
count=episode.num_steps, get_nth_item=get_nth_step_from_episode)
def _get_nth_episode_metadata(self, i: int) -> Optional[Any]:
"""Returns the metadata for episode `i` (0-based)."""
episode = self._get_nth_episode_info(i, include_metadata=True)
return codec.decode(episode.metadata)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
def __del__(self):
self.close()
@abc.abstractmethod
def close(self) -> None:
pass
@abc.abstractmethod
def metadata(self) -> Dict[str, Any]:
pass
@property
def episodes(self) -> Sequence[Sequence[step_data.StepData]]:
return self._episodes
def episode_metadata(self) -> Sequence[Optional[Any]]:
return self._episode_metadata
@property
def steps(self) -> Sequence[step_data.StepData]:
return self._steps
| envlogger-main | envlogger/backends/backend_reader.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| envlogger-main | envlogger/backends/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfds_backend_writer."""
from typing import List
from absl.testing import absltest
import dm_env
from envlogger import step_data
from envlogger.backends import rlds_utils
from envlogger.backends import tfds_backend_testlib
from envlogger.backends import tfds_backend_writer
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
def _create_step(value: int, step_type: dm_env.StepType) -> step_data.StepData:
return step_data.StepData(
action=value, timestep=dm_env.TimeStep(step_type, value, value, value))
def _tfds_features() -> tfds.features.FeaturesDict:
return tfds.features.FeaturesDict({
'steps':
tfds.features.Dataset({
'observation': tf.int64,
'action': tf.int64,
'reward': tf.int64,
'is_terminal': tf.bool,
'is_first': tf.bool,
'is_last': tf.bool,
'discount': tf.int64,
}),
})
class TfdsBackendWriterEpisodeTest(absltest.TestCase):
def test_add_step(self):
episode = tfds_backend_writer.Episode(
_create_step(0, dm_env.StepType.FIRST))
step = _create_step(1, dm_env.StepType.MID)
episode.add_step(step)
self.assertEqual(episode.prev_step, step)
self.assertLen(episode.steps, 1)
expected_rlds_step = {
'observation': 0,
'action': 1,
'reward': 1,
'discount': 1,
'is_first': True,
'is_last': False,
'is_terminal': False,
}
self.assertEqual(episode.steps[0], expected_rlds_step)
def test_get_rlds_episode(self):
episode = tfds_backend_writer.Episode(
_create_step(0, dm_env.StepType.FIRST))
episode.add_step(_create_step(1, dm_env.StepType.MID))
episode.add_step(_create_step(2, dm_env.StepType.LAST))
rlds_episode = episode.get_rlds_episode()
self.assertIsInstance(rlds_episode, dict)
self.assertIn('steps', rlds_episode)
steps_counter = 0
for index, step in enumerate(rlds_episode['steps']):
self.assertEqual(index, step['observation'])
self.assertFalse(step['is_terminal'])
self.assertEqual(index == 0, step['is_first'])
self.assertEqual(index == 2, step['is_last'])
next_value = 0 if index == 2 else index + 1
for key in ['action', 'reward', 'discount']:
self.assertEqual(next_value, step[key])
steps_counter += 1
self.assertEqual(steps_counter, 3)
class TfdsBackendWriterTest(absltest.TestCase):
def _assert_steps(self, expected_steps: List[step_data.StepData],
steps: tf.data.Dataset):
steps = steps.as_numpy_iterator()
for idx, rlds_step in enumerate(steps):
step = expected_steps[idx + 1] if idx < len(expected_steps) - 1 else None
expected_step = rlds_utils.to_rlds_step(expected_steps[idx], step)
np.testing.assert_equal(expected_step, rlds_step)
def test_backend_writer(self):
num_episodes = 5
max_episodes_per_file = 3
data_dir = self.create_tempdir(name='my_data_dir').full_path
expected_episodes = tfds_backend_testlib.generate_episode_data(
backend=tfds_backend_testlib.tfds_backend_catch_env(
data_directory=data_dir,
max_episodes_per_file=max_episodes_per_file),
num_episodes=num_episodes)
builder = tfds.builder_from_directory(data_dir)
ds = builder.as_dataset(split='train')
num_episodes = 0
for index, episode in enumerate(ds):
self._assert_steps(expected_episodes[index], episode['steps'])
self.assertEqual(episode['episode_id'], index)
num_episodes += 1
self.assertLen(expected_episodes, num_episodes)
def test_backend_writer_with_split_name(self):
num_episodes = 1
max_episodes_per_file = 1
data_dir = self.create_tempdir(name='my_data_dir').full_path
expected_episodes = tfds_backend_testlib.generate_episode_data(
backend=tfds_backend_testlib.tfds_backend_catch_env(
data_directory=data_dir,
max_episodes_per_file=max_episodes_per_file,
split_name='split'),
num_episodes=num_episodes)
builder = tfds.builder_from_directory(data_dir)
ds = builder.as_dataset(split='split')
num_episodes = 0
for index, episode in enumerate(ds):
self._assert_steps(expected_episodes[index], episode['steps'])
self.assertEqual(episode['episode_id'], index)
num_episodes += 1
self.assertLen(expected_episodes, num_episodes)
def test_backend_writer_with_dataset_metadata(self):
num_episodes = 5
max_episodes_per_file = 3
data_dir = self.create_tempdir(name='my_data_dir').full_path
_ = tfds_backend_testlib.generate_episode_data(
backend=tfds_backend_testlib.tfds_backend_catch_env(
data_directory=data_dir,
max_episodes_per_file=max_episodes_per_file,
ds_metadata={'env_name': 'catch'},
store_ds_metadata=True),
num_episodes=num_episodes)
builder = tfds.builder_from_directory(data_dir)
info = builder.info
self.assertDictEqual(info.metadata, {'env_name': 'catch'})
def test_backend_writer_without_dataset_metadata(self):
num_episodes = 5
max_episodes_per_file = 3
data_dir = self.create_tempdir(name='my_data_dir').full_path
_ = tfds_backend_testlib.generate_episode_data(
backend=tfds_backend_testlib.tfds_backend_catch_env(
data_directory=data_dir,
max_episodes_per_file=max_episodes_per_file,
ds_metadata=None,
store_ds_metadata=True),
num_episodes=num_episodes)
builder = tfds.builder_from_directory(data_dir)
info = builder.info
self.assertIsNone(info.metadata)
def test_backend_writer_ignore_dataset_metadata(self):
num_episodes = 5
max_episodes_per_file = 3
data_dir = self.create_tempdir(name='my_data_dir').full_path
_ = tfds_backend_testlib.generate_episode_data(
backend=tfds_backend_testlib.tfds_backend_catch_env(
data_directory=data_dir,
max_episodes_per_file=max_episodes_per_file,
ds_metadata={'env_name': 'catch'},
store_ds_metadata=False),
num_episodes=num_episodes)
builder = tfds.builder_from_directory(data_dir)
info = builder.info
self.assertIsNone(info.metadata)
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/backends/tfds_backend_writer_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for schedulers."""
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
from envlogger import step_data
from envlogger.backends import schedulers
import numpy as np
def _create_episode(num_transitions: int):
"""Creates an episode with `num_transition` transitions."""
episode = [step_data.StepData(dm_env.restart(observation=None), action=None)]
for _ in range(num_transitions):
episode.append(
step_data.StepData(
dm_env.transition(observation=None, reward=None), action=None))
episode.append(
step_data.StepData(
dm_env.termination(observation=None, reward=None), action=None))
return episode
class DefaultSchedulersTest(parameterized.TestCase):
@parameterized.named_parameters(
('negative_interval', -1),
('zero_interval', 0),
)
def test_n_steps_invalid_args(self, step_interval):
"""NStepScheduler should raise an error if given invalid intervals."""
self.assertRaises(
ValueError, schedulers.NStepScheduler, step_interval=step_interval)
def test_n_steps_interval_1(self):
"""NStepScheduler should always return True if interval is 1."""
scheduler = schedulers.NStepScheduler(step_interval=1)
for _ in range(100):
self.assertTrue(scheduler(None))
def test_n_steps_interval_3(self):
"""NStepScheduler should return True only every 3 steps."""
n = 3
scheduler = schedulers.NStepScheduler(step_interval=n)
self.assertTrue(scheduler(None))
self.assertFalse(scheduler(None))
self.assertFalse(scheduler(None))
self.assertTrue(scheduler(None))
self.assertFalse(scheduler(None))
self.assertFalse(scheduler(None))
def test_n_steps_interval_n(self):
"""NStepScheduler should return True only every n steps."""
for _ in range(10):
n = np.random.randint(1, 50)
scheduler = schedulers.NStepScheduler(step_interval=n)
for i in range(0, n * 10):
if i % n == 0:
self.assertTrue(scheduler(None))
else:
self.assertFalse(scheduler(None))
@parameterized.named_parameters(
('negative_probability', -1),
('greater_than_1_probability', 1.01),
)
def test_bernoulli_steps_invalid_args(self, keep_probability):
"""The scheduler should raise an error with negative probabilities."""
self.assertRaises(
ValueError,
schedulers.BernoulliStepScheduler,
keep_probability=keep_probability)
def test_bernoulli_steps_probability_0(self):
"""BernoulliStepScheduler should return False if given probability 0.0."""
scheduler = schedulers.BernoulliStepScheduler(keep_probability=0)
for _ in range(100):
self.assertFalse(scheduler(None))
def test_bernoulli_steps_probability_1(self):
"""BernoulliStepScheduler should return True if given probability 1.0."""
scheduler = schedulers.BernoulliStepScheduler(keep_probability=1)
for _ in range(100):
self.assertTrue(scheduler(None))
def test_bernoulli_steps_probability_1pct(self):
"""BernoulliStepScheduler should return more False than True with p=0.01."""
scheduler = schedulers.BernoulliStepScheduler(keep_probability=0.01)
num_true = 0
for _ in range(1000):
num_true += scheduler(None)
num_false = 1000 - num_true
self.assertGreater(num_false, num_true)
def test_bernoulli_steps_probability_99pct(self):
"""BernoulliStepScheduler should return more True than False with p=0.99."""
scheduler = schedulers.BernoulliStepScheduler(keep_probability=0.99)
num_true = 0
for _ in range(1000):
num_true += scheduler(None)
num_false = 1000 - num_true
self.assertGreater(num_true, num_false)
def test_bernoulli_step_fixed_seed(self):
"""BernoulliStepScheduler should return deterministic outcomes."""
seed = np.random.default_rng().integers(10000)
# Run one trial with `seed`.
scheduler = schedulers.BernoulliStepScheduler(
keep_probability=0.5, seed=seed)
outcomes = [scheduler(None) for _ in range(100)]
# Repeat the trial with the same `seed`.
other_scheduler = schedulers.BernoulliStepScheduler(
keep_probability=0.5, seed=seed)
other_outcomes = [other_scheduler(None) for _ in range(100)]
# Assert that the outcomes are exactly the same.
self.assertEqual(outcomes, other_outcomes)
@parameterized.named_parameters(
('negative_interval', -1),
('zero_interval', 0),
)
def test_n_episode_invalid_args(self, episode_interval):
"""NEpisodeScheduler should raise an error if given invalid intervals."""
self.assertRaises(
ValueError,
schedulers.NEpisodeScheduler,
episode_interval=episode_interval)
def test_n_episode_interval_1(self):
"""NEpisodeScheduler should always return True if interval is 1."""
scheduler = schedulers.NEpisodeScheduler(episode_interval=1)
for _ in range(100):
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertTrue(scheduler(timestep))
def test_n_episode_interval_2(self):
"""NEpisodeScheduler should return True every other episode."""
scheduler = schedulers.NEpisodeScheduler(episode_interval=2)
for _ in range(100):
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertTrue(scheduler(timestep))
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertFalse(scheduler(timestep))
def test_n_episode_interval_n(self):
"""NEpisodeScheduler should return True only every n episodes."""
for _ in range(10):
n = np.random.randint(1, 50)
scheduler = schedulers.NEpisodeScheduler(episode_interval=n)
for i in range(0, n * 10):
if i % n == 0:
for timestep in _create_episode(
num_transitions=np.random.randint(100)):
self.assertTrue(scheduler(timestep))
else:
for timestep in _create_episode(
num_transitions=np.random.randint(100)):
self.assertFalse(scheduler(timestep))
@parameterized.named_parameters(
('negative_probability', -1),
('greater_than_1_probability', 1.01),
)
def test_bernoulli_episodes_invalid_args(self, keep_probability):
"""The scheduler should raise an error with negative probabilities."""
self.assertRaises(
ValueError,
schedulers.BernoulliEpisodeScheduler,
keep_probability=keep_probability)
def test_bernoulli_episodes_probability_0(self):
"""BernoulliEpisodeScheduler should return False if given probability 0.0."""
scheduler = schedulers.BernoulliEpisodeScheduler(keep_probability=0.0)
for _ in range(100):
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertFalse(scheduler(timestep))
def test_bernoulli_episodes_probability_1(self):
"""BernoulliEpisodeScheduler should return True if given probability 1.0."""
scheduler = schedulers.BernoulliEpisodeScheduler(keep_probability=1.0)
for _ in range(100):
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertTrue(scheduler(timestep))
def test_bernoulli_episodes_probability_1pct(self):
"""BernoulliEpisodeScheduler should return more False with p=0.01."""
scheduler = schedulers.BernoulliEpisodeScheduler(keep_probability=0.01)
num_true = 0
num_false = 0
for _ in range(1000):
for timestep in _create_episode(num_transitions=np.random.randint(100)):
outcome = scheduler(timestep)
if outcome:
num_true += 1
else:
num_false += 1
self.assertGreater(num_false, num_true)
def test_bernoulli_episodes_probability_99pct(self):
"""BernoulliEpisodeScheduler should return more True with p=0.99."""
scheduler = schedulers.BernoulliEpisodeScheduler(keep_probability=0.99)
num_true = 0
num_false = 0
for _ in range(1000):
for timestep in _create_episode(num_transitions=np.random.randint(100)):
outcome = scheduler(timestep)
if outcome:
num_true += 1
else:
num_false += 1
self.assertGreater(num_true, num_false)
def test_bernoulli_episode_fixed_seed(self):
"""BernoulliEpisodeScheduler should return deterministic outcomes."""
seed = np.random.default_rng().integers(10000)
episodes = [
_create_episode(num_transitions=np.random.randint(100))
for _ in range(1000)
]
# Run one trial with `seed`.
scheduler = schedulers.BernoulliEpisodeScheduler(
keep_probability=0.5, seed=seed)
outcomes = []
for episode in episodes:
for timestep in episode:
outcomes.append(scheduler(timestep))
# Repeat the trial with the same `seed`.
other_scheduler = schedulers.BernoulliEpisodeScheduler(
keep_probability=0.5, seed=seed)
other_outcomes = []
for episode in episodes:
for timestep in episode:
other_outcomes.append(other_scheduler(timestep))
# Assert that the outcomes are exactly the same.
self.assertEqual(outcomes, other_outcomes)
@parameterized.named_parameters(
('empty_list', []),
('empty_ndarray', np.array([], dtype=np.int64)),
)
def test_list_steps_empty_steps(self, desired_steps):
"""ListStepScheduler should raise an error if given invalid steps."""
self.assertRaises(
ValueError, schedulers.ListStepScheduler, desired_steps=desired_steps)
def test_list_np_array_wrong_type(self):
"""ListStepScheduler should raise an error if given invalid steps."""
self.assertRaises(
TypeError,
schedulers.ListStepScheduler,
desired_steps=np.array([1.0, 10.0, 100.0], dtype=np.float32))
def test_list_steps_single_item(self):
"""ListStepScheduler should return True if step is in `desired_steps`."""
scheduler = schedulers.ListStepScheduler(desired_steps=[3])
self.assertFalse(scheduler(None))
self.assertFalse(scheduler(None))
self.assertFalse(scheduler(None))
self.assertTrue(scheduler(None)) # 4th step should be True.
for _ in range(100):
self.assertFalse(scheduler(None))
def test_list_steps_first_10(self):
"""ListStepScheduler should return True if step is in `desired_steps`."""
scheduler = schedulers.ListStepScheduler(desired_steps=list(range(10)))
for _ in range(10): # First 10 steps should be True.
self.assertTrue(scheduler(None))
for _ in range(100):
self.assertFalse(scheduler(None))
def test_list_steps_logspace(self):
"""ListStepScheduler should return True if step is in `desired_steps`."""
desired_steps = np.logspace(
start=0, stop=3, num=10, base=10.0).astype(np.int32) - 1
# At this point: desired_steps = [0, 1, 3, 9, 20, 45, 99, 214, 463, 999]
scheduler = schedulers.ListStepScheduler(desired_steps=desired_steps)
for i in range(1000):
if i in [0, 1, 3, 9, 20, 45, 99, 214, 463, 999]:
self.assertTrue(scheduler(None))
else:
self.assertFalse(scheduler(None))
@parameterized.named_parameters(
('empty_list', []),
('empty_ndarray', np.array([], dtype=np.int64)),
)
def test_list_empty_episodes(self, desired_episodes):
"""ListEpisodeScheduler should raise an error if given invalid episodes."""
self.assertRaises(
ValueError,
schedulers.ListEpisodeScheduler,
desired_episodes=desired_episodes)
def test_list_episodes_np_array_wrong_type(self):
"""ListEpisodeScheduler should raise an error if given invalid episodes."""
self.assertRaises(
TypeError,
schedulers.ListEpisodeScheduler,
desired_episodes=np.array([1.0, 10.0, 100.0], dtype=np.float32))
def test_list_episodes_single_item(self):
"""Scheduler should return True if episode is in `desired_episodes`."""
scheduler = schedulers.ListEpisodeScheduler(desired_episodes=[3])
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertFalse(scheduler(timestep))
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertFalse(scheduler(timestep))
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertFalse(scheduler(timestep))
for timestep in _create_episode(
num_transitions=np.random.randint(100)): # 4th episode should be True.
self.assertTrue(scheduler(timestep))
for _ in range(100):
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertFalse(scheduler(timestep))
def test_list_episodes_first_10(self):
"""The scheduler should return True if episode is in `desired_episodes`."""
scheduler = schedulers.ListEpisodeScheduler(
desired_episodes=list(range(10)))
for _ in range(10): # First 10 episodes should be True.
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertTrue(scheduler(timestep))
for _ in range(100):
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertFalse(scheduler(timestep))
def test_list_episodes_logspace(self):
"""The scheduler should return True if episode is in `desired_episodes`."""
desired_episodes = np.logspace(
start=0, stop=3, num=10, base=10.0).astype(np.int32) - 1
# At this point: desired_episodes = [0, 1, 3, 9, 20, 45, 99, 214, 463, 999]
scheduler = schedulers.ListEpisodeScheduler(
desired_episodes=desired_episodes)
for i in range(1000):
if i in [0, 1, 3, 9, 20, 45, 99, 214, 463, 999]:
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertTrue(scheduler(timestep))
else:
for timestep in _create_episode(num_transitions=np.random.randint(100)):
self.assertFalse(scheduler(timestep))
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/backends/schedulers_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rlds_utils."""
from absl.testing import absltest
import dm_env
from envlogger import step_data
from envlogger.backends import rlds_utils
from envlogger.backends import tfds_backend_testlib
import numpy as np
import tensorflow_datasets as tfds
class RldsUtilsTest(absltest.TestCase):
def test_build_step(self):
prev_step = step_data.StepData(
timestep=dm_env.TimeStep(
step_type=dm_env.StepType.FIRST,
reward=1,
observation=2,
discount=3),
action=4)
step = step_data.StepData(
timestep=dm_env.TimeStep(
step_type=dm_env.StepType.LAST, reward=5, observation=6,
discount=7),
action=8)
expected_step = {
'observation': 2,
'action': 8,
'reward': 5,
'discount': 7,
'is_terminal': False,
'is_first': True,
'is_last': False,
}
rlds_step = rlds_utils.to_rlds_step(prev_step, step)
self.assertEqual(rlds_step, expected_step)
def test_build_last_step(self):
prev_step = step_data.StepData(
timestep=dm_env.TimeStep(
step_type=dm_env.StepType.LAST, reward=1, observation=2,
discount=1),
action=4)
expected_step = {
'observation': 2,
'action': 0,
'reward': 0,
'discount': 0,
'is_terminal': False,
'is_first': False,
'is_last': True,
}
rlds_step = rlds_utils.to_rlds_step(prev_step, None)
self.assertEqual(rlds_step, expected_step)
def test_build_nested_last_step(self):
def gen_oar(mode):
      gen_fn = np.ones if mode == 'ones' else np.zeros
obs = {'0': gen_fn((1, 2)), '1': gen_fn((3, 4))}
action = {'0': gen_fn((2, 3)), '1': gen_fn((4, 5))}
reward = {'0': gen_fn((1, 1)), '1': gen_fn((2, 2))}
return obs, action, reward
prev_obs, prev_action, prev_reward = gen_oar('ones')
prev_step = step_data.StepData(
timestep=dm_env.TimeStep(
step_type=dm_env.StepType.LAST,
reward=prev_reward,
observation=prev_obs,
discount=1),
action=prev_action)
_, zero_action, zero_reward = gen_oar('zeros')
expected_step = {
'observation': prev_obs,
'action': zero_action,
'reward': zero_reward,
'discount': 0,
'is_terminal': False,
'is_first': False,
'is_last': True,
}
rlds_step = rlds_utils.to_rlds_step(prev_step, None)
for key in rlds_step.keys():
if isinstance(rlds_step[key], dict): # obs, action, reward dicts
for rv, ev in zip(rlds_step[key].values(), expected_step[key].values()):
np.testing.assert_equal(rv, ev)
else:
self.assertEqual(rlds_step[key], expected_step[key])
def test_build_terminal_step(self):
prev_step = step_data.StepData(
timestep=dm_env.TimeStep(
step_type=dm_env.StepType.LAST, reward=1, observation=2,
discount=0),
action=4)
expected_step = {
'observation': 2,
'action': 0,
'reward': 0,
'discount': 0,
'is_terminal': True,
'is_first': False,
'is_last': True,
}
rlds_step = rlds_utils.to_rlds_step(prev_step, None)
self.assertEqual(rlds_step, expected_step)
def test_build_step_with_metadata(self):
prev_step = step_data.StepData(
timestep=dm_env.TimeStep(
step_type=dm_env.StepType.FIRST,
reward=1,
observation=2,
discount=3),
action=4,
custom_data={'extra_data': 10})
step = step_data.StepData(
timestep=dm_env.TimeStep(
step_type=dm_env.StepType.LAST, reward=5, observation=6,
discount=7),
action=8)
expected_step = {
'observation': 2,
'action': 8,
'reward': 5,
'discount': 7,
'is_terminal': False,
'is_first': True,
'is_last': False,
'extra_data': 10,
}
rlds_step = rlds_utils.to_rlds_step(prev_step, step)
self.assertEqual(rlds_step, expected_step)
def test_regenerate_splits_noop(self):
num_episodes = 3
max_episodes_per_file = 2
data_dir = self.create_tempdir(name='my_data_dir').full_path
_ = tfds_backend_testlib.generate_episode_data(
backend=tfds_backend_testlib.tfds_backend_catch_env(
data_directory=data_dir,
max_episodes_per_file=max_episodes_per_file,
split_name='split'),
num_episodes=num_episodes)
builder = tfds.builder_from_directory(data_dir)
self.assertEqual(list(builder.info.splits.keys()), ['split'])
self.assertEqual(builder.info.splits['split'].num_examples, 3)
self.assertEqual(builder.info.splits['split'].num_shards, 2)
self.assertEqual(builder.info.splits['split'].shard_lengths, [2, 1])
expected_splits = builder.info.splits
new_builder = rlds_utils.maybe_recover_last_shard(builder)
self.assertEqual(new_builder.info.splits, expected_splits)
def test_regenerate_ds_with_one_split(self):
num_episodes = 3
max_episodes_per_file = 5
data_dir = self.create_tempdir(name='my_data_dir').full_path
_ = tfds_backend_testlib.generate_episode_data(
backend=tfds_backend_testlib.tfds_backend_catch_env(
data_directory=data_dir,
max_episodes_per_file=max_episodes_per_file,
split_name='split'),
num_episodes=num_episodes)
builder = tfds.builder_from_directory(data_dir)
expected_splits = builder.info.splits
# Remove info from the metadata
builder.info.set_splits(
tfds.core.splits.SplitDict([
tfds.core.SplitInfo(
name='split',
shard_lengths=[],
num_bytes=0,
filename_template=tfds.core.ShardedFileTemplate(
dataset_name=builder.name,
split='split',
filetype_suffix='tfrecord',
data_dir=data_dir,
template='{DATASET}-{SPLIT}.{FILEFORMAT}-{SHARD_INDEX}',
))
]))
builder.info.write_to_directory(data_dir)
new_builder = rlds_utils.maybe_recover_last_shard(builder)
self.assertEqual(
list(new_builder.info.splits.keys()), list(expected_splits.keys()))
self.assertEqual(new_builder.info.splits['split'].num_examples,
expected_splits['split'].num_examples)
self.assertEqual(new_builder.info.splits['split'].num_shards,
expected_splits['split'].num_shards)
self.assertEqual(new_builder.info.splits['split'].shard_lengths,
expected_splits['split'].shard_lengths)
self.assertEqual(new_builder.info.splits['split'].num_bytes,
expected_splits['split'].num_bytes)
def test_regenerate_ds_last_split(self):
num_episodes = 3
max_episodes_per_file = 2
data_dir = self.create_tempdir(name='my_data_dir').full_path
_ = tfds_backend_testlib.generate_episode_data(
backend=tfds_backend_testlib.tfds_backend_catch_env(
data_directory=data_dir,
max_episodes_per_file=max_episodes_per_file,
split_name='split'),
num_episodes=num_episodes)
builder = tfds.builder_from_directory(data_dir)
expected_splits = builder.info.splits
# Remove info from the metadata
    # Since we don't know how many bytes each shard has, we leave it as it was.
# We check later that the number of bytes increased.
builder.info.set_splits(
tfds.core.splits.SplitDict([
tfds.core.SplitInfo(
name='split',
shard_lengths=[expected_splits['split'].shard_lengths[0]],
num_bytes=expected_splits['split'].num_bytes,
filename_template=tfds.core.ShardedFileTemplate(
dataset_name=builder.name,
split='split',
filetype_suffix='tfrecord',
data_dir=data_dir,
template='{DATASET}-{SPLIT}.{FILEFORMAT}-{SHARD_INDEX}',
),
)
]))
builder.info.write_to_directory(data_dir)
new_builder = rlds_utils.maybe_recover_last_shard(builder)
self.assertEqual(
list(new_builder.info.splits.keys()), list(expected_splits.keys()))
self.assertEqual(new_builder.info.splits['split'].num_examples,
expected_splits['split'].num_examples)
self.assertEqual(new_builder.info.splits['split'].num_shards,
expected_splits['split'].num_shards)
self.assertEqual(new_builder.info.splits['split'].shard_lengths,
expected_splits['split'].shard_lengths)
# We don't know how many bytes are accounted to each episode, so we check
# that the new number of bytes is larger.
self.assertLess(expected_splits['split'].num_bytes,
new_builder.info.splits['split'].num_bytes)
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/backends/rlds_utils_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment logger backend that stores all data in RAM.
"""
import copy
from typing import Any
from envlogger import step_data
from envlogger.backends import backend_reader
from envlogger.backends import backend_writer
from envlogger.backends.python import episode_info
class InMemoryBackendWriter(backend_writer.BackendWriter):
"""Backend that stores trajectory data in memory."""
def __init__(self, **base_kwargs):
super().__init__(**base_kwargs)
self.steps = []
self.episode_metadata = {}
self.episode_start_indices = []
def _record_step(self, data: step_data.StepData,
is_new_episode: bool) -> None:
if is_new_episode:
self.episode_start_indices.append(len(self.steps))
self.steps.append(data)
def set_episode_metadata(self, data: Any) -> None:
current_episode = len(self.episode_start_indices)
    if current_episode > 0:
      # `current_episode` counts the episodes started so far, so the episode
      # being annotated has 0-based index `current_episode - 1`.
      self.episode_metadata[current_episode - 1] = data
def close(self) -> None:
pass
class InMemoryBackendReader(backend_reader.BackendReader):
"""Reader that reads data from an InMemoryBackend."""
def __init__(self, in_memory_backend_writer: InMemoryBackendWriter):
self._backend = in_memory_backend_writer
super().__init__()
def _copy(self) -> 'InMemoryBackendReader':
return copy.deepcopy(self)
def close(self) -> None:
pass
def _get_nth_step(self, i: int) -> step_data.StepData:
return self._backend.steps[i]
def _get_nth_episode_info(self,
i: int,
include_metadata: bool = False
) -> episode_info.EpisodeInfo:
if i == len(self._backend.episode_start_indices) - 1: # Last episode.
length = len(self._backend.steps) - self._backend.episode_start_indices[i]
else:
length = (self._backend.episode_start_indices[i + 1] -
self._backend.episode_start_indices[i])
episode_metadata = self._backend.episode_metadata.get(i, None)
return episode_info.EpisodeInfo(
start=self._backend.episode_start_indices[i],
num_steps=length,
metadata=episode_metadata)
def _get_num_steps(self) -> int:
return len(self._backend.steps)
def _get_num_episodes(self) -> int:
return len(self._backend.episode_start_indices)
def metadata(self):
return self._backend.metadata()
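# Example (illustrative sketch, not used by the classes above): a minimal
# write/read round trip held entirely in RAM. It assumes the default
# `BackendWriter` arguments are sufficient; real usage would record actual
# environment observations instead of `None`.
def _example_in_memory_round_trip() -> None:
  import dm_env  # Local import to keep the sketch self-contained.
  writer = InMemoryBackendWriter(scheduler=None)
  first = step_data.StepData(
      dm_env.restart(observation=None), action=None, custom_data=None)
  writer.record_step(first, is_new_episode=True)
  last = step_data.StepData(
      dm_env.termination(reward=0.0, observation=None),
      action=0,
      custom_data=None)
  writer.record_step(last, is_new_episode=False)
  reader = InMemoryBackendReader(writer)
  assert len(reader.steps) == 2  # Two steps in total...
  assert len(reader.episodes) == 1  # ...forming a single episode.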
| envlogger-main | envlogger/backends/in_memory_backend.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils to test the backends."""
import time
from typing import Any, Dict, List, Optional
from absl import logging
from envlogger import step_data
from envlogger.backends import backend_writer
from envlogger.backends import tfds_backend_writer
from envlogger.testing import catch_env
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
def generate_episode_data(
backend: backend_writer.BackendWriter,
num_episodes: int = 2,
) -> List[List[step_data.StepData]]:
"""Runs a Catch environment for `num_episodes` and logs them.
Args:
backend: environment logger writer.
num_episodes: number of episodes to generate.
Returns:
List of generated episodes.
"""
env = catch_env.Catch()
logging.info('Training a random agent for %r episodes...', num_episodes)
episodes_data = []
for index in range(num_episodes):
episode = []
timestep = env.reset()
data = step_data.StepData(timestep, None, {'timestamp': int(time.time())})
episode.append(data)
backend.record_step(data, is_new_episode=True)
while not timestep.last():
action = np.random.randint(low=0, high=3)
timestep = env.step(action)
data = step_data.StepData(timestep, action,
{'timestamp': int(time.time())})
episode.append(data)
backend.record_step(data, is_new_episode=False)
backend.set_episode_metadata({'episode_id': index})
episodes_data.append(episode)
logging.info('Done training a random agent for %r episodes.', num_episodes)
env.close()
backend.close()
return episodes_data
def catch_env_tfds_config(
name: str = 'catch_example') -> tfds.rlds.rlds_base.DatasetConfig:
"""Creates a TFDS DatasetConfig for the Catch environment."""
return tfds.rlds.rlds_base.DatasetConfig(
name=name,
observation_info=tfds.features.Tensor(
shape=(10, 5), dtype=tf.float32,
encoding=tfds.features.Encoding.ZLIB),
action_info=tf.int64,
reward_info=tf.float64,
discount_info=tf.float64,
step_metadata_info={'timestamp': tf.int64},
episode_metadata_info={'episode_id': tf.int64})
def tfds_backend_catch_env(
data_directory: str,
max_episodes_per_file: int = 1,
split_name: Optional[str] = None,
ds_metadata: Optional[Dict[Any, Any]] = None,
store_ds_metadata: bool = True,
) -> tfds_backend_writer.TFDSBackendWriter:
"""Creates a TFDS Backend Writer for the Catch Environment.
Args:
data_directory: directory where the data will be created (it has to exist).
max_episodes_per_file: maximum number of episodes per file.
    split_name: name of the TFDS split to create.
ds_metadata: metadata of the dataset.
    store_ds_metadata: whether the dataset-level metadata should be stored.
Returns:
TFDS backend writer.
"""
return tfds_backend_writer.TFDSBackendWriter(
data_directory=data_directory,
split_name=split_name,
ds_config=catch_env_tfds_config(),
max_episodes_per_file=max_episodes_per_file,
metadata=ds_metadata,
store_ds_metadata=store_ds_metadata)
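# Example (illustrative sketch): generating a few Catch episodes with the
# helpers above and reading them back through TFDS. `data_dir` is assumed to
# be an existing, writable directory.
def _example_catch_round_trip(data_dir: str) -> None:
  generate_episode_data(
      backend=tfds_backend_catch_env(
          data_directory=data_dir, max_episodes_per_file=2),
      num_episodes=3)
  builder = tfds.builder_from_directory(data_dir)
  ds = builder.as_dataset(split='train')  # Default split name is 'train'.
  for episode in ds:
    _ = episode['steps']  # A nested tf.data.Dataset of RLDS steps.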
| envlogger-main | envlogger/backends/tfds_backend_testlib.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""For reading trajectory data from riegeli files."""
import copy
from typing import Any, Dict, Tuple
from absl import logging
import dm_env
from envlogger import step_data
from envlogger.backends import backend_reader
from envlogger.backends.python import episode_info
from envlogger.backends.python import riegeli_dataset_reader
from envlogger.converters import codec
from envlogger.proto import storage_pb2
class RiegeliBackendReader(backend_reader.BackendReader):
"""A class that reads logs produced by an EnvironmentLoggerWrapper instance.
Attributes:
episodes: Traverse the data episode-wise in list-like fashion.
steps: Traverse the data stepwise in list-like fashion.
"""
def __init__(self, data_directory: str):
self._reader = riegeli_dataset_reader.RiegeliDatasetReader()
try:
self._reader.init(data_directory)
except RuntimeError as e:
error_message = str(e)
if error_message.startswith('NOT_FOUND: Empty steps in '):
# This case happens frequently when clients abruptly kill the
# EnvironmentLogger without calling its .close() method, which then
# causes the last shard to be truncated. This can be because the client
# exited successfully and "forgot" to call .close(), which is a bug, but
# also because of a preempted work unit, which is expected to happen
# under distributed settings.
# We can't do much to fix the bad usages, but we can be a bit more
# permissive and try to read the successful shards.
logging.exception("""Ignoring error due to empty step offset file.
*********************************
**** You likely forgot to ***
**** call close() on your env ***
**** ***
*********************************""")
else:
raise
self._metadata = codec.decode(self._reader.metadata()) or {}
super().__init__()
def _copy(self) -> 'RiegeliBackendReader':
c = copy.copy(self)
c._metadata = copy.deepcopy(self._metadata)
c._reader = self._reader.clone()
return c
def close(self):
if self._reader is not None:
self._reader.close()
self._reader = None
def _decode_step_data(self, data: Tuple[Any, Any, Any]) -> step_data.StepData:
"""Recovers dm_env.TimeStep from logged data (either dict or tuple)."""
# Recover the TimeStep from the first tuple element.
timestep = dm_env.TimeStep(
dm_env.StepType(data[0][0]), data[0][1], data[0][2], data[0][3])
return step_data.StepData(timestep, data[1], data[2])
def _get_num_steps(self):
return self._reader.num_steps
def _get_num_episodes(self):
return self._reader.num_episodes
def _get_nth_step(self, i: int) -> step_data.StepData:
"""Returns the timestep given by offset `i` (0-based)."""
serialized_data = self._reader.serialized_step(i)
data = storage_pb2.Data.FromString(serialized_data)
return self._decode_step_data(codec.decode(data))
def _get_nth_episode_info(self,
i: int,
include_metadata: bool = False
) -> episode_info.EpisodeInfo:
"""Returns the index of the start of nth episode, and its length."""
return self._reader.episode(i, include_metadata)
def metadata(self):
return self._metadata
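# Example (illustrative sketch): reading back a trajectory directory that was
# produced by a RiegeliBackendWriter. `data_directory` is an assumed path; the
# reader is used as a context manager so that it is closed properly.
def _example_read_trajectories(data_directory: str) -> None:
  with RiegeliBackendReader(data_directory) as reader:
    logging.info('Total steps: %d, total episodes: %d',
                 len(reader.steps), len(reader.episodes))
    for episode in reader.episodes:
      first = episode[0]  # A step_data.StepData instance.
      logging.info('Episode length: %d, first step type: %r',
                   len(episode), first.timestep.step_type)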
| envlogger-main | envlogger/backends/riegeli_backend_reader.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""For writing trajectory data to riegeli files."""
from typing import Any, Optional
from absl import logging
from envlogger import step_data
from envlogger.backends import backend_writer
from envlogger.backends import schedulers
from envlogger.backends.python import riegeli_dataset_writer
from envlogger.converters import codec
class RiegeliBackendWriter(backend_writer.BackendWriter):
"""Backend that writes trajectory data to riegeli files."""
def __init__(
self,
data_directory: str,
max_episodes_per_file: int = 10000,
writer_options: str = 'transpose,brotli:6,chunk_size:1M',
flush_scheduler: Optional[schedulers.Scheduler] = None,
**base_kwargs,
):
"""Constructor.
Calling `close()` will flush the trajectories and the index to disk and will
ensure that they can be read later on. If it isn't called, there is a large
risk of losing data. This is particularly common in some RL frameworks that
    do not clean up their environments. If the environment runs for a very long
    time, only the last shard is at risk, but if the instance is short-lived,
    then a large portion of the trajectories can disappear.
Args:
data_directory: Destination for the episode data.
max_episodes_per_file: maximum number of episodes stored in one file.
writer_options: Comma-separated list of options that are passed to the
Riegeli RecordWriter as is.
flush_scheduler: This controls when data is flushed to permanent storage.
If `None`, it defaults to a step-wise Bernoulli scheduler with 1/5000
chances of flushing.
**base_kwargs: arguments for the base class.
"""
super().__init__(**base_kwargs)
self._data_directory = data_directory
if flush_scheduler is None:
self._flush_scheduler = schedulers.BernoulliStepScheduler(1.0 / 5000)
else:
self._flush_scheduler = flush_scheduler
self._data_writer = riegeli_dataset_writer.RiegeliDatasetWriter()
logging.info('self._data_directory: %r', self._data_directory)
metadata = self._metadata or {}
try:
self._data_writer.init(
data_dir=data_directory,
metadata=codec.encode(metadata),
max_episodes_per_shard=max_episodes_per_file,
writer_options=writer_options)
except RuntimeError as e:
logging.exception('exception: %r', e)
def _record_step(self, data: step_data.StepData,
is_new_episode: bool) -> None:
encoded_data = codec.encode(data)
if not self._data_writer.add_step(encoded_data, is_new_episode):
raise RuntimeError(
'Failed to write `data`. Please see logs for more details.')
if self._flush_scheduler is not None and not self._flush_scheduler(data):
return
self._data_writer.flush()
def set_episode_metadata(self, data: Any) -> None:
encoded_data = codec.encode(data)
self._data_writer.set_episode_metadata(encoded_data)
def close(self) -> None:
logging.info('Deleting the backend with data_dir: %r', self._data_directory)
self._data_writer.close()
logging.info('Done deleting the backend with data_dir: %r',
self._data_directory)
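# Example (illustrative sketch): logging a short random trajectory from the
# Catch environment used in the unit tests. `data_directory` is assumed to
# exist and be writable. Note the final `close()` call, which flushes the last
# shard as described in the constructor docstring.
def _example_log_catch_trajectory(data_directory: str) -> None:
  import numpy as np  # Local imports keep the sketch self-contained.
  from envlogger.testing import catch_env
  env = catch_env.Catch()
  writer = RiegeliBackendWriter(
      data_directory=data_directory,
      max_episodes_per_file=100,
      flush_scheduler=schedulers.NStepScheduler(step_interval=100))
  timestep = env.reset()
  writer.record_step(
      step_data.StepData(timestep, None, None), is_new_episode=True)
  while not timestep.last():
    action = np.random.randint(3)
    timestep = env.step(action)
    writer.record_step(
        step_data.StepData(timestep, action, None), is_new_episode=False)
  env.close()
  writer.close()  # Flushes the trajectory and the index to disk.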
| envlogger-main | envlogger/backends/riegeli_backend_writer.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An enumeration that specifies the logging backend."""
import enum
class BackendType(enum.IntEnum):
RIEGELI = 0
IN_MEMORY = 1
| envlogger-main | envlogger/backends/backend_type.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for riegeli_backend_writer.
Note: A lot of the test coverage for riegeli_backend_writer is provided by tests
in environment_logger_test.
"""
import operator
from typing import Any, List, Optional, Tuple
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
from envlogger import step_data
from envlogger.backends import riegeli_backend_reader
from envlogger.backends import riegeli_backend_writer
from envlogger.backends import schedulers
from envlogger.testing import catch_env
import numpy as np
class RiegeliBackendTest(parameterized.TestCase):
def _collect_episode_data(
self,
env: Optional[dm_env.Environment] = None,
num_episodes: int = 2,
metadata: Optional[Any] = None,
scheduler: Optional[schedulers.Scheduler] = None,
max_episodes_per_file: int = 1,
writer_options: str = 'transpose,brotli:6,chunk_size:1M'
) -> Tuple[List[step_data.StepData], str]:
if env is None:
logging.info('Creating Catch environment...')
env = catch_env.Catch()
logging.info('Done creating Catch environment.')
temp_dir = self.create_tempdir()
data_directory = temp_dir.full_path
backend = riegeli_backend_writer.RiegeliBackendWriter(
data_directory=data_directory,
max_episodes_per_file=max_episodes_per_file,
metadata=metadata,
scheduler=scheduler,
writer_options=writer_options)
logging.info('Training a random agent for %r episodes...', num_episodes)
num_actions = 3
episodes_data = []
for _ in range(num_episodes):
timestep = env.reset()
data = step_data.StepData(timestep, None, None)
episodes_data.append(data)
backend.record_step(data, is_new_episode=True)
while not timestep.last():
action = np.random.choice(num_actions)
timestep = env.step(action)
data = step_data.StepData(timestep, action, None)
episodes_data.append(data)
backend.record_step(data, is_new_episode=False)
logging.info('Done training a random agent for %r episodes.', num_episodes)
env.close()
backend.close()
return episodes_data, data_directory
def _validate_steps(self,
actual_steps,
expected_steps,
num_episodes,
num_steps_per_episode=10):
num_steps = num_episodes * num_steps_per_episode
self.assertLen(actual_steps, num_steps)
self.assertLen(expected_steps, num_steps)
for actual_step, expected_step in zip(actual_steps, expected_steps):
np.testing.assert_equal(actual_step, expected_step)
def test_step_roundtrip(self):
"""Test logging without having an environment."""
num_episodes = 3
expected_steps, data_directory = self._collect_episode_data(
num_episodes=num_episodes)
with riegeli_backend_reader.RiegeliBackendReader(
data_directory) as data_reader:
actual_steps = list(data_reader.steps)
self._validate_steps(actual_steps, expected_steps, num_episodes)
def test_episodes_round_trip(self):
num_episodes = 3
num_steps_per_episode = 10
expected_steps, data_directory = self._collect_episode_data(
num_episodes=num_episodes)
with riegeli_backend_reader.RiegeliBackendReader(
data_directory) as data_reader:
for episode_index, episode in enumerate(data_reader.episodes):
episode_actual_steps = list(episode)
episode_expected_steps = expected_steps[episode_index *
num_steps_per_episode:
(episode_index + 1) *
num_steps_per_episode]
self._validate_steps(
episode_actual_steps, episode_expected_steps, num_episodes=1)
def test_scheduler(self):
num_episodes = 2
step_interval = 2
scheduler = schedulers.NStepScheduler(step_interval=step_interval)
expected_steps, data_directory = self._collect_episode_data(
num_episodes=num_episodes, scheduler=scheduler)
with riegeli_backend_reader.RiegeliBackendReader(
data_directory) as data_reader:
expected_steps = [
step for i, step in enumerate(expected_steps)
if i % step_interval == 0
]
actual_steps = list(data_reader.steps)
self._validate_steps(
actual_steps,
expected_steps,
num_episodes,
          num_steps_per_episode=10 // step_interval)
def test_step_negative_indices(self):
"""Ensures that negative step indices are handled correctly."""
_, data_directory = self._collect_episode_data(
num_episodes=6, max_episodes_per_file=3)
with riegeli_backend_reader.RiegeliBackendReader(
data_directory) as data_reader:
np.testing.assert_equal(data_reader.steps[-1],
data_reader.steps[len(data_reader.steps) - 1])
np.testing.assert_equal(data_reader.steps[-len(data_reader.steps)],
data_reader.steps[0])
def test_step_out_of_bounds_indices(self):
"""Ensures that out of bounds step indices are handled correctly."""
_, data_directory = self._collect_episode_data(
num_episodes=6, max_episodes_per_file=3)
with riegeli_backend_reader.RiegeliBackendReader(
data_directory) as data_reader:
self.assertRaises(IndexError, operator.getitem, data_reader.steps,
len(data_reader.steps))
self.assertRaises(IndexError, operator.getitem, data_reader.steps,
-len(data_reader.steps) - 1)
def test_episode_negative_indices(self):
"""Ensures that negative episode indices are handled correctly."""
_, data_directory = self._collect_episode_data(
num_episodes=6, max_episodes_per_file=3)
with riegeli_backend_reader.RiegeliBackendReader(
data_directory) as data_reader:
np.testing.assert_equal(
data_reader.episodes[-1][:],
data_reader.episodes[len(data_reader.episodes) - 1][:])
np.testing.assert_equal(
data_reader.episodes[-len(data_reader.episodes)][:],
data_reader.episodes[0][:])
def test_episode_out_of_bounds_indices(self):
"""Ensures that out of bounds episode indices are handled correctly."""
_, data_directory = self._collect_episode_data(
num_episodes=6, max_episodes_per_file=3)
with riegeli_backend_reader.RiegeliBackendReader(
data_directory) as data_reader:
self.assertRaises(IndexError, operator.getitem, data_reader.episodes,
len(data_reader.episodes))
self.assertRaises(IndexError, operator.getitem, data_reader.episodes,
-len(data_reader.episodes) - 1)
def test_episode_step_negative_indices(self):
"""Ensures that negative episode step indices are handled correctly."""
_, data_directory = self._collect_episode_data(
num_episodes=6, max_episodes_per_file=3)
with riegeli_backend_reader.RiegeliBackendReader(
data_directory) as data_reader:
for episode in data_reader.episodes:
np.testing.assert_equal(episode[-1], episode[len(episode) - 1])
np.testing.assert_equal(episode[-len(episode)], episode[0])
def test_episode_step_out_of_bounds_indices(self):
"""Ensures that out of bounds episode step indices are handled correctly."""
_, data_directory = self._collect_episode_data(
num_episodes=6, max_episodes_per_file=3)
with riegeli_backend_reader.RiegeliBackendReader(
data_directory) as data_reader:
for episode in data_reader.episodes:
self.assertRaises(IndexError, operator.getitem, episode, len(episode))
self.assertRaises(IndexError, operator.getitem, episode,
-len(episode) - 1)
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/backends/riegeli_backend_writer_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFDS backend for Envlogger."""
import dataclasses
from typing import Any, Dict, List, Optional
from absl import logging
from envlogger import step_data
from envlogger.backends import backend_writer
from envlogger.backends import rlds_utils
import tensorflow_datasets as tfds
DatasetConfig = tfds.rlds.rlds_base.DatasetConfig
@dataclasses.dataclass
class Episode(object):
"""Episode that is being constructed."""
prev_step: step_data.StepData
steps: Optional[List[rlds_utils.Step]] = None
metadata: Optional[Dict[str, Any]] = None
def add_step(self, step: step_data.StepData) -> None:
rlds_step = rlds_utils.to_rlds_step(self.prev_step, step)
if self.steps is None:
self.steps = []
self.steps.append(rlds_step)
self.prev_step = step
def get_rlds_episode(self) -> Dict[str, Any]:
last_step = rlds_utils.to_rlds_step(self.prev_step, None)
if self.steps is None:
self.steps = []
if self.metadata is None:
self.metadata = {}
return {'steps': self.steps + [last_step], **self.metadata}
class TFDSBackendWriter(backend_writer.BackendWriter):
"""Backend that writes trajectory data in TFDS format (and RLDS structure)."""
def __init__(self,
data_directory: str,
ds_config: tfds.rlds.rlds_base.DatasetConfig,
max_episodes_per_file: int = 1000,
split_name: Optional[str] = None,
version: str = '0.0.1',
store_ds_metadata: bool = False,
**base_kwargs):
"""Constructor.
Args:
data_directory: Directory to store the data
ds_config: Dataset Configuration.
max_episodes_per_file: Number of episodes to store per shard.
split_name: Name to be used by the split. If None, 'train' will be used.
version: version (major.minor.patch) of the dataset.
      store_ds_metadata: whether to store the dataset-level metadata. If
        False, it is omitted from the dataset info.
**base_kwargs: arguments for the base class.
"""
super().__init__(**base_kwargs)
if not split_name:
split_name = 'train'
ds_identity = tfds.core.dataset_info.DatasetIdentity(
name=ds_config.name,
version=tfds.core.Version(version),
data_dir=data_directory,
module_name='')
if store_ds_metadata:
metadata = self._metadata
else:
metadata = None
self._data_directory = data_directory
self._ds_info = tfds.rlds.rlds_base.build_info(ds_config, ds_identity,
metadata)
self._ds_info.set_file_format('tfrecord')
self._current_episode = None
self._sequential_writer = tfds.core.SequentialWriter(
self._ds_info, max_episodes_per_file)
self._split_name = split_name
self._sequential_writer.initialize_splits([split_name])
logging.info('self._data_directory: %r', self._data_directory)
def _write_and_reset_episode(self):
if self._current_episode is not None:
self._sequential_writer.add_examples(
{self._split_name: [self._current_episode.get_rlds_episode()]})
self._current_episode = None
def _record_step(self, data: step_data.StepData,
is_new_episode: bool) -> None:
"""Stores RLDS steps in TFDS format."""
if is_new_episode:
self._write_and_reset_episode()
if self._current_episode is None:
self._current_episode = Episode(prev_step=data)
else:
self._current_episode.add_step(data)
def set_episode_metadata(self, data: Dict[str, Any]) -> None:
self._current_episode.metadata = data
def close(self) -> None:
logging.info('Deleting the backend with data_dir: %r', self._data_directory)
self._write_and_reset_episode()
self._sequential_writer.close_all()
logging.info('Done deleting the backend with data_dir: %r',
self._data_directory)
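# Example (illustrative sketch): writing a single two-step episode. The
# `ds_config` argument is assumed to be a DatasetConfig whose observation,
# action, reward and discount types match the placeholder values used below
# and which declares no step or episode metadata.
def _example_write_one_episode(data_directory: str,
                               ds_config: DatasetConfig) -> None:
  import dm_env  # Local import to keep the sketch self-contained.
  writer = TFDSBackendWriter(
      data_directory=data_directory,
      ds_config=ds_config,
      max_episodes_per_file=500)
  first = step_data.StepData(dm_env.restart(observation=0), None, None)
  writer.record_step(first, is_new_episode=True)
  last = step_data.StepData(
      dm_env.termination(reward=0.0, observation=1), 0, None)
  writer.record_step(last, is_new_episode=False)
  writer.close()  # Writes the buffered episode and finalizes the split.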
| envlogger-main | envlogger/backends/tfds_backend_writer.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for in_memory_backend."""
import operator
from typing import List, Optional, Tuple
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
from envlogger import step_data
from envlogger.backends import in_memory_backend
from envlogger.backends import schedulers
from envlogger.testing import catch_env
import numpy as np
def _collect_episode_data(
num_episodes: int,
scheduler: Optional[schedulers.Scheduler] = None
) -> Tuple[List[step_data.StepData], in_memory_backend.InMemoryBackendWriter]:
logging.info('Creating Catch environment...')
env = catch_env.Catch()
logging.info('Done creating Catch environment.')
backend = in_memory_backend.InMemoryBackendWriter(scheduler=scheduler)
logging.info('Training a random agent for %r episodes...', num_episodes)
num_actions = 3
episodes_data = []
for _ in range(num_episodes):
timestep = env.reset()
data = step_data.StepData(timestep, None, None)
episodes_data.append(data)
backend.record_step(data, is_new_episode=True)
while not timestep.last():
action = np.random.choice(num_actions)
timestep = env.step(action)
data = step_data.StepData(timestep, action, None)
episodes_data.append(data)
backend.record_step(data, is_new_episode=False)
logging.info('Done training a random agent for %r episodes.', num_episodes)
env.close()
return episodes_data, backend
class InMemoryBackendTest(parameterized.TestCase):
def _validate_steps(self,
actual_steps,
expected_steps,
num_episodes,
num_steps_per_episode=10):
num_steps = num_episodes * num_steps_per_episode
self.assertLen(actual_steps, num_steps)
self.assertLen(expected_steps, num_steps)
for actual_step, expected_step in zip(actual_steps, expected_steps):
np.testing.assert_equal(actual_step, expected_step)
def test_steps_round_trip(self):
num_episodes = 3
expected_steps, backend = _collect_episode_data(num_episodes)
actual_steps = list(in_memory_backend.InMemoryBackendReader(backend).steps)
self._validate_steps(actual_steps, expected_steps, num_episodes)
def test_episodes_round_trip(self):
num_episodes = 3
num_steps_per_episode = 10
expected_steps, backend = _collect_episode_data(num_episodes)
for episode_index, episode in enumerate(
in_memory_backend.InMemoryBackendReader(backend).episodes):
episode_actual_steps = list(episode)
episode_expected_steps = expected_steps[episode_index *
num_steps_per_episode:
(episode_index + 1) *
num_steps_per_episode]
self._validate_steps(
episode_actual_steps, episode_expected_steps, num_episodes=1)
def test_scheduler(self):
num_episodes = 2
step_interval = 2
scheduler = schedulers.NStepScheduler(step_interval=step_interval)
expected_steps, backend = _collect_episode_data(num_episodes, scheduler)
expected_steps = [
step for i, step in enumerate(expected_steps) if i % step_interval == 0
]
actual_steps = list(in_memory_backend.InMemoryBackendReader(backend).steps)
self._validate_steps(
actual_steps,
expected_steps,
num_episodes,
        num_steps_per_episode=10 // step_interval)
def test_step_negative_indices(self):
"""Ensures that negative step indices are handled correctly."""
_, backend = _collect_episode_data(num_episodes=6)
data_reader = in_memory_backend.InMemoryBackendReader(backend)
np.testing.assert_equal(data_reader.steps[-1],
data_reader.steps[len(data_reader.steps) - 1])
np.testing.assert_equal(data_reader.steps[-len(data_reader.steps)],
data_reader.steps[0])
def test_step_out_of_bounds_indices(self):
"""Ensures that out of bounds step indices are handled correctly."""
_, backend = _collect_episode_data(num_episodes=6)
data_reader = in_memory_backend.InMemoryBackendReader(backend)
self.assertRaises(IndexError, operator.getitem, data_reader.steps,
len(data_reader.steps))
self.assertRaises(IndexError, operator.getitem, data_reader.steps,
-len(data_reader.steps) - 1)
def test_episode_negative_indices(self):
"""Ensures that negative episode indices are handled correctly."""
_, backend = _collect_episode_data(num_episodes=6)
data_reader = in_memory_backend.InMemoryBackendReader(backend)
np.testing.assert_equal(
data_reader.episodes[-1][:],
data_reader.episodes[len(data_reader.episodes) - 1][:])
np.testing.assert_equal(data_reader.episodes[-len(data_reader.episodes)][:],
data_reader.episodes[0][:])
def test_episode_out_of_bounds_indices(self):
"""Ensures that out of bounds episode indices are handled correctly."""
_, backend = _collect_episode_data(num_episodes=6)
data_reader = in_memory_backend.InMemoryBackendReader(backend)
self.assertRaises(IndexError, operator.getitem, data_reader.episodes,
len(data_reader.episodes))
self.assertRaises(IndexError, operator.getitem, data_reader.episodes,
-len(data_reader.episodes) - 1)
def test_episode_step_negative_indices(self):
"""Ensures that negative episode step indices are handled correctly."""
_, backend = _collect_episode_data(num_episodes=6)
data_reader = in_memory_backend.InMemoryBackendReader(backend)
for episode in data_reader.episodes:
np.testing.assert_equal(episode[-1], episode[len(episode) - 1])
np.testing.assert_equal(episode[-len(episode)], episode[0])
def test_episode_step_out_of_bounds_indices(self):
"""Ensures that out of bounds episode step indices are handled correctly."""
_, backend = _collect_episode_data(num_episodes=6)
data_reader = in_memory_backend.InMemoryBackendReader(backend)
for episode in data_reader.episodes:
self.assertRaises(IndexError, operator.getitem, episode, len(episode))
self.assertRaises(IndexError, operator.getitem, episode,
-len(episode) - 1)
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/backends/in_memory_backend_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common logging scheduling strategies."""
from typing import Callable, List, Optional, Union
from envlogger import step_data
import numpy as np
# A Scheduler returns True when something should be activated and False
# otherwise.
Scheduler = Callable[[step_data.StepData], bool]
class NStepScheduler:
"""Returns `True` every N times it is called."""
def __init__(self, step_interval: int):
if step_interval <= 0:
raise ValueError(f'step_interval must be positive, got {step_interval}')
self._step_interval = step_interval
self._step_counter = 0
def __call__(self, unused_data: step_data.StepData):
"""Returns `True` every N times it is called."""
should_log = self._step_counter % self._step_interval == 0
self._step_counter += 1
return should_log
class BernoulliStepScheduler:
"""Returns `True` with a given probability."""
def __init__(self, keep_probability: float, seed: Optional[int] = None):
if keep_probability < 0.0 or keep_probability > 1.0:
raise ValueError(
f'keep_probability must be in [0,1], got: {keep_probability}')
self._keep_probability = keep_probability
self._rng = np.random.default_rng(seed)
def __call__(self, unused_data: step_data.StepData):
"""Returns `True` with probability `self._keep_probability`."""
return self._rng.random() < self._keep_probability
class NEpisodeScheduler:
"""Returns `True` every N episodes."""
def __init__(self, episode_interval: int):
if episode_interval <= 0:
raise ValueError(
f'episode_interval must be positive, got {episode_interval}')
self._episode_interval = episode_interval
self._episode_counter = 0
def __call__(self, data: step_data.StepData):
"""Returns `True` every N episodes."""
should_log = self._episode_counter % self._episode_interval == 0
if data.timestep.last():
self._episode_counter += 1
return should_log
class BernoulliEpisodeScheduler:
"""Returns `True` with a given probability at every episode."""
def __init__(self, keep_probability: float, seed: Optional[int] = None):
if keep_probability < 0.0 or keep_probability > 1.0:
raise ValueError(
f'keep_probability must be in [0,1], got: {keep_probability}')
self._keep_probability = keep_probability
self._rng = np.random.default_rng(seed)
self._current_p = self._rng.random()
def __call__(self, data: step_data.StepData):
"""Returns `True` with probability `self._keep_probability`."""
if data.timestep.last():
self._current_p = self._rng.random()
return self._current_p < self._keep_probability
class ListStepScheduler:
"""Returns `True` for steps in `desired_steps`.
  Please see unit tests for examples of using this scheduler. In particular,
  you can use NumPy functions such as np.logspace() to generate non-linear
  step schedules.
"""
def __init__(self, desired_steps: Union[List[int], np.ndarray]):
if (isinstance(desired_steps, np.ndarray) and
not (desired_steps.dtype == np.int32 or
desired_steps.dtype == np.int64)):
raise TypeError(
          f'desired_steps.dtype must be np.int32 or np.int64: {desired_steps} '
f'(dtype: {desired_steps.dtype})')
if len(desired_steps) <= 0:
raise ValueError(f'desired_steps cannot be empty: {desired_steps}')
self._desired_steps = set(desired_steps)
self._step_counter = 0
def __call__(self, data: step_data.StepData):
"""Returns `True` every N episodes."""
should_log = self._step_counter in self._desired_steps
self._step_counter += 1
return should_log
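# Illustrative sketch (not part of the original module): the docstring above
# mentions logspace() for non-linear schedules; this hypothetical helper shows
# one way to build such a scheduler. ListStepScheduler ignores the contents of
# the step data, so `None` is passed in the loop below.
def _list_step_scheduler_example() -> List[int]:
  # Log-spaced steps between 1 and 1000, cast to int64 as required above.
  desired = np.unique(np.logspace(0, 3, num=10).astype(np.int64))
  scheduler = ListStepScheduler(desired_steps=desired)
  # Collects the step indices at which the scheduler activates.
  return [step for step in range(1001) if scheduler(None)]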
class ListEpisodeScheduler:
"""Returns `True` for episodes in `desired_episodes`.
  Please see unit tests for examples of using this scheduler. In particular,
  you can use NumPy functions such as np.logspace() to generate non-linear
  episode schedules.
"""
def __init__(self, desired_episodes: Union[List[int], np.ndarray]):
if (isinstance(desired_episodes, np.ndarray) and
not (desired_episodes.dtype == np.int32 or
desired_episodes.dtype == np.int64)):
      raise TypeError('desired_episodes.dtype must be np.int32 or np.int64: '
f'{desired_episodes} (dtype: {desired_episodes.dtype})')
if len(desired_episodes) <= 0:
raise ValueError(f'desired_episodes cannot be empty: {desired_episodes}')
self._desired_episodes = set(desired_episodes)
self._episode_counter = 0
def __call__(self, data: step_data.StepData):
"""Returns `True` every N episodes."""
should_log = self._episode_counter in self._desired_episodes
if data.timestep.last():
self._episode_counter += 1
return should_log
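# Illustrative sketch (not part of the original module): episode-based
# schedulers only advance their counter when `data.timestep.last()` is True,
# so a realistic StepData is needed. The reward, discount, observation and
# action values below are arbitrary placeholders.
def _list_episode_scheduler_example() -> List[bool]:
  import dm_env  # Assumed available; step_data.StepData wraps dm_env.TimeStep.
  scheduler = ListEpisodeScheduler(desired_episodes=[0, 2])
  def make_last_step() -> step_data.StepData:
    timestep = dm_env.TimeStep(
        step_type=dm_env.StepType.LAST,
        reward=0.0,
        discount=1.0,
        observation=0.0)
    return step_data.StepData(timestep=timestep, action=0)
  # Three single-step episodes; episodes 0 and 2 are kept: [True, False, True].
  return [scheduler(make_last_step()) for _ in range(3)]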
| envlogger-main | envlogger/backends/schedulers.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for episode_info.cc."""
import random
from absl.testing import absltest
from absl.testing import parameterized
from envlogger.backends.python import episode_info
from envlogger.proto import storage_pb2
class EpisodeInfoTest(parameterized.TestCase):
def test_empty_episode_info(self):
episode = episode_info.EpisodeInfo()
self.assertEqual(episode.start, 0)
self.assertEqual(episode.num_steps, 0)
self.assertIsNone(episode.metadata)
def test_episode_info_init_with_random_kwargs(self):
random_starts = [random.randint(-1, 10000) for _ in range(100)]
random_num_steps = [random.randint(-1, 10000) for _ in range(100)]
random_metadata = []
dimension = storage_pb2.Datum.Shape.Dim()
dimension.size = -438
for _ in range(100):
metadata = storage_pb2.Data()
metadata.datum.shape.dim.append(dimension)
metadata.datum.values.int32_values.append(random.randint(-1, 10000))
random_metadata.append(metadata)
for start, num_steps, metadata in zip(random_starts, random_num_steps,
random_metadata):
episode = episode_info.EpisodeInfo(
start=start, num_steps=num_steps, metadata=metadata)
self.assertEqual(episode.start, start)
self.assertEqual(episode.num_steps, num_steps)
self.assertSequenceEqual(episode.metadata.datum.values.int32_values,
metadata.datum.values.int32_values)
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/backends/python/episode_info_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| envlogger-main | envlogger/backends/python/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes and reads data using RiegeliDataset{Writer, Reader}."""
import os
import pickle
import random
import shutil
from absl import logging
from absl.testing import absltest
from envlogger.backends.python import riegeli_dataset_reader
from envlogger.backends.python import riegeli_dataset_writer
from envlogger.converters import codec
from envlogger.proto import storage_pb2
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
class RiegeliDatasetTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._directory = os.path.join(absltest.get_default_test_tmpdir(), 'blah')
os.makedirs(self._directory)
def tearDown(self):
shutil.rmtree(self._directory)
super().tearDown()
def test_reader_non_existent_data_dir(self):
"""Checks that an exception is raised when a `data_dir` does not exist."""
reader = riegeli_dataset_reader.RiegeliDatasetReader()
self.assertRaises(RuntimeError, reader.init, data_dir='/i/do/not/exist/')
def test_writer_non_existent_data_dir(self):
"""Checks that an exception is raised when a `data_dir` does not exist."""
writer = riegeli_dataset_writer.RiegeliDatasetWriter()
self.assertRaises(RuntimeError, writer.init, data_dir='/i/do/not/exist/')
def test_storage_data_payload(self):
"""Ensures that we can read and write `Data` proto messages."""
writer = riegeli_dataset_writer.RiegeliDatasetWriter()
try:
writer.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize writer')
for i in range(10):
writer.add_step(codec.encode(i))
writer.close()
reader = riegeli_dataset_reader.RiegeliDatasetReader()
try:
reader.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize reader')
for i in range(reader.num_steps):
step = reader.step(i)
self.assertEqual(codec.decode(step), i)
reader.close()
def test_non_storage_data_payload(self):
"""Ensures that we can read and write proto messages other than `Data`."""
writer = riegeli_dataset_writer.RiegeliDatasetWriter()
try:
writer.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize writer')
for i in range(10):
dim = storage_pb2.Datum.Shape.Dim()
dim.size = i
writer.add_step(dim)
writer.close()
reader = riegeli_dataset_reader.RiegeliDatasetReader()
try:
reader.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize reader')
for i in range(reader.num_steps):
step = reader.step(i, storage_pb2.Datum.Shape.Dim)
self.assertEqual(step.size, i)
reader.close()
def test_dynamic_data_payload(self):
"""Checks that we can read and write dynamically obtained proto messages."""
pool = descriptor_pool.Default()
factory = message_factory.MessageFactory(pool)
prototype = factory.GetPrototype(
pool.FindMessageTypeByName('envlogger.Datum.Values'))
writer = riegeli_dataset_writer.RiegeliDatasetWriter()
try:
writer.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize writer')
for i in range(10):
values = prototype(float_values=[3.14 + i])
writer.add_step(values)
writer.close()
reader = riegeli_dataset_reader.RiegeliDatasetReader()
try:
reader.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize reader')
for i in range(reader.num_steps):
step = reader.step(i, prototype)
self.assertAlmostEqual(step.float_values[0], 3.14 + i, places=3)
# Protobuf message _objects_ also define `FromString()` and should work.
step2 = reader.step(i, prototype())
self.assertAlmostEqual(step2.float_values[0], 3.14 + i, places=3)
reader.close()
def test_clone(self):
"""Ensures that we can read the same data with a cloned reader."""
writer = riegeli_dataset_writer.RiegeliDatasetWriter()
try:
writer.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize writer')
for i in range(10):
writer.add_step(codec.encode(i))
writer.close()
reader = riegeli_dataset_reader.RiegeliDatasetReader()
try:
reader.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize reader')
cloned = reader.clone()
self.assertEqual(cloned.num_steps, reader.num_steps)
for i in range(reader.num_steps):
step = reader.step(i)
self.assertEqual(codec.decode(step), i)
reader.close()
# Even after closing the original `reader`, the cloned reader should still
# work just like it.
for i in range(cloned.num_steps):
step = cloned.step(i)
self.assertEqual(codec.decode(step), i)
cloned.close()
def test_writer_can_be_pickled(self):
"""RiegeliDatasetWriter pickling support."""
# Arrange.
writer = riegeli_dataset_writer.RiegeliDatasetWriter()
try:
writer.init(data_dir=self._directory, metadata=codec.encode(3.141592))
except RuntimeError:
logging.exception('Failed to initialize `writer`')
# Act.
data = pickle.dumps(writer)
another_writer = pickle.loads(data)
reader = riegeli_dataset_reader.RiegeliDatasetReader()
try:
reader.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize `reader`')
# Assert.
self.assertEqual(writer.data_dir(), another_writer.data_dir())
self.assertEqual(writer.max_episodes_per_shard(),
another_writer.max_episodes_per_shard())
self.assertEqual(writer.writer_options(), another_writer.writer_options())
self.assertEqual(writer.episode_counter(), another_writer.episode_counter())
self.assertAlmostEqual(codec.decode(reader.metadata()), 3.141592)
def test_reader_can_be_pickled(self):
"""RiegeliDatasetReader pickling support."""
# Arrange.
writer = riegeli_dataset_writer.RiegeliDatasetWriter()
try:
writer.init(data_dir=self._directory, metadata=codec.encode(3.141592))
except RuntimeError:
logging.exception('Failed to initialize `writer`')
for i in range(50):
writer.add_step(codec.encode(i), is_new_episode=random.random() < 0.5)
writer.close()
reader = riegeli_dataset_reader.RiegeliDatasetReader()
try:
reader.init(data_dir=self._directory)
except RuntimeError:
logging.exception('Failed to initialize `reader`')
# Act.
data = pickle.dumps(reader)
another_reader = pickle.loads(data)
# Assert.
self.assertEqual(reader.data_dir(), another_reader.data_dir())
self.assertAlmostEqual(codec.decode(reader.metadata()), 3.141592)
self.assertAlmostEqual(
codec.decode(reader.metadata()),
codec.decode(another_reader.metadata()))
self.assertEqual(reader.num_steps, another_reader.num_steps)
self.assertEqual(reader.num_episodes, another_reader.num_episodes)
self.assertEqual(
[codec.decode(reader.step(i)) for i in range(reader.num_steps)], [
codec.decode(another_reader.step(i))
for i in range(another_reader.num_steps)
])
for i in range(reader.num_episodes):
episode = reader.episode(i)
another_episode = another_reader.episode(i)
self.assertEqual(episode.start, another_episode.start)
self.assertEqual(episode.num_steps, another_episode.num_steps)
self.assertEqual(episode.metadata, another_episode.metadata)
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/backends/python/riegeli_dataset_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| envlogger-main | envlogger/backends/cc/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests writing trajectories in one language and reading from another."""
import os
import shutil
import subprocess
from typing import Sequence
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
from rules_python.python.runfiles import runfiles
def _execute_binary(rel_path: str, args: Sequence[str]) -> bytes:
r = runfiles.Create()
path = r.Rlocation(os.path.join('__main__', 'envlogger', rel_path))
cmd = [path] + args
return subprocess.check_output(cmd, env=r.EnvVars())
class CrossLanguageTest(parameterized.TestCase):
def test_py_writer_cc_reader(self):
# Set up a trajectory directory.
trajectories_dir = os.path.join(absltest.TEST_TMPDIR.value, 'my_trajectory')
logging.info('trajectories_dir: %r', trajectories_dir)
os.makedirs(trajectories_dir)
# Find Python writer and run it.
py_writer_output = _execute_binary(
'backends/cross_language_test/py_writer',
args=[f'--trajectories_dir={trajectories_dir}'])
logging.info('py_writer_output: %r', py_writer_output)
# Find C++ reader and run it.
cc_reader_output = _execute_binary(
'backends/cross_language_test/cc_reader',
args=[f'--trajectories_dir={trajectories_dir}'])
logging.info('cc_reader_output: %r', cc_reader_output)
# If everything went well, there should be no
# `subprocess.CalledProcessError`.
logging.info('Cleaning up trajectories_dir %r', trajectories_dir)
shutil.rmtree(trajectories_dir)
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/backends/cross_language_test/cross_language_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple python binary that creates a simple RL trajectory."""
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
import dm_env
import envlogger
import numpy as np
_TRAJECTORIES_DIR = flags.DEFINE_string(
'trajectories_dir', None, 'Path to write trajectory.', required=True)
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
logging.info('Starting Python-based writer...')
logging.info('--trajectories_dir: %r', _TRAJECTORIES_DIR.value)
writer = envlogger.RiegeliBackendWriter(
data_directory=_TRAJECTORIES_DIR.value, metadata={'my_data': [1, 2, 3]})
writer.record_step(
envlogger.StepData(
timestep=dm_env.TimeStep(
observation=np.array([0.0], dtype=np.float32),
reward=0.0,
discount=0.99,
step_type=dm_env.StepType.FIRST),
action=np.int32(100)),
is_new_episode=True)
for i in range(1, 100):
writer.record_step(
envlogger.StepData(
timestep=dm_env.TimeStep(
observation=np.array([float(i)], dtype=np.float32),
reward=i / 100.0,
discount=0.99,
step_type=dm_env.StepType.MID),
action=np.int32(100 - i)),
is_new_episode=False)
writer.close()
if __name__ == '__main__':
app.run(main)
| envlogger-main | envlogger/backends/cross_language_test/py_writer.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoder/decoder for dm_env.specs.Array (and subclasses).
"""
from typing import Any, Dict, List, Optional, Tuple, Union
import dm_env
from dm_env import specs
import numpy as np
_ENVIRONMENT_SPEC_NAMES = [
'observation_spec',
'action_spec',
'reward_spec',
'discount_spec',
]
def encode_environment_specs(
env: Optional[dm_env.Environment]) -> Dict[str, Any]:
"""Encodes all the specs from a given environment."""
if env:
return {
'observation_spec': encode(env.observation_spec()),
'action_spec': encode(env.action_spec()),
'reward_spec': encode(env.reward_spec()),
'discount_spec': encode(env.discount_spec()),
}
return {}
def decode_environment_specs(
encoded_specs: Dict[str, Any]) -> Dict[str, Optional[specs.Array]]:
"""Decodes all the specs of an environment."""
if encoded_specs:
return {spec_name: decode(encoded_specs[spec_name]) # pytype: disable=bad-return-type # always-use-return-annotations
for spec_name in _ENVIRONMENT_SPEC_NAMES}
return {spec_name: None for spec_name in _ENVIRONMENT_SPEC_NAMES}
def _array_spec_to_dict(array_spec: specs.Array) -> Dict[str, Any]:
"""Encodes an Array spec as a dictionary."""
dict_spec = {
'shape': np.array(array_spec.shape, dtype=np.int64),
'dtype': str(array_spec.dtype),
'name': array_spec.name,
}
if isinstance(array_spec, specs.BoundedArray):
dict_spec.update({
'minimum': array_spec.minimum,
'maximum': array_spec.maximum,
})
if isinstance(array_spec, specs.DiscreteArray):
dict_spec.update({'num_values': array_spec.num_values})
return dict_spec
def encode(
spec: Union[specs.Array, List[Any], Tuple[Any], Dict[str, Any]]
) -> Union[List[Any], Tuple[Any], Dict[str, Any]]:
"""Encodes `spec` using plain Python objects.
This function supports bare Array specs, lists of Array specs, Tuples of Array
specs, Dicts of string to Array specs and any combination of these things such
as Dict[str, Tuple[List[Array, Array]]].
Args:
spec: The actual spec to encode.
Returns:
The same spec encoded in a way that can be serialized to disk.
Raises:
TypeError: When the argument is not among the supported types.
"""
if isinstance(spec, specs.Array):
return _array_spec_to_dict(spec)
if isinstance(spec, list):
return [encode(x) for x in spec]
if isinstance(spec, tuple):
return tuple((encode(x) for x in spec))
if isinstance(spec, dict):
return {k: encode(v) for k, v in spec.items()}
raise TypeError(
'encode() should be called with an argument of type specs.Array (and '
f'subclasses), list, tuple or dict. Found {type(spec)}: {spec}.')
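# Illustrative sketch (not part of the original module): shows what encode()
# produces for a simple BoundedArray spec. The spec itself is an arbitrary
# example; the resulting keys mirror _array_spec_to_dict() above.
def _encode_example() -> Dict[str, Any]:
  action_spec = specs.BoundedArray(
      shape=(2,), dtype=np.float32, minimum=-1.0, maximum=1.0, name='action')
  # Roughly: {'shape': array([2]), 'dtype': 'float32', 'name': 'action',
  #           'minimum': -1.0, 'maximum': 1.0}.
  return encode(action_spec)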
def decode(
spec: Union[List[Any], Tuple[Any], Dict[str, Any]]
) -> Union[specs.Array, List[Any], Tuple[Any], Dict[str, Any]]:
"""Parses `spec` into the supported dm_env spec formats."""
if isinstance(spec, dict):
if 'shape' in spec and 'dtype' in spec:
shape = spec['shape'] if spec['shape'] is not None else ()
if 'num_values' in spec:
# DiscreteArray case.
return specs.DiscreteArray(
num_values=spec['num_values'],
dtype=spec['dtype'],
name=spec['name'])
elif 'minimum' in spec and 'maximum' in spec:
# BoundedArray case.
return specs.BoundedArray(
shape=shape,
dtype=spec['dtype'],
minimum=spec['minimum'],
maximum=spec['maximum'],
name=spec['name'])
else:
# Base Array spec case.
return specs.Array(shape=shape, dtype=spec['dtype'], name=spec['name'])
# Recursively decode array elements.
return {k: decode(v) for k, v in spec.items()}
elif isinstance(spec, list):
return [decode(x) for x in spec]
elif isinstance(spec, tuple):
return tuple(decode(x) for x in spec)
raise TypeError(
'decode() should be called with an argument of type list, tuple or dict.'
f' Found: {type(spec)}: {spec}.')
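# Illustrative sketch (not part of the original module): encode() and decode()
# are designed to round-trip, including nested containers, so the decoded
# structure compares equal to the original. The specs below are arbitrary.
def _roundtrip_example() -> bool:
  original = {
      'observation':
          specs.Array(shape=(84, 84, 3), dtype=np.uint8, name='pixels'),
      'action':
          specs.DiscreteArray(num_values=4, name='action'),
  }
  return decode(encode(original)) == original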
| envlogger-main | envlogger/converters/spec_codec.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for spec_codec."""
from typing import Any, Dict
from absl.testing import absltest
from absl.testing import parameterized
import dm_env
from dm_env import specs
from envlogger.converters import spec_codec
import numpy as np
class CustomSpecsEnvironment(dm_env.Environment):
"""An Environment that allows us to customize its specs."""
def __init__(self,
observation_spec,
action_spec,
reward_spec,
discount_spec):
self._observation_spec = observation_spec
self._action_spec = action_spec
self._reward_spec = reward_spec
self._discount_spec = discount_spec
def reset(self):
pass
def step(self, unused_actions):
pass
def discount_spec(self):
return self._discount_spec
def reward_spec(self):
return self._reward_spec
def observation_spec(self):
return self._observation_spec
def action_spec(self):
return self._action_spec
class ArraySpecCodecTest(parameterized.TestCase):
def _compare_spec_dicts(self, actual: Dict[str, Any], expected: Dict[str,
Any]):
"""Checks that `actual` spec dict is equal to `expected`."""
# Check 'name'.
self.assertIn(
'name',
actual,
msg=f'`name` must be present in `actual`. Current contents: {actual}')
self.assertEqual(actual['name'], expected['name'])
# Check 'dtype'.
self.assertIn(
'dtype',
actual,
msg=f'`dtype` must be present in `actual`. Current contents: {actual}')
self.assertEqual(actual['dtype'], expected['dtype'])
# Check 'shape'.
self.assertIn(
'shape',
actual,
msg=f'`shape` must be present in `actual`. Current contents: {actual}')
np.testing.assert_equal(actual['shape'], expected['shape'])
# If 'minimum' and 'maximum' exist, check that it's equal to `actual`'s.
if 'minimum' in expected and 'maximum' in expected:
msg_min = 'Expected actual["minimum"] to be equal to expected["minimum"].'
msg_max = 'Expected actual["maximum"] to be equal to expected["maximum"].'
# If dtypes are float we allow for some decimal imprecision.
if actual['dtype'] == 'float32' or actual['dtype'] == 'float64':
self.assertAlmostEqual(
actual['minimum'], expected['minimum'], msg=msg_min)
self.assertAlmostEqual(
actual['maximum'], expected['maximum'], msg=msg_max)
else:
self.assertEqual(actual['minimum'], expected['minimum'], msg=msg_min)
self.assertEqual(actual['maximum'], expected['maximum'], msg=msg_max)
# If 'num_values' is in `expected`, check that it's equal to `actual`'s.
if 'num_values' in expected:
self.assertEqual(actual['num_values'], expected['num_values'])
##############################################################################
# encode() tests.
##############################################################################
# Single specs.Array.
@parameterized.named_parameters(
('int', 123),
('float', 3.14),
('object', object()),
('function', np.abs),
('module', np),
)
def test_encode_unsupported(self, arg):
"""Checks that `encode(unsupported type)` raises a TypeError."""
self.assertRaises(TypeError, spec_codec.encode, arg)
@parameterized.named_parameters(
('zero_shape_float', specs.Array((), np.float32, 'my_spec'), {
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': 'my_spec',
}),
('zero_shape_int', specs.Array((), np.int32, 'another_spec'), {
'shape': np.array([], np.int64),
'dtype': 'int32',
'name': 'another_spec',
}),
# `name` is not required, so this example should also be valid.
('zero_shape_float_no_name', specs.Array((), np.float32), {
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': None,
}),
('one_dim_shape_float', specs.Array(
(123,), np.float32, 'yet_another_spec'), {
'shape': np.array([123], np.int64),
'dtype': 'float32',
'name': 'yet_another_spec',
}),
('one_dim_shape_int', specs.Array((321,), np.int32, 'me_again'), {
'shape': np.array([321], np.int64),
'dtype': 'int32',
'name': 'me_again',
}),
('two_dim_shape_float', specs.Array((1, 2), np.float32, 'still_here'), {
'shape': np.array([1, 2], np.int64),
'dtype': 'float32',
'name': 'still_here',
}),
('two_dim_shape_int', specs.Array((2, 1), np.int32, 'come_on'), {
'shape': np.array([2, 1], np.int64),
'dtype': 'int32',
'name': 'come_on',
}),
)
def test_encode_array(self, input_spec, expected_spec_dict):
"""Checks that we can encode specs.Arrays."""
self._compare_spec_dicts(spec_codec.encode(input_spec), expected_spec_dict)
# Single specs.BoundedArray.
@parameterized.named_parameters(
('zero_shape_float',
specs.BoundedArray(
(), np.float32, minimum=3.0, maximum=10.0, name='my_spec'), {
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': 'my_spec',
'minimum': 3.0,
'maximum': 10.0,
}),
('zero_shape_int',
specs.BoundedArray(
(), np.int32, minimum=-100, maximum=100, name='another_spec'), {
'shape': np.array([], np.int64),
'dtype': 'int32',
'name': 'another_spec',
'minimum': -100,
'maximum': 100,
}),
# `name` is not required, so this example should also be valid.
('zero_shape_float_no_name',
specs.BoundedArray((), np.float32, minimum=0.0, maximum=1.0), {
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': None,
'minimum': 0.0,
'maximum': 1.0,
}),
('one_dim_shape_float',
specs.BoundedArray((123,),
np.float32,
minimum=123.0,
maximum=321.0,
name='yet_another_spec'), {
'shape': np.array([123], np.int64),
'dtype': 'float32',
'name': 'yet_another_spec',
'minimum': 123.0,
'maximum': 321.0,
}),
('one_dim_shape_int',
specs.BoundedArray(
(321,), np.int32, minimum=314, maximum=628, name='me_again'), {
'shape': np.array([321], np.int64),
'dtype': 'int32',
'name': 'me_again',
'minimum': 314,
'maximum': 628,
}),
('two_dim_shape_float',
specs.BoundedArray((1, 2),
np.float32,
minimum=-1.0 / 12.0,
maximum=2.73,
name='still_here'), {
'shape': np.array([1, 2], np.int64),
'dtype': 'float32',
'name': 'still_here',
'minimum': -1.0 / 12.0,
'maximum': 2.73,
}),
('two_dim_shape_int',
specs.BoundedArray((2, 1),
np.int32,
# Notice that sequence minimums/maximums should also
# be supported.
minimum=[1729],
maximum=[4525],
name='come_on'), {
'shape': np.array([2, 1], np.int64),
'dtype': 'int32',
'name': 'come_on',
'minimum': [1729],
'maximum': [4525],
}),
)
def test_encode_bounded_array(self, input_spec, expected_spec_dict):
"""Checks that we can encode specs.BoundedArrays."""
self._compare_spec_dicts(
spec_codec.encode(input_spec), expected_spec_dict)
# Single specs.DiscreteArray.
@parameterized.named_parameters(
('zero_shape_int', specs.DiscreteArray(
100, np.int64, name='another_spec'), {
'shape': np.array([], np.int64),
'dtype': 'int64',
'num_values': 100,
'name': 'another_spec',
}),
# `name` is not required, so this example should also be valid.
('zero_shape_int_no_name', specs.DiscreteArray(42, np.int32), {
'shape': np.array([], np.int64),
'dtype': 'int32',
'num_values': 42,
'name': None,
}),
)
def test_encode_discrete_array(self, input_spec, expected_spec_dict):
"""Checks that we can encode specs.DiscreArrays."""
self._compare_spec_dicts(
spec_codec.encode(input_spec), expected_spec_dict)
# Lists of specs.Arrays.
@parameterized.named_parameters(
('empty_list', [], []),
('single_spec', [specs.Array((), np.float32, 'my_spec')], [{
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': 'my_spec',
}]),
('two_specs', [
specs.Array((1, 2, 3), np.float32, 'spec1'),
specs.Array((3, 2, 1), np.int32, 'spec2')
], [{
'shape': np.array([1, 2, 3], np.int64),
'dtype': 'float32',
'name': 'spec1',
}, {
'shape': np.array([3, 2, 1], np.int64),
'dtype': 'int32',
'name': 'spec2',
}]),
)
def test_encode_list_of_specs(self, input_spec, expected_spec_list):
"""Checks that we can encode lists of Array specs."""
actual_spec_list = spec_codec.encode(input_spec)
self.assertLen(actual_spec_list, len(expected_spec_list))
for actual, expected in zip(actual_spec_list, expected_spec_list):
self._compare_spec_dicts(actual, expected)
# Tuples of specs.Arrays.
@parameterized.named_parameters(
('empty_tuple', (), ()),
('single_spec', (specs.Array((), np.float32, 'my_spec'),), ({
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': 'my_spec',
},)),
('two_specs', (
specs.Array((1, 2, 3), np.float32, 'spec1'),
specs.Array((3, 2, 1), np.int32, 'spec2')
), ({
'shape': np.array([1, 2, 3], np.int64),
'dtype': 'float32',
'name': 'spec1',
}, {
'shape': np.array([3, 2, 1], np.int64),
'dtype': 'int32',
'name': 'spec2',
})),
)
def test_encode_tuple_of_specs(self, input_spec, expected_spec_tuple):
"""Checks that we can encode tuples of Array specs."""
actual_spec_tuple = spec_codec.encode(input_spec)
self.assertLen(actual_spec_tuple, len(expected_spec_tuple))
for actual, expected in zip(actual_spec_tuple, expected_spec_tuple):
self._compare_spec_dicts(actual, expected)
# Dicts of specs.Arrays.
@parameterized.named_parameters(
('empty_dict', {}, {}),
('single_spec', {
'my_favorite_spec': specs.Array((), np.float32, 'my_spec')
}, {
'my_favorite_spec': {
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': 'my_spec',
}
}),
('two_specs', {
'hello': specs.Array((1, 2, 3), np.float32, 'spec1'),
'world': specs.Array((3, 2, 1), np.int32, 'spec2')
}, {
'hello': {
'shape': np.array([1, 2, 3], np.int64),
'dtype': 'float32',
'name': 'spec1',
},
'world': {
'shape': np.array([3, 2, 1], np.int64),
'dtype': 'int32',
'name': 'spec2',
}
}),
)
def test_encode_dict_of_specs(self, input_spec, expected_spec_dict):
"""Checks that we can encode dicts of Array specs."""
actual_spec_dict = spec_codec.encode(input_spec)
self.assertLen(actual_spec_dict, len(expected_spec_dict))
for actual, expected in zip(sorted(actual_spec_dict.items()),
sorted(expected_spec_dict.items())):
actual_key, actual_value = actual
expected_key, expected_value = expected
self.assertEqual(actual_key, expected_key)
self._compare_spec_dicts(actual_value, expected_value)
##############################################################################
# decode() tests.
##############################################################################
@parameterized.named_parameters(
('int', 123),
('float', 3.14),
('object', object()),
('function', np.abs),
('module', np),
)
def test_decode_unsupported(self, arg):
"""Checks that `decode(unsupported type)` raises a TypeError."""
self.assertRaises(TypeError, spec_codec.decode, arg)
# Single specs.Arrays.
@parameterized.named_parameters(
('no_shape', {
'shape': None, # None shapes are interpreted as scalars.
'dtype': 'float32',
'name': 'no_shape_spec',
}, specs.Array((), np.float32, 'no_shape_spec')),
('no_dtype', {
'shape': (),
'dtype': None, # None dtypes are interpreted as float64.
'name': 'no_dtype_spec',
}, specs.Array((), np.float64, 'no_dtype_spec')),
('no_shape_dtype', {
'shape': None, # None shapes are interpreted as scalars.
'dtype': None, # None dtypes are interpreted as float64.
'name': 'no_shape_dtype_spec',
}, specs.Array((), np.float64, 'no_shape_dtype_spec')),
('no_name_float', {
'shape': np.array([1], np.int64),
'dtype': 'float32',
'name': None, # `name` is optional.
}, specs.Array((1,), np.float32)),
('zero_shape_float', {
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': 'my_spec',
}, specs.Array((), np.float32, 'my_spec')),
('zero_shape_int', {
'shape': np.array([], np.int64),
'dtype': 'int64',
'name': 'int_spec',
}, specs.Array((), np.int64, 'int_spec')),
('one_dim_shape_float', {
'shape': np.array([123], np.int64),
'dtype': 'float32',
'name': 'one_dim_float',
}, specs.Array((123,), np.float32, 'one_dim_float')),
('one_dim_shape_int', {
'shape': np.array([321], np.int64),
'dtype': 'int64',
'name': 'one_dim_int',
}, specs.Array((321,), np.int64, 'one_dim_int')),
('two_dim_shape_float', {
'shape': np.array([1, 2], np.int64),
'dtype': 'float32',
'name': 'two_dim_float',
}, specs.Array((1, 2), np.float32, 'two_dim_float')),
('two_dim_shape_int', {
'shape': np.array([4, 3], np.int64),
'dtype': 'int64',
'name': 'two_dim_int',
}, specs.Array((4, 3), np.int64, 'two_dim_int')),
)
def test_decode_array(self, input_spec_dict, expected_spec):
result = spec_codec.decode(input_spec_dict)
self.assertIsInstance(result, specs.Array)
self.assertEqual(result, expected_spec)
# Single specs.BoundedArrays.
@parameterized.named_parameters(
('zero_shape_float', {
'shape': np.array([], np.int64),
'dtype': 'float32',
'minimum': 0.0,
'maximum': 1.0,
'name': 'my_spec',
},
specs.BoundedArray(
(), np.float32, minimum=0.0, maximum=1.0, name='my_spec')),
('zero_shape_int', {
'shape': np.array([], np.int64),
'dtype': 'int64',
'minimum': 0,
'maximum': 3,
'name': 'int_spec',
}, specs.BoundedArray(
(), np.int64, minimum=0, maximum=3, name='int_spec')),
('one_dim_shape_float', {
'shape': np.array([123], np.int64),
'dtype': 'float32',
'minimum': -1.0,
'maximum': 1.0,
'name': 'one_dim_float',
},
specs.BoundedArray(
(123,), np.float32, minimum=-1.0, maximum=1.0,
name='one_dim_float')),
('one_dim_shape_int', {
'shape': np.array([321], np.int64),
'dtype': 'int64',
'minimum': 1000,
'maximum': 2000,
'name': 'one_dim_int',
},
specs.BoundedArray(
(321,), np.int64, minimum=1000, maximum=2000, name='one_dim_int')),
# Decoding sequence minimums/maximums should also be supported.
('two_dim_shape_float', {
'shape': np.array([1, 2], np.int64),
'dtype': 'float32',
'minimum': [0.0, 5.0],
'maximum': [1.0, 10.0],
'name': 'two_dim_float',
},
specs.BoundedArray((1, 2),
np.float32,
minimum=[0.0, 5.0],
maximum=[1.0, 10.0],
name='two_dim_float')),
('two_dim_shape_int', {
'shape': np.array([4, 3], np.int64),
'dtype': 'int64',
'minimum': -10,
'maximum': 10,
'name': 'two_dim_int',
},
specs.BoundedArray(
(4, 3), np.int64, minimum=-10, maximum=10, name='two_dim_int')),
('no_name_float', {
'shape': np.array([1], np.int64),
'dtype': 'float32',
'minimum': 0.0,
'maximum': 1.0,
'name': None, # `name` is optional.
}, specs.BoundedArray((1,), np.float32, minimum=0.0, maximum=1.0)),
)
def test_decode_bounded_array(self, input_spec_dict, expected_spec):
result = spec_codec.decode(input_spec_dict)
self.assertIsInstance(result, specs.BoundedArray)
self.assertEqual(result, expected_spec)
# Single specs.DiscreteArrays.
@parameterized.named_parameters(
('zero_shape', {
'shape': np.array([], np.int64),
'dtype': 'int32',
'num_values': 123,
'name': 'my_spec',
}, specs.DiscreteArray(123, name='my_spec')),
('custom_dtype', {
'shape': np.array([], np.int64),
'dtype': 'int64',
'num_values': 123,
'name': 'custom_spec',
}, specs.DiscreteArray(123, dtype=np.int64, name='custom_spec')),
('no_name', {
'shape': np.array([], np.int64),
'dtype': 'int32',
'num_values': 666,
'name': None, # `name` is optional.
}, specs.DiscreteArray(666, np.int32)),
)
def test_decode_discrete_array(self, input_spec_dict, expected_spec):
result = spec_codec.decode(input_spec_dict)
self.assertIsInstance(result, specs.DiscreteArray)
self.assertEqual(result, expected_spec)
# Lists of specs.Arrays.
@parameterized.named_parameters(
('empty_list', [], []),
('single_spec', [{
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': 'my_spec',
}], [specs.Array((), np.float32, 'my_spec')]),
('two_specs', [{
'shape': np.array([1, 2, 3], np.int64),
'dtype': 'float32',
'name': 'spec1',
}, {
'shape': np.array([3, 2, 1], np.int64),
'dtype': 'int32',
'name': 'spec2',
}], [
specs.Array((1, 2, 3), np.float32, 'spec1'),
specs.Array((3, 2, 1), np.int32, 'spec2')
]),
)
def test_decode_list_of_specs(self, input_spec, expected_spec_list):
"""Checks that we can encode lists of Array specs."""
result = spec_codec.decode(input_spec)
self.assertIsInstance(result, list)
self.assertEqual(result, expected_spec_list)
# Tuples of specs.Arrays.
@parameterized.named_parameters(
('empty_tuple', (), ()),
('single_spec', ({
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': 'my_spec',
},), (specs.Array((), np.float32, 'my_spec'),)),
('two_specs', ({
'shape': np.array([1, 2, 3], np.int64),
'dtype': 'float32',
'name': 'spec1',
}, {
'shape': np.array([3, 2, 1], np.int64),
'dtype': 'int32',
'name': 'spec2',
}), (
specs.Array((1, 2, 3), np.float32, 'spec1'),
specs.Array((3, 2, 1), np.int32, 'spec2')
)),
)
def test_decode_tuple_of_specs(self, input_spec, expected_spec_tuple):
"""Checks that we can encode tuples of Array specs."""
result = spec_codec.decode(input_spec)
self.assertIsInstance(result, tuple)
self.assertEqual(result, expected_spec_tuple)
# Dicts of specs.Arrays.
@parameterized.named_parameters(
('empty_dict', {}, {}),
('single_spec', {
'my_favorite_spec': {
'shape': np.array([], np.int64),
'dtype': 'float32',
'name': 'my_spec',
}
}, {
'my_favorite_spec': specs.Array((), np.float32, 'my_spec')
}),
('two_specs', {
'hello': {
'shape': np.array([1, 2, 3], np.int64),
'dtype': 'float32',
'name': 'spec1',
},
'world': {
'shape': np.array([3, 2, 1], np.int64),
'dtype': 'int32',
'name': 'spec2',
}
}, {
'hello': specs.Array((1, 2, 3), np.float32, 'spec1'),
'world': specs.Array((3, 2, 1), np.int32, 'spec2')
}),
)
def test_decode_dict_of_specs(self, input_spec, expected_spec_dict):
"""Checks that we can encode dicts of Array specs."""
result = spec_codec.decode(input_spec)
self.assertIsInstance(result, dict)
self.assertEqual(result, expected_spec_dict)
@parameterized.named_parameters(
('single_spec',
specs.Array(shape=(1, 2, 3), dtype=np.float64, name='my_3d_spec')),
(
'spec_list',
[
specs.Array(shape=(1, 2), dtype=np.float32, name='my_2d_spec'),
specs.BoundedArray(
shape=(),
dtype=np.uint8,
minimum=32,
maximum=64,
name='scalar_spec')
],
),
(
'spec_tuple',
(specs.BoundedArray(
shape=(),
dtype=np.uint8,
minimum=32,
maximum=64,
name='scalar_spec'),
specs.Array(shape=(1, 2), dtype=np.float32, name='my_2d_spec')),
),
(
'spec_dict',
{
'spec1':
specs.BoundedArray(
shape=(),
dtype=np.uint8,
minimum=32,
maximum=64,
name='scalar_spec'),
'spec2':
specs.Array(
shape=(1, 2), dtype=np.float32, name='my_2d_spec'),
},
),
# Any combination of tuples, lists and dicts should be supported.
(
'complicated_spec',
{
'spec1': [
specs.BoundedArray(
shape=(),
dtype=np.uint8,
minimum=32,
maximum=64,
name='scalar_spec'),
specs.Array(
shape=(1, 2), dtype=np.float32, name='my_2d_spec')
],
'spec2': (specs.Array(
shape=(1, 2), dtype=np.float32, name='my_2d_spec'), {
'deeply_nested':
specs.DiscreteArray(
num_values=999, name='hard_to_find')
}),
},
),
)
def test_roundtrip_encoding_decoding(self, input_spec):
self.assertEqual(
spec_codec.decode(spec_codec.encode(input_spec)),
input_spec)
def test_environment_specs_roundtrip(self):
"""Checks that {encode|decode}_environment_specs work correctly.
"""
# Each spec has a different shape, type and name
observation_spec = specs.Array((1, 2, 3), np.float32, 'spec1')
action_spec = specs.Array((4, 5), np.float64, 'spec2')
reward_spec = specs.Array((1,), np.int32, 'spec3')
discount_spec = specs.Array((2,), np.int64, 'spec4')
env = CustomSpecsEnvironment(observation_spec, action_spec, reward_spec,
discount_spec)
env_specs = spec_codec.encode_environment_specs(env)
decoded_specs = spec_codec.decode_environment_specs(env_specs)
self.assertEqual(decoded_specs['observation_spec'], observation_spec)
self.assertEqual(decoded_specs['action_spec'], action_spec)
self.assertEqual(decoded_specs['reward_spec'], reward_spec)
self.assertEqual(decoded_specs['discount_spec'], discount_spec)
def test_environment_specs_roundtrip_no_env(self):
"""Checks that {encode|decode}_environment_specs with no environment.
"""
env_specs = spec_codec.encode_environment_specs(None)
decoded_specs = spec_codec.decode_environment_specs(env_specs)
self.assertIsNone(decoded_specs['observation_spec'])
self.assertIsNone(decoded_specs['action_spec'])
self.assertIsNone(decoded_specs['reward_spec'])
self.assertIsNone(decoded_specs['discount_spec'])
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/converters/spec_codec_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for codec."""
from absl.testing import absltest
from absl.testing import parameterized
from envlogger.converters import codec
from envlogger.proto import storage_pb2
import numpy as np
class NumpyConvertersTest(parameterized.TestCase):
##############################################################################
#
# Datum tests (i.e. not Array/Tuple/Dict of Datums)
#
##############################################################################
##############################################################################
#
# Scalar tests
#
##############################################################################
##############################################################################
# Empty and None values
##############################################################################
def test_encode_none(self):
"""The proto should be completely empty if given a None value."""
self.assertEqual(codec.encode(None), storage_pb2.Data())
def test_decode_none(self):
"""Decoding a None value should produce None."""
self.assertIsNone(codec.decode(None))
def test_decode_empty_proto(self):
"""Decoding an empty proto should produce None."""
user_data = storage_pb2.Data()
self.assertIsNone(codec.decode(user_data))
def test_encode_empty_ndarray(self):
"""The proto should be completely empty if given zero shape numpy array."""
self.assertEqual(codec.encode(np.array([])), storage_pb2.Data())
# Also test other explicit types.
self.assertEqual(
codec.encode(np.array([], dtype='float')), storage_pb2.Data())
def test_identity_none(self):
"""Encoding and decoding it back should not change its value."""
self.assertIsNone(codec.decode(codec.encode(None)))
##############################################################################
# float32
##############################################################################
def test_encode_32bit_float_scalar(self):
"""Proto supports float32 so we expect no precision loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.float_values.append(np.float32(3.14))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.float32(3.14)), expected)
def test_decode_32bit_float_scalar(self):
"""Proto supports float32 so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.float_values.append(np.float32(3.14))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.float32)
self.assertEqual(decoded, np.float32(3.14))
def test_identity_32bit_float_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.float32(3.14)))
self.assertIsInstance(decoded, np.float32)
self.assertEqual(decoded, np.float32(3.14))
##############################################################################
# float32 buffer
##############################################################################
def test_decode_32bit_float_scalar_buffer(self):
"""Proto supports float32 so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
# 3.14159 in big-endian byte array.
datum.values.float_values_buffer = b'\x40\x49\x0f\xd0'
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.float32)
self.assertEqual(decoded, np.float32(3.14159))
##############################################################################
# float64 (aka double)
##############################################################################
def test_encode_double_scalar(self):
"""Proto supports double so we expect no precision loss in encoding."""
# Ordinary floats in python are 64-bit floats.
expected = storage_pb2.Data()
datum = expected.datum
datum.values.double_values.append(3.14159265358979)
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(3.14159265358979), expected)
# np.float64 should also work.
self.assertEqual(codec.encode(np.float64(3.14159265358979)), expected)
def test_decode_double_scalar(self):
"""Proto supports double so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.double_values.append(3.14159265358979)
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.float64)
self.assertEqual(decoded, np.float64(3.14159265358979))
def test_identity_double_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.float64(3.14159265358979)))
self.assertIsInstance(decoded, np.float64)
self.assertEqual(decoded, np.float64(3.14159265358979))
##############################################################################
# int32
##############################################################################
def test_encode_int32_scalar(self):
"""Proto supports int32 so we expect no precision loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int32_values.append(np.int32(3))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.int32(3)), expected)
def test_decode_int32_scalar(self):
"""Proto supports int32 so we expect no precision loss in encoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.int32_values.append(np.int32(-32))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.int32)
self.assertEqual(decoded, np.int32(-32))
def test_identity_int32_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.int32(-3)))
self.assertIsInstance(decoded, np.int32)
self.assertEqual(decoded, np.int32(-3))
##############################################################################
# int64
##############################################################################
def test_encode_int64_scalar(self):
"""Proto supports int64 so we expect no precision loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int64_values.append(np.int64(-3))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.int64(-3)), expected)
def test_decode_int64_scalar(self):
"""Proto supports int64 so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.int64_values.append(np.int64(-64))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.int64)
self.assertEqual(decoded, np.int64(-64))
def test_identity_int64_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.int64(-1234567890123)))
self.assertIsInstance(decoded, np.int64)
self.assertEqual(decoded, np.int64(-1234567890123))
##############################################################################
# uint32
##############################################################################
def test_encode_uint32_scalar(self):
"""Proto supports uint32 so we expect no precision loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint32_values.append(np.uint32(12345))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.uint32(12345)), expected)
def test_decode_uint32_scalar(self):
"""Proto supports uint32 so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.uint32_values.append(np.uint32(32))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.uint32)
self.assertEqual(decoded, np.uint32(32))
def test_identity_uint32_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.uint32(4294967295)))
self.assertIsInstance(decoded, np.uint32)
self.assertEqual(decoded, np.uint32(4294967295))
##############################################################################
# uint64
##############################################################################
def test_encode_uint64_scalar(self):
"""Proto supports uint64 so we expect no precision loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint64_values.append(np.uint64(12345))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.uint64(12345)), expected)
def test_decode_uint64_scalar(self):
"""Proto supports uint64 so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.uint64_values.append(np.uint64(64))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.uint64)
self.assertEqual(decoded, np.uint64(64))
def test_identity_uint64_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.uint64(18446744073709551615)))
self.assertIsInstance(decoded, np.uint64)
self.assertEqual(decoded, np.uint64(18446744073709551615))
##############################################################################
# bool
##############################################################################
def test_encode_bool_scalar(self):
"""Proto supports bool so we expect no precision loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.bool_values.append(True)
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(True), expected)
def test_decode_bool_scalar(self):
"""Proto supports bool so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.bool_values.append(True)
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertEqual(decoded, True)
def test_identity_bool_scalar_true(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(True))
self.assertIsInstance(decoded, bool)
self.assertEqual(decoded, True)
def test_identity_bool_scalar_false(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(False))
self.assertIsInstance(decoded, bool)
self.assertEqual(decoded, False)
##############################################################################
# string
##############################################################################
def test_encode_string_scalar(self):
"""Proto supports string so we expect no loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.string_values.append('pi')
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode('pi'), expected)
def test_decode_string_scalar(self):
"""Proto supports string so we expect no loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.string_values.append('ravel')
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, str)
self.assertEqual(decoded, 'ravel')
def test_identity_string_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode('do not change me, please!'))
self.assertIsInstance(decoded, str)
self.assertEqual(decoded, 'do not change me, please!')
##############################################################################
# bytes
##############################################################################
def test_encode_bytes_scalar(self):
"""Proto supports bytes so we expect no precision loss in encoding."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.bytes_values.append(b'pi')
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(b'pi'), expected)
def test_decode_bytes_scalar(self):
"""Proto supports bytes so we expect no precision loss in decoding."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.bytes_values.append(b'xu xin')
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, bytes)
self.assertEqual(decoded, b'xu xin')
def test_identity_bytes_scalar(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(b'awesome bytes'))
self.assertIsInstance(decoded, bytes)
self.assertEqual(decoded, b'awesome bytes')
##############################################################################
# big int (arbitrarily long)
##############################################################################
def test_encode_int_small_scalar(self):
"""Ensures that a vanilla Python int can be stored as bytes."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.bigint_values.append(b'\x03')
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(3), expected)
def test_encode_bigint_scalar(self):
"""Ensures that a large Python int can be stored as bytes."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.bigint_values.append(
b'\x01\x8e\xe9\x0f\xf6\xc3s\xe0\xeeN?\n\xd2')
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(123456789012345678901234567890), expected)
def test_encode_negative_bigint_scalar(self):
"""Ensures that a large negative Python int can be stored as bytes."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.bigint_values.append(
b'\xfeq\x16\xf0\t<\x8c\x1f\x11\xb1\xc0\xf5.')
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(-123456789012345678901234567890), expected)
def test_decode_int_scalar(self):
"""Ensures that a large negative integer can be decoded to a Python int."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.bigint_values.append(
b'\xfeq\x16\xf0\t<\x8c\x1f\x11\xb1\xc0\xf5.')
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, int)
self.assertEqual(decoded, -123456789012345678901234567890)
def test_identity_int_scalar_positive(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(12345678901234567890))
self.assertIsInstance(decoded, int)
self.assertEqual(decoded, 12345678901234567890)
def test_identity_int_scalar_zero(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(0))
self.assertIsInstance(decoded, int)
self.assertEqual(decoded, 0)
def test_identity_int_scalar_negative(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(-98765432109876543210))
self.assertIsInstance(decoded, int)
self.assertEqual(decoded, -98765432109876543210)
##############################################################################
# int8
##############################################################################
def test_encode_int8_scalar(self):
"""Ensures that an np.int8 can be stored as bytes."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int8_values = b'\x03'
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.int8(3)), expected)
def test_decode_int8_scalar(self):
"""Ensures that int8s can be retrieved as np.int8."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.int8_values = b'\xfd'
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.int8)
self.assertEqual(decoded, np.int8(-3))
def test_identity_int8_scalar_negative(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.int8(-123)))
self.assertIsInstance(decoded, np.int8)
self.assertEqual(decoded, np.int8(-123))
def test_identity_int8_scalar_zero(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.int8(0)))
self.assertIsInstance(decoded, np.int8)
self.assertEqual(decoded, np.int8(0))
def test_identity_int8_scalar_positive(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.int8(127)))
self.assertIsInstance(decoded, np.int8)
self.assertEqual(decoded, np.int8(127))
##############################################################################
# int16
##############################################################################
def test_encode_int16_scalar(self):
"""Ensures that an np.int16 can be stored as bytes."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int16_values = b'\xfe\xd4'
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.int16(-300)), expected)
def test_decode_int16_scalar(self):
"""Ensures that int16s can be retrieved as np.int16."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.int16_values = b'\x07\xd0'
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.int16)
self.assertEqual(decoded, np.int16(2000))
def test_identity_int16_scalar_negative(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.int16(-123)))
self.assertIsInstance(decoded, np.int16)
self.assertEqual(decoded, np.int16(-123))
def test_identity_int16_scalar_zero(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.int16(0)))
self.assertIsInstance(decoded, np.int16)
self.assertEqual(decoded, np.int16(0))
def test_identity_int16_scalar_positive(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.int16(127)))
self.assertIsInstance(decoded, np.int16)
self.assertEqual(decoded, np.int16(127))
##############################################################################
# uint8
##############################################################################
def test_encode_uint8_scalar(self):
"""Ensures that an np.uint8 can be stored as bytes."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint8_values = b'\xfb'
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.uint8(251)), expected)
  def test_decode_uint8_scalar(self):
    """Ensures that uint8s can be retrieved as np.uint8."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.uint8_values = b'\xed'
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.uint8)
self.assertEqual(decoded, np.uint8(237))
def test_identity_uint8_scalar_zero(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.uint8(0)))
self.assertIsInstance(decoded, np.uint8)
self.assertEqual(decoded, np.uint8(0))
def test_identity_uint8_scalar_positive(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.uint8(255)))
self.assertIsInstance(decoded, np.uint8)
self.assertEqual(decoded, np.uint8(255))
##############################################################################
# uint16
##############################################################################
def test_encode_uint16_scalar(self):
"""Ensures that an np.uint16 can be stored as bytes."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint16_values = b'\x03\xe8'
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode(np.uint16(1000)), expected)
  def test_decode_uint16_scalar(self):
    """Ensures that uint16s can be retrieved as np.uint16."""
user_data = storage_pb2.Data()
datum = user_data.datum
datum.values.uint16_values = b'\x0b\xb8'
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertTrue(
np.isscalar(decoded), 'The returned data should be a plain scalar.\n'
f'Actual type: {type(decoded)}\n'
f'user_data: {user_data}\n'
f'decoded: {decoded}')
self.assertIsInstance(decoded, np.uint16)
self.assertEqual(decoded, np.uint16(3000))
def test_identity_uint16_scalar_zero(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.uint16(0)))
self.assertIsInstance(decoded, np.uint16)
self.assertEqual(decoded, np.uint16(0))
def test_identity_uint16_scalar_positive(self):
"""Encoding and decoding it back should not change its value."""
decoded = codec.decode(codec.encode(np.uint16(12345)))
self.assertIsInstance(decoded, np.uint16)
self.assertEqual(decoded, np.uint16(12345))
##############################################################################
#
# Array tests
#
##############################################################################
##############################################################################
# Empty and None values
##############################################################################
def test_encode_empty_list(self):
"""Tests that a Python list of one None element is represented by an Array."""
expected = storage_pb2.Data()
self.assertEqual(codec.encode([]), expected)
def test_encode_none_list(self):
"""Tests that a Python list of one None element is represented by an Array."""
expected = storage_pb2.Data()
expected.array.values.add()
self.assertEqual(codec.encode([None]), expected)
def test_encode_two_none_list(self):
"""Tests that a Python list of one None element is represented by an Array."""
expected = storage_pb2.Data()
expected.array.values.add()
expected.array.values.add()
self.assertEqual(codec.encode([None, None]), expected)
def test_encode_decode_empty_list(self):
"""Tests that an empty Python list becomes None when decoded."""
    self.assertIsNone(codec.decode(codec.encode([])))
##############################################################################
# float32
##############################################################################
def test_encode_float32_list(self):
"""Tests that a Python list of one element is represented by an Array."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.values.float_values.append(np.float32(3.14))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode([np.float32(3.14)]), expected)
def test_decode_float32_list(self):
"""Tests that we get a Python list from a proto Array."""
user_data = storage_pb2.Data()
datum = user_data.array.values.add().datum
datum.values.float_values.append(np.float32(3.14))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertNotEmpty(decoded)
self.assertIsInstance(decoded[0], np.float32)
self.assertListEqual(decoded, [np.float32(3.14)])
def test_encode_float32_nested_list(self):
"""Ensures that [[1.2, 3.4], [5.6, 7.8]] is represented correctly."""
expected = storage_pb2.Data()
array1 = expected.array.values.add().array
datum1 = array1.values.add().datum
datum1.values.float_values.append(np.float32(1.2))
datum1.shape.dim.add().size = -438
datum2 = array1.values.add().datum
datum2.values.float_values.append(np.float32(3.4))
datum2.shape.dim.add().size = -438
array2 = expected.array.values.add().array
datum3 = array2.values.add().datum
datum3.values.float_values.append(np.float32(5.6))
datum3.shape.dim.add().size = -438
datum4 = array2.values.add().datum
datum4.values.float_values.append(np.float32(7.8))
datum4.shape.dim.add().size = -438
self.assertEqual(
codec.encode([[np.float32(1.2), np.float32(3.4)],
[np.float32(5.6), np.float32(7.8)]]), expected)
##############################################################################
# float64
##############################################################################
def test_encode_float64_list(self):
"""Tests that a Python list of one element is represented by an Array."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.values.double_values.append(np.float64(6.28))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode([np.float64(6.28)]), expected)
def test_decode_float64_list(self):
"""Tests that we get a Python list from a proto Array."""
user_data = storage_pb2.Data()
datum = user_data.array.values.add().datum
datum.values.double_values.append(np.float64(6.28))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertNotEmpty(decoded)
self.assertIsInstance(decoded[0], np.float64)
self.assertListEqual(decoded, [np.float64(6.28)])
##############################################################################
# int32
##############################################################################
def test_encode_int32_list(self):
"""Tests that a Python list of one element is represented by an Array."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.values.int32_values.append(np.int32(-12345))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode([np.int32(-12345)]), expected)
def test_decode_int32_list(self):
"""Tests that a Python list of one element is represented by an Array."""
user_data = storage_pb2.Data()
datum = user_data.array.values.add().datum
datum.values.int32_values.append(np.int32(-12345))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertNotEmpty(decoded)
self.assertIsInstance(decoded[0], np.int32)
self.assertListEqual(decoded, [np.int32(-12345)])
##############################################################################
# int64
##############################################################################
def test_encode_int64_list(self):
"""Tests that a Python list of one element is represented by an Array."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.values.int64_values.append(np.int64(-1234567890123456))
datum.shape.dim.add().size = -438
self.assertEqual(codec.encode([np.int64(-1234567890123456)]), expected)
def test_decode_int64_list(self):
"""Tests that a Python list of one element is represented by an Array."""
user_data = storage_pb2.Data()
datum = user_data.array.values.add().datum
datum.values.int64_values.append(np.int64(-1234567890123456))
datum.shape.dim.add().size = -438
decoded = codec.decode(user_data)
self.assertNotEmpty(decoded)
self.assertIsInstance(decoded[0], np.int64)
self.assertListEqual(decoded, [np.int64(-1234567890123456)])
# Homogeneity.
def test_encode_heterogeneous_list(self):
"""Tests that an error is thrown for a list with different types."""
user_data = [np.int64(-1234567890123456), np.int32(1)]
self.assertRaises(TypeError, codec.encode, user_data)
##############################################################################
#
# ndarray tests
#
##############################################################################
def test_encode_one_float_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
a = np.array(1.5, dtype=np.float32)
expected = storage_pb2.Data()
datum = expected.datum
datum.values.float_values_buffer = a.astype('>f').tobytes()
self.assertEqual(codec.encode(a), expected)
def test_encode_one_float_elem_ndarray(self):
"""Ensures that np float32 arrays can be encoded in our proto."""
a = np.array([1.5], dtype=np.float32)
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.float_values_buffer = a.astype('>f').tobytes()
self.assertEqual(codec.encode(a), expected)
def test_identity_one_float_elem_ndarray(self):
"""Ensures that np float32 arrays can be written and read back."""
a = np.array(1.5, dtype=np.float32)
np.testing.assert_equal(codec.decode(codec.encode(a)), a)
def test_decode_one_float_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.float_values.append(0.1512)
np.testing.assert_equal(
codec.decode(user_data), np.array([0.1512], dtype=np.float32))
def test_decode_one_float_elem_ndarray_buffer(self):
"""Tests that we get a Python list from a float32 buffer."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
    # 3.14159 as a float32 in big-endian byte order.
user_data.datum.values.float_values_buffer = b'\x40\x49\x0f\xd0'
decoded = codec.decode(user_data)
self.assertEqual(decoded.dtype, np.float32)
np.testing.assert_equal(decoded, np.array([3.14159], dtype=np.float32))
def test_encode_one_double_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.double_values.append(512.123)
self.assertEqual(
codec.encode(np.array(512.123, dtype=np.float64)), expected)
def test_encode_one_double_elem_ndarray(self):
"""Ensures that np float64 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.double_values.append(512.123)
self.assertEqual(
codec.encode(np.array([512.123], dtype=np.float64)), expected)
def test_decode_one_double_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.double_values.append(0.63661)
np.testing.assert_equal(
codec.decode(user_data), np.array([0.63661], dtype=np.float64))
def test_encode_multiple_double_elem_ndarray(self):
"""Ensures that np float64 multi-element arrays can be encoded."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.double_values.extend([987.654, 321.098])
self.assertEqual(codec.encode(np.array([987.654, 321.098])), expected)
def test_decode_multiple_double_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 3
user_data.datum.values.double_values.extend([0.74048, 2.09455, 0.69314])
np.testing.assert_equal(
codec.decode(user_data),
np.array([0.74048, 2.09455, 0.69314], dtype=np.float64))
def test_encode_one_int32_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int32_values.append(415)
self.assertEqual(codec.encode(np.array(415, dtype=np.int32)), expected)
def test_encode_one_int32_elem_ndarray(self):
"""Ensures that np int32 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.int32_values.append(415)
self.assertEqual(codec.encode(np.array([415], dtype=np.int32)), expected)
def test_decode_one_int32_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.int32_values.append(9)
np.testing.assert_equal(
codec.decode(user_data), np.array([9], dtype=np.int32))
def test_encode_one_int64_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int64_values.append(415)
self.assertEqual(codec.encode(np.array(415, dtype=np.int64)), expected)
def test_encode_one_int64_elem_ndarray(self):
"""Ensures that np int64 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.int64_values.append(415)
self.assertEqual(codec.encode(np.array([415])), expected)
def test_encode_multiple_int64_elem_ndarray(self):
"""Ensures that np int64 multi-element arrays can be encoded."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.int64_values.extend([123, 456])
self.assertEqual(codec.encode(np.array([123, 456])), expected)
def test_decode_one_int64_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.int64_values.append(9)
np.testing.assert_equal(
codec.decode(user_data), np.array([9], dtype=np.int64))
def test_decode_multiple_int64_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 3
user_data.datum.shape.dim.add().size = 2
user_data.datum.values.int64_values.extend([6, 5, 4, 3, 2, 1])
np.testing.assert_equal(
codec.decode(user_data),
np.array([[6, 5], [4, 3], [2, 1]], dtype=np.int64))
def test_encode_one_uint32_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint32_values.append(415)
self.assertEqual(codec.encode(np.array(415, dtype=np.uint32)), expected)
def test_encode_one_uint32_elem_ndarray(self):
"""Ensures that np uint32 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.uint32_values.append(415)
self.assertEqual(codec.encode(np.array([415], dtype=np.uint32)), expected)
def test_decode_one_uint32_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.uint32_values.append(9)
np.testing.assert_equal(
codec.decode(user_data), np.array([9], dtype=np.uint32))
def test_encode_one_uint64_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint64_values.append(415)
self.assertEqual(codec.encode(np.array(415, dtype=np.uint64)), expected)
def test_encode_one_uint64_elem_ndarray(self):
"""Ensures that np uint64 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.uint64_values.append(415)
self.assertEqual(codec.encode(np.array([415], dtype=np.uint64)), expected)
def test_decode_one_uint64_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.uint64_values.append(9)
np.testing.assert_equal(
codec.decode(user_data), np.array([9], dtype=np.uint64))
def test_encode_one_bool_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.bool_values.append(True)
self.assertEqual(codec.encode(np.array(True, dtype=bool)), expected)
def test_encode_one_bool_elem_ndarray(self):
"""Ensures that bool arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.bool_values.append(True)
self.assertEqual(codec.encode(np.array([True], dtype=bool)), expected)
def test_decode_one_bool_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.bool_values.append(True)
np.testing.assert_equal(
codec.decode(user_data), np.array([True], dtype=bool))
def test_encode_one_string_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.string_values.append('dream theater')
self.assertEqual(codec.encode(np.array('dream theater')), expected)
def test_encode_one_string_elem_ndarray(self):
"""Ensures that np string arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.string_values.append('rachmaninov')
self.assertEqual(codec.encode(np.array(['rachmaninov'])), expected)
def test_decode_one_string_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.string_values.append('scriabin')
np.testing.assert_equal(codec.decode(user_data), np.array(['scriabin']))
def test_encode_one_bytes_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.bytes_values.append(b'a1b2c3d4e5f6')
self.assertEqual(codec.encode(np.array(b'a1b2c3d4e5f6')), expected)
def test_encode_one_bytes_elem_ndarray(self):
"""Ensures that np bytes arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.bytes_values.append(b'a1b2c3d4e5f6')
self.assertEqual(codec.encode(np.array([b'a1b2c3d4e5f6'])), expected)
def test_decode_one_bytes_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.bytes_values.append(b'6f5e4d3c2b1a')
np.testing.assert_equal(
codec.decode(user_data), np.array([b'6f5e4d3c2b1a']))
def test_encode_one_int_elem_scalar_ndarray(self):
"""Ensures that ndarrays with dtype==object raise an error."""
self.assertRaises(TypeError, codec.encode,
np.array(12345678901234567890, dtype=object))
def test_decode_one_int_elem_ndarray(self):
"""Ensures that non-scalar Datums with dtype==object raise an error."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.bigint_values.append(
b'\000\253T\251\214\353\037\n\322')
self.assertRaises(TypeError, codec.decode, user_data)
def test_encode_one_int8_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int8_values = b'\x85'
self.assertEqual(codec.encode(np.array(-123, dtype=np.int8)), expected)
def test_encode_one_int8_elem_ndarray(self):
"""Ensures that np int8 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.int8_values = b'\x85'
self.assertEqual(codec.encode(np.array([-123], dtype=np.int8)), expected)
def test_encode_two_int8_elem_ndarray(self):
"""Ensures that np int8 2-element arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.int8_values = b'\x85\x84'
self.assertEqual(
codec.encode(np.array([-123, -124], dtype=np.int8)), expected)
def test_decode_one_int8_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.int8_values = b'\x91'
decoded = codec.decode(user_data)
self.assertEqual(decoded.dtype, np.int8)
np.testing.assert_equal(decoded, np.array([-111], dtype=np.int8))
def test_decode_two_int8_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 2
user_data.datum.values.int8_values = b'\xa1\xb2'
np.testing.assert_equal(
codec.decode(user_data), np.array([-95, -78], dtype=np.int8))
def test_encode_one_int16_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.int16_values = b'\xfe\xa7'
self.assertEqual(codec.encode(np.array(-345, dtype=np.int16)), expected)
def test_encode_one_int16_elem_ndarray(self):
"""Ensures that np int16 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.int16_values = b'\xfe\xa7'
self.assertEqual(codec.encode(np.array([-345], dtype=np.int16)), expected)
def test_encode_two_int16_elem_ndarray(self):
"""Ensures that np int16 2-element arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.int16_values = b'\xfe\xa7\xfe\xa6'
self.assertEqual(
codec.encode(np.array([-345, -346], dtype=np.int16)), expected)
def test_decode_one_int16_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.int16_values = b'\xfe\xa7'
decoded = codec.decode(user_data)
self.assertEqual(decoded.dtype, np.int16)
np.testing.assert_equal(decoded, np.array([-345], dtype=np.int16))
def test_decode_two_int16_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 2
user_data.datum.values.int16_values = b'\xa1\xb2\xc3\xd4'
np.testing.assert_equal(
codec.decode(user_data), np.array([-24142, -15404], dtype=np.int16))
def test_encode_one_uint8_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint8_values = b'\x7b'
self.assertEqual(codec.encode(np.array(123, dtype=np.uint8)), expected)
def test_encode_one_uint8_elem_ndarray(self):
"""Ensures that np uint8 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.uint8_values = b'\x7b'
self.assertEqual(codec.encode(np.array([123], dtype=np.uint8)), expected)
def test_encode_two_uint8_elem_ndarray(self):
"""Ensures that np uint8 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.uint8_values = b'\x7b\x7a'
self.assertEqual(
codec.encode(np.array([123, 122], dtype=np.uint8)), expected)
def test_decode_one_uint8_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.uint8_values = b'\xa1'
np.testing.assert_equal(
codec.decode(user_data), np.array([161], dtype=np.uint8))
def test_decode_two_uint8_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 2
user_data.datum.values.uint8_values = b'\xa1\xb2'
np.testing.assert_equal(
codec.decode(user_data), np.array([161, 178], dtype=np.uint8))
def test_encode_one_uint16_elem_scalar_ndarray(self):
"""Ensures that np arrays with shape 0 can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.values.uint16_values = b'\x01Y'
self.assertEqual(codec.encode(np.array(345, dtype=np.uint16)), expected)
def test_encode_one_uint16_elem_ndarray(self):
"""Ensures that np uint16 arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 1
datum.values.uint16_values = b'\x01Y'
self.assertEqual(codec.encode(np.array([345], dtype=np.uint16)), expected)
def test_encode_two_uint16_elem_ndarray(self):
"""Ensures that np uint16 2-element arrays can be encoded in our proto."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.values.uint16_values = b'\x01Y\x01X'
self.assertEqual(
codec.encode(np.array([345, 344], dtype=np.uint16)), expected)
def test_decode_one_uint16_elem_ndarray(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 1
user_data.datum.values.uint16_values = b'\xa1\xb2'
np.testing.assert_equal(
codec.decode(user_data), np.array([41394], dtype=np.uint16))
  def test_decode_two_uint16_elem_ndarray(self):
    """Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
user_data.datum.shape.dim.add().size = 2
user_data.datum.values.uint16_values = b'\xa1\xb2\xc3\xd4'
np.testing.assert_equal(
codec.decode(user_data), np.array([41394, 50132], dtype=np.uint16))
# Multi-dimensional arrays.
def test_encode_2d_int64_elem_ndarray(self):
"""A 2D np int64 array should also be reprentable."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 2
datum.shape.dim.add().size = 3
datum.values.int64_values.extend([1, 3, 5, 7, 9, 11])
self.assertEqual(codec.encode(np.array([[1, 3, 5], [7, 9, 11]])), expected)
def test_encode_2d_double_elem_ndarray(self):
"""A 2D np float64 array should also be reprentable."""
expected = storage_pb2.Data()
datum = expected.datum
datum.shape.dim.add().size = 3
datum.shape.dim.add().size = 2
datum.values.double_values.extend([10.0, 8.0, 6.0, 4.0, 2.0, 0.0])
self.assertEqual(
codec.encode(np.array([[10.0, 8.0], [6.0, 4.0], [2.0, 0.0]])), expected)
##############################################################################
#
# Array of np arrays tests
#
##############################################################################
# float64
def test_encode_one_double_elem_ndarray_list(self):
"""A list of one np float64 array should be representable."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.shape.dim.add().size = 1
datum.values.double_values.append(3.14)
self.assertEqual(codec.encode([np.array([3.14])]), expected)
def test_encode_multiple_double_elem_ndarray_list(self):
"""A list of one multidimensional np int64 array should be representable."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.shape.dim.add().size = 5
datum.values.double_values.extend([0.0, 0.25, 0.5, 0.75, 1.0])
self.assertEqual(
codec.encode([np.array([0.0, 0.25, 0.5, 0.75, 1.0])]), expected)
  def test_decode_double_elem_ndarray_list(self):
    """Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
datum1 = user_data.array.values.add().datum
datum1.shape.dim.add().size = 1
datum1.values.double_values.append(1.2345)
datum2 = user_data.array.values.add().datum
datum2.shape.dim.add().size = 2
datum2.values.double_values.extend([4.567, 8.9011])
datum3 = user_data.array.values.add().datum
datum3.shape.dim.add().size = 3
datum3.shape.dim.add().size = 1
datum3.values.double_values.extend([9.8765, 4.321, -0.12345])
decoded = codec.decode(user_data)
self.assertLen(decoded, 3)
self.assertIsInstance(decoded, list)
np.testing.assert_equal(decoded[0], np.array([1.2345], dtype=np.float64))
np.testing.assert_equal(decoded[1],
np.array([4.567, 8.9011], dtype=np.float64))
np.testing.assert_equal(
decoded[2], np.array([[9.8765], [4.321], [-0.12345]], dtype=np.float64))
# int64
def test_encode_one_int64_elem_ndarray_list(self):
"""A list of one np int64 array should be representable."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.shape.dim.add().size = 1
datum.values.int64_values.append(719)
self.assertEqual(codec.encode([np.array([719])]), expected)
def test_encode_multiple_int64_elem_ndarray_list(self):
"""A list of one multidimensional np int64 array should be representable."""
expected = storage_pb2.Data()
datum = expected.array.values.add().datum
datum.shape.dim.add().size = 5
datum.values.int64_values.extend([1, 1, 2, 3, 5])
self.assertEqual(codec.encode([np.array([1, 1, 2, 3, 5])]), expected)
  def test_decode_int64_elem_ndarray_list(self):
    """Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
datum1 = user_data.array.values.add().datum
datum1.shape.dim.add().size = 1
datum1.values.int64_values.append(1000)
datum2 = user_data.array.values.add().datum
datum2.shape.dim.add().size = 2
datum2.values.int64_values.extend([2000, 3000])
datum3 = user_data.array.values.add().datum
datum3.shape.dim.add().size = 3
datum3.shape.dim.add().size = 1
datum3.values.int64_values.extend([4000, 5000, 6000])
decoded = codec.decode(user_data)
self.assertLen(decoded, 3)
self.assertIsInstance(decoded, list)
np.testing.assert_equal(decoded[0], np.array([1000], dtype=np.int64))
np.testing.assert_equal(decoded[1], np.array([2000, 3000], dtype=np.int64))
np.testing.assert_equal(decoded[2],
np.array([[4000], [5000], [6000]], dtype=np.int64))
##############################################################################
#
# Tuple tests
#
##############################################################################
def test_encode_one_double_elem_ndarray_tuple(self):
"""Tuples of np float64 arrays should be representable."""
expected = storage_pb2.Data()
datum = expected.tuple.values.add().datum
datum.shape.dim.add().size = 1
datum.values.double_values.append(-1 / 12)
self.assertEqual(codec.encode((np.array([-1 / 12]),)), expected)
def test_encode_multiple_double_elem_ndarray_tuple(self):
"""Tuples of np float64 arrays should be representable."""
expected = storage_pb2.Data()
datum = expected.tuple.values.add().datum
datum.shape.dim.add().size = 2
datum.values.double_values.extend([6.28, 2.71828])
self.assertEqual(codec.encode((np.array([6.28, 2.71828]),)), expected)
def test_decode_double_elem_ndarray_tuple(self):
"""Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
datum1 = user_data.tuple.values.add().datum
datum1.shape.dim.add().size = 1
datum1.values.double_values.append(1.2345)
datum2 = user_data.tuple.values.add().datum
datum2.shape.dim.add().size = 2
datum2.values.double_values.extend([4.567, 8.9011])
datum3 = user_data.tuple.values.add().datum
datum3.shape.dim.add().size = 3
datum3.shape.dim.add().size = 1
datum3.values.double_values.extend([9.8765, 4.321, -0.12345])
decoded = codec.decode(user_data)
self.assertLen(decoded, 3)
self.assertIsInstance(decoded, tuple)
np.testing.assert_equal(decoded[0], np.array([1.2345], dtype=np.float64))
np.testing.assert_equal(decoded[1],
np.array([4.567, 8.9011], dtype=np.float64))
np.testing.assert_equal(
decoded[2], np.array([[9.8765], [4.321], [-0.12345]], dtype=np.float64))
def test_encode_one_int64_elem_ndarray_tuple(self):
"""Tuples of np int64 arrays should be representable."""
expected = storage_pb2.Data()
datum = expected.tuple.values.add().datum
datum.shape.dim.add().size = 1
datum.values.int64_values.append(1729)
self.assertEqual(codec.encode((np.array([1729]),)), expected)
def test_encode_multiple_int64_elem_ndarray_tuple(self):
"""Tuples of np int64 arrays should be representable."""
expected = storage_pb2.Data()
datum = expected.tuple.values.add().datum
datum.shape.dim.add().size = 6
datum.values.int64_values.extend([2, 3, 5, 7, 9, 11])
self.assertEqual(codec.encode((np.array([2, 3, 5, 7, 9, 11]),)), expected)
  def test_decode_int64_elem_ndarray_tuple(self):
    """Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
datum1 = user_data.tuple.values.add().datum
datum1.shape.dim.add().size = 1
datum1.values.int64_values.append(1000)
datum2 = user_data.tuple.values.add().datum
datum2.shape.dim.add().size = 2
datum2.values.int64_values.extend([2000, 3000])
datum3 = user_data.tuple.values.add().datum
datum3.shape.dim.add().size = 3
datum3.shape.dim.add().size = 1
datum3.values.int64_values.extend([4000, 5000, 6000])
decoded = codec.decode(user_data)
self.assertLen(decoded, 3)
self.assertIsInstance(decoded, tuple)
np.testing.assert_equal(decoded[0], np.array([1000], dtype=np.int64))
np.testing.assert_equal(decoded[1], np.array([2000, 3000], dtype=np.int64))
np.testing.assert_equal(decoded[2],
np.array([[4000], [5000], [6000]], dtype=np.int64))
##############################################################################
#
# Dict tests
#
##############################################################################
def test_encode_int64_elem_ndarray_dict(self):
"""Dict of int64 and of other dicts."""
expected = storage_pb2.Data()
d = expected.dict.values
datum1 = d['good'].datum
datum1.shape.dim.add().size = 1
datum1.values.int64_values.append(1)
datum2 = d['bad'].datum
datum2.shape.dim.add().size = 1
datum2.values.int64_values.append(-1)
# Dict also supports nested dicts.
datum3 = d['nested_dict'].dict.values['cumulants'].datum
datum3.shape.dim.add().size = 2
datum3.values.int64_values.extend([1000, -2])
self.assertEqual(
codec.encode({
'good': np.array([1]),
'bad': np.array([-1]),
'nested_dict': {
'cumulants': np.array([1000, -2])
}
}), expected)
def test_encode_double_elem_ndarray_dict(self):
"""Dicts of np arrays."""
expected = storage_pb2.Data()
d = expected.dict.values
datum1 = d['golden'].datum
datum1.shape.dim.add().size = 1
datum1.values.double_values.append(1.618)
datum2 = d['sqrt2'].datum
datum2.shape.dim.add().size = 1
datum2.values.double_values.append(1.41421)
self.assertEqual(
codec.encode({
'golden': np.array([1.618]),
'sqrt2': np.array([1.41421])
}), expected)
def test_encode_mixed_elem_ndarray_dict(self):
"""Dicts of np arrays of different dtypes."""
expected = storage_pb2.Data()
d = expected.dict.values
datum1 = d['mozart_death'].datum
datum1.shape.dim.add().size = 1
datum1.values.int64_values.append(35)
datum2 = d['sqrt3'].datum
datum2.shape.dim.add().size = 1
datum2.values.double_values.append(1.73205)
self.assertEqual(
codec.encode({
'mozart_death': np.array([35]),
'sqrt3': np.array([1.73205])
}), expected)
  def test_decode_dict(self):
    """Once encoded, the proto should be decodeable."""
user_data = storage_pb2.Data()
datum1 = user_data.dict.values['pi'].datum
datum1.shape.dim.add().size = 1
datum1.values.double_values.append(3.14159265)
datum2 = user_data.dict.values['primes'].datum
datum2.shape.dim.add().size = 5
datum2.values.int64_values.extend([2, 3, 5, 7, 11])
datum3 = user_data.dict.values['negative_squares_doubles'].datum
datum3.shape.dim.add().size = 5
datum3.shape.dim.add().size = 2
datum3.values.int64_values.extend(
[-1, -4, -9, -16, -25, -2, -8, -18, -32, -50])
decoded = codec.decode(user_data)
self.assertIsInstance(decoded, dict)
self.assertIn('pi', decoded)
np.testing.assert_equal(decoded['pi'],
np.array([3.14159265], dtype=np.float64))
self.assertIn('primes', decoded)
np.testing.assert_equal(decoded['primes'],
np.array([2, 3, 5, 7, 11], dtype=np.int64))
self.assertIn('negative_squares_doubles', decoded)
np.testing.assert_equal(
decoded['negative_squares_doubles'],
np.array([[-1, -4], [-9, -16], [-25, -2], [-8, -18], [-32, -50]],
dtype=np.int64))
def test_encode_dict_int_keys(self):
"""Dict with Python int keys."""
expected = storage_pb2.Data()
d = expected.dict.kvs
t1 = d.values.add().tuple
k1 = t1.values.add().datum
k1.shape.dim.add().size = -438
k1.values.bigint_values.append(b'{') # 123 == b'{'
v1 = t1.values.add().datum
v1.shape.dim.add().size = -438
v1.values.int64_values.append(456)
self.assertEqual(codec.encode({123: np.int64(456)}), expected)
def test_decode_dict_int_keys(self):
"""Dict with Python int keys."""
user_data = storage_pb2.Data()
d = user_data.dict.kvs
t1 = d.values.add().tuple
k1 = t1.values.add().datum
k1.shape.dim.add().size = -438
k1.values.bigint_values.append(b'{') # 123 == b'{'
v1 = t1.values.add().datum
v1.shape.dim.add().size = -438
v1.values.int64_values.append(456)
self.assertEqual(codec.decode(user_data), {123: np.int64(456)})
def test_identity_dict_int_keys(self):
"""Dict with Python int keys."""
self.assertEqual(
codec.decode(codec.encode({123: np.int64(456)})), {123: np.int64(456)})
def test_encode_dict_int64_keys(self):
"""Dict with Python int64 keys."""
expected = storage_pb2.Data()
d = expected.dict.kvs
t1 = d.values.add().tuple
k1 = t1.values.add().datum
k1.shape.dim.add().size = -438
k1.values.int64_values.append(np.int64(1729))
v1 = t1.values.add().datum
v1.shape.dim.add().size = -438
v1.values.int32_values.append(12345)
self.assertEqual(codec.encode({np.int64(1729): np.int32(12345)}), expected)
def test_decode_dict_int64_keys(self):
"""Dict with Python int64 keys."""
user_data = storage_pb2.Data()
d = user_data.dict.kvs
t1 = d.values.add().tuple
k1 = t1.values.add().datum
k1.shape.dim.add().size = -438
k1.values.int64_values.append(np.int64(1729))
v1 = t1.values.add().datum
v1.shape.dim.add().size = -438
v1.values.int32_values.append(12345)
self.assertEqual(codec.decode(user_data), {np.int64(1729): np.int32(12345)})
def test_identity_dict_int64_keys(self):
"""Dict with Python int keys."""
self.assertEqual(
codec.decode(codec.encode({np.int64(1729): np.int32(12345)})),
{np.int64(1729): np.int32(12345)})
def test_identity_dict_mixed_keytypes(self):
"""Dict with Python mixed key types."""
data = {123: np.int64(456), np.int64(1729): np.int32(12345), 'hello': True}
self.assertEqual(codec.decode(codec.encode(data)), data)
##############################################################################
#
# Unsupported types tests
#
##############################################################################
@parameterized.named_parameters(
('modules_are_not_supported', np),
('classes_are_not_supported', set),
('functions_are_not_supported', map),
('type_classes_are_not_supported', type(int)),
('sets_are_not_supported', set()),
('complex_numbers_are_not_supported', complex(1, 2)),
)
def test_unsupported_types(self, arg):
"""Ensures that TypeError is raised when an unsupported type is encoded."""
self.assertRaises(TypeError, codec.encode, arg)
if __name__ == '__main__':
absltest.main()
| envlogger-main | envlogger/converters/codec_test.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| envlogger-main | envlogger/converters/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converters to/from np arrays from/to the storage_pb2 proto storage format.
The encode() method transforms Python values into storage_pb2.Data() objects
that can be serialized to disk or network. Please refer to storage.proto for the
exact proto schema for details.
The decode() method transforms storage_pb2.Data() objects into Python values of
types from the following specific pre-defined list:
- 32-bit floats become np.float32.
- 64-bit floats become np.float64.
- 8-bit integers become np.int8.
- 16-bit integers become np.int16.
- 32-bit integers become np.int32.
- 64-bit integers become np.int64.
- 8-bit unsigned integers become np.uint8.
- 16-bit unsigned integers become np.uint16.
- 32-bit unsigned integers become np.uint32.
- 64-bit unsigned integers become np.uint64.
- arbitrarily long integers become int().
- boolean values become bool().
- string values become str().
- bytes values become bytes().
In particular, values that can be represented by different types in Python will
be cast to the above types. For example:
type(decode(encode(3.14159265))) == np.float64
which means that Python floats are implicitly cast to np.float64. This is true
even though type(np.float64(3.14159265)) != type(3.14159265).
We can also store multidimensional arrays (np.ndarray):
encode(np.array([[1, 2], [3, 4]], dtype=np.int8))
The shape is preserved and the dtype is cast to one of the types mentioned
above.
We can also compose values with lists, tuples and dictionaries. For example:
encode([1, 2, 3, 4])
or even:
encode([np.int8(1), np.int8(2), np.int8(3), np.int8(4)])
Note however that np.ndarrays are MUCH more efficiently stored because all
elements are packed within a single Datum instead of one Data with multiple
Datums, requiring multiple decoding steps. np.ndarrays also enforce that their
elements have the same type, which prevents bugs such as b/156304574. The bottom
line is: store your data as an np.ndarray if you can (e.g. tensors), and use
Python lists for everything else.
NOTE: encoding an empty list, dict or tuple stores nothing:
decode(encode([])) is None == True
Tuples:
encode((1, 2, 3, 4))
And dictionary of string to another value:
encode({'primes': np.array([2, 3, 5, 7, 11], dtype=np.int64)})
"""
import struct
from typing import Any, Dict, List, Optional, Tuple, Union
from envlogger.proto import storage_pb2
import numpy as np
# A type annotation that represents all the possible number types we support.
ScalarNumber = Union[float, int, np.float32, np.float64, np.int8, np.int16,
                     np.int32, np.int64, np.uint8, np.uint16, np.uint32,
                     np.uint64]
# Dimension size reserved for scalars. Please see proto definition.
_SCALAR_DIM_SIZE = -438
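# Illustrative note (not part of the original module): scalars are stored as a
# Datum whose shape has a single dimension of size _SCALAR_DIM_SIZE. For
# example, encode('pi') produces (as exercised by codec_test.py):
#   datum.values.string_values == ['pi']
#   datum.shape.dim[0].size    == -438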
# Converters for scalar int8, int16, uint8, uint16 and float32 values stored in
# big-endian format.
int8struct = struct.Struct('>b')
int16struct = struct.Struct('>h')
uint8struct = struct.Struct('>B')
uint16struct = struct.Struct('>H')
float32struct = struct.Struct('>f')
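# Illustrative example (added for exposition, not in the original file): these
# big-endian converters pack scalars into the bytes fields of the proto, e.g.
# (expected values match the scalar cases in codec_test.py):
#   int16struct.pack(-300)  == b'\xfe\xd4'
#   uint16struct.pack(1000) == b'\x03\xe8'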
def _python_int_to_bytes(py_int: int) -> bytes:
"""Encodes a vanilla Python integer into bytes.
The output is a signed, big-endian byte string with as many bytes as needed to
encode it without losing precision.
NOTE: Only signed integers are accepted.
Args:
py_int: The integer to be encoded.
Returns:
The integer represented as bytes.
"""
# Calculate the number of bytes needed to encode this _signed_ integer.
# For example, to encode int(127) we need 7 // 8 + 1 == 1 byte. This is the
# same as what we need to encode int(-127). However, to encode int(128) or
# int(-128) we actually need 2 bytes.
num_bytes_needed = py_int.bit_length() // 8 + 1
return py_int.to_bytes(num_bytes_needed, byteorder='big', signed=True)
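# A small worked example of the helper above (illustrative only; the expected
# bytes match the bigint cases in codec_test.py):
#   _python_int_to_bytes(3)  == b'\x03'  # 3.bit_length() // 8 + 1 == 1 byte
#   _python_int_to_bytes(-3) == b'\xfd'  # two's complement, big-endian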
def _set_datum_values_from_scalar(scalar: Union[ScalarNumber, bool, str, bytes],
datum: storage_pb2.Datum) -> bool:
"""Populates `datum` using `scalar` in a best effort way.
  Note that unrecognized scalar types are ignored (False is returned).
Args:
scalar: The source of the data.
datum: The destination of the copy.
Returns:
True if the population was successful, False otherwise.
"""
values = datum.values
shape = datum.shape
if isinstance(scalar, str):
values.string_values.append(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
if isinstance(scalar, bytes):
values.bytes_values.append(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
try:
fdtype = np.finfo(scalar).dtype
if fdtype == np.float32:
values.float_values.append(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
if fdtype == np.float64:
values.double_values.append(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
except ValueError:
pass
try:
# Vanilla Python ints.
if isinstance(scalar, int) and not isinstance(scalar, bool):
values.bigint_values.append(_python_int_to_bytes(scalar))
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
# Numpy ints.
idtype = np.iinfo(scalar).dtype
if idtype == np.int8:
values.int8_values = int8struct.pack(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
if idtype == np.int16:
values.int16_values = int16struct.pack(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
if idtype == np.int32:
values.int32_values.append(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
if idtype == np.int64:
values.int64_values.append(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
if idtype == np.uint8:
values.uint8_values = uint8struct.pack(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
if idtype == np.uint16:
values.uint16_values = uint16struct.pack(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
if idtype == np.uint32:
values.uint32_values.append(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
if idtype == np.uint64:
values.uint64_values.append(scalar)
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
except ValueError:
pass
if isinstance(scalar, bool) or isinstance(scalar, np.bool_):
values.bool_values.append(bool(scalar))
shape.dim.add().size = _SCALAR_DIM_SIZE
return True
return False
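# Note (illustrative, not in the original source): plain Python floats report a
# float64 dtype via np.finfo, so they land in `double_values` above. This is
# why decode(encode(3.14159265)) comes back as an np.float64 scalar, as stated
# in the module docstring.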
def _set_datum_values_from_array(array: np.ndarray,
values: storage_pb2.Datum.Values) -> None:
"""Populates `values` from entries in `array`.
Args:
array: The source of the data.
values: The destination of the copy.
"""
if array.dtype == np.float32:
setattr(values, 'float_values_buffer', array.astype('>f').tobytes())
return
for vs, dtype, cast_type in [
(values.double_values, np.float64, np.float64),
(values.int32_values, np.int32, np.int32),
(values.int64_values, np.int64, np.int64),
(values.uint32_values, np.uint32, np.uint32),
(values.uint64_values, np.uint64, np.uint64),
(values.bool_values, np.bool_, bool),
(values.string_values, np.unicode_, np.unicode_),
(values.bytes_values, np.bytes_, np.bytes_),
]:
if np.issubdtype(array.dtype, dtype):
for x in array.flatten():
vs.append(cast_type(x))
return
for key, dtype, cast_type in [('int8_values', np.int8, '>b'),
('int16_values', np.int16, '>h'),
('uint8_values', np.uint8, '>B'),
('uint16_values', np.uint16, '>H')]:
if np.issubdtype(array.dtype, dtype):
setattr(values, key, array.astype(cast_type).tobytes())
return
raise TypeError(f'Unsupported `array.dtype`: {array.dtype}')
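# Worked example for the float32 fast path above (a sketch; the byte string is
# the standard IEEE-754 big-endian encoding of 1.5):
#   np.array([1.5], dtype=np.float32).astype('>f').tobytes()
#       == b'?\xc0\x00\x00'
# i.e. float32 arrays are stored whole in `float_values_buffer` rather than
# element by element.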
def encode(
user_data: Union[np.ndarray, List[Any], Tuple[Any, ...], Dict[str, Any]]
) -> storage_pb2.Data:
"""Converts common Python data objects to storage_pb2.Data() proto.
This function converts numpy arrays, lists of numpy arrays, tuples of numpy
arrays, dicts of numpy arrays and their nested versions (e.g. lists of lists
of numpy arrays) to a proto format that can be written to disk.
  NOTE: When converting numpy arrays of strings or bytes, make sure their dtype
  is `object` so that no wrong conversions occur.
Usage:
# A bare numpy array.
proto_data = encode(np.ones((3, 4), dtype=np.int64))
# A list of numpy arrays.
    proto_data = encode([np.ones((5, 4), dtype=np.float64),
                         np.array([1.1, 2.2])])
# Please see the unit test for examples of other data types.
Args:
user_data: The python data to convert to proto.
Returns:
A storage_pb2.Data properly filled.
Raises:
TypeError: This error is raised in two different situations:
1. An unsupported type is passed. We support only a subset of all python
types. Things like functions, classes and even sets() are not supported.
2. A heterogeneous list is passed. For compatibility with other
programming languages, our definition of a list is narrower than
Python's and we enforce that all elements in the list have the exact
same type. We do not support something like [123, 'hello', True].
"""
output = storage_pb2.Data()
if user_data is None:
return output
datum = output.datum
if isinstance(user_data, list):
type_x = None
for index, x in enumerate(user_data):
# Ensure that all elements have the same type.
# This is intentionally verbose so that we can provide a useful message
# when an exception is raised.
if type_x is None:
type_x = type(x)
elif not isinstance(x, type_x):
raise TypeError(
'We assume list is homogeneous, i.e., data are of the same type.'
f' Expecting value of type {type_x} (type of the first element).'
f' Got {x} of type {type(x)}, index: {index},'
f' Whole list: {user_data}')
# Copy each element to the array.
output.array.values.add().CopyFrom(encode(x))
return output
if isinstance(user_data, tuple):
for x in user_data:
output.tuple.values.add().CopyFrom(encode(x))
return output
if isinstance(user_data, dict):
for k, v in user_data.items():
if isinstance(k, str):
output.dict.values[k].CopyFrom(encode(v))
else:
t = output.dict.kvs.values.add().tuple
t.values.add().CopyFrom(encode(k))
t.values.add().CopyFrom(encode(v))
return output
if isinstance(user_data, np.ndarray):
pass # The "base" ndarray case.
else: # Try to encode scalars.
if _set_datum_values_from_scalar(user_data, datum):
return output
raise TypeError(f'Unsupported data type: {type(user_data)}')
# Set shape.
for dim in user_data.shape:
if dim > 0:
proto_dim = datum.shape.dim.add()
proto_dim.size = dim
# Copy values.
_set_datum_values_from_array(user_data, datum.values)
return output
def decode_datum(
datum: storage_pb2.Datum
) -> Union[np.ndarray, ScalarNumber, bool, str, bytes]:
"""Creates a numpy array or scalar from a Datum protobuf.
Args:
datum: The source data.
Returns:
A Python object ready to be consumed.
"""
# Adjust shape.
shape = [dim.size for dim in datum.shape.dim]
is_scalar = len(shape) == 1 and shape[0] == _SCALAR_DIM_SIZE
array = None
values = datum.values
# Normal values.
for vs, dtype in [(values.float_values, np.float32),
(values.double_values, np.float64),
(values.int32_values, np.int32),
(values.int64_values, np.int64),
(values.uint32_values, np.uint32),
(values.uint64_values, np.uint64),
(values.bool_values, bool)]:
if vs:
if is_scalar:
return dtype(vs[0])
array = np.array(vs, dtype=dtype)
break
# Values packed in bytes.
for vs, converter, dtype, dtype_code in [
(values.float_values_buffer, float32struct, np.float32, '>f'),
(values.int8_values, int8struct, np.int8, '>b'),
(values.int16_values, int16struct, np.int16, '>h'),
(values.uint8_values, uint8struct, np.uint8, '>B'),
(values.uint16_values, uint16struct, np.uint16, '>H'),
]:
if vs:
if is_scalar:
return dtype(converter.unpack(vs)[0])
array = np.frombuffer(vs, dtype=dtype_code).astype(dtype)
if values.string_values:
if is_scalar:
return values.string_values[0]
array = np.array(list(values.string_values), dtype=object)
elif values.bytes_values:
if is_scalar:
return values.bytes_values[0]
array = np.array(list(values.bytes_values), dtype=object)
elif values.bigint_values:
def from_bigint(int_bytes):
return int.from_bytes(int_bytes, byteorder='big', signed=True)
if is_scalar:
return from_bigint(values.bigint_values[0])
raise TypeError(
f'Unsupported Datum of arbitrarily big ints: {values.bigint_values}')
if array is None:
return None
return np.reshape(array, shape)
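# Illustrative decode_datum behaviour (mirrors the int8 scalar case covered in
# codec_test.py; not part of the original module):
#   datum.values.int8_values = b'\xfd'; datum.shape.dim.add().size = -438
#   decode_datum(datum) == np.int8(-3)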
def decode(
user_data: storage_pb2.Data,
) -> Optional[
Union[
ScalarNumber,
bool,
str,
bytes,
np.ndarray,
List[Any],
Tuple[Any, ...],
Dict[Any, Any],
]
]:
"""Converts from storage_pb2.Data to common Python data objects.
This function converts a storage_pb2.Data protobuf to numpy arrays, lists of
numpy arrays, tuples of numpy arrays, dicts of numpy arrays and their nested
versions (e.g. lists of lists of numpy arrays).
For usage examples, please see the unit tests for this function.
NOTE: `string_values` and `bytes_values` will both use numpy's dtype ==
`object`. This is to avoid wrong conversions and unintended narrowing.
Args:
user_data: The protobuf data to convert to Python data objects.
Returns:
A Python object of numpy arrays.
"""
if user_data is None:
return None
  # The type of the wrapped protocol buffer can differ and, with the existing
  # versions of the dependencies, accessing its (map) fields triggers a check
  # failure, so we re-serialize into a local storage_pb2.Data first.
if not isinstance(user_data, storage_pb2.Data):
s = user_data.SerializeToString()
user_data = storage_pb2.Data()
user_data.ParseFromString(s)
if user_data.HasField('datum'):
return decode_datum(user_data.datum)
if user_data.HasField('array'):
return [decode(x) for x in user_data.array.values]
if user_data.HasField('tuple'):
return tuple((decode(x) for x in user_data.tuple.values))
if user_data.HasField('dict'):
string_dict: dict[Any, Any] = {
k: decode(x) for k, x in user_data.dict.values.items()
}
kvs_dict: dict[Any, Any] = {
decode(t.tuple.values[0]): decode(t.tuple.values[1])
for t in user_data.dict.kvs.values
}
string_dict.update(kvs_dict)
return string_dict
return None
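# A minimal round-trip sketch for the encoder/decoder above; the helper name
# is illustrative only. Note that lists must be homogeneous, as `encode`
# enforces.
def _encode_decode_roundtrip_example():
  """Sketch: encode Python data to a storage_pb2.Data proto and decode it."""
  original = {'obs': np.arange(6, dtype=np.float32).reshape(2, 3),
              'reward': 1.0,
              'counts': [np.int32(3), np.int32(7)]}
  proto = encode(original)   # storage_pb2.Data protobuf.
  recovered = decode(proto)  # Nested Python containers of numpy values.
  np.testing.assert_array_equal(original['obs'], recovered['obs'])
  return recovered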
| envlogger-main | envlogger/converters/codec.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Catch reinforcement learning environment."""
import dm_env
from dm_env import specs
import numpy as np
_ACTIONS = (-1, 0, 1) # Left, no-op, right.
class Catch(dm_env.Environment):
"""A Catch environment built on the `dm_env.Environment` class.
The agent must move a paddle to intercept falling balls. Falling balls only
move downwards on the column they are in.
  The observation is an array of shape (rows, columns), with binary values:
  zero if a space is empty; one if it contains the paddle or a ball.
The actions are discrete, and by default there are three available:
stay, move left, and move right.
The episode terminates when the ball reaches the bottom of the screen.
"""
def __init__(self, rows=10, columns=5, seed=1):
"""Initializes a new Catch environment.
Args:
rows: number of rows.
columns: number of columns.
seed: random seed for the RNG.
"""
self._rows = rows
self._columns = columns
self._rng = np.random.RandomState(seed)
self._board = np.zeros((rows, columns), dtype=np.float32)
self._ball_x = None
self._ball_y = None
self._paddle_x = None
self._paddle_y = self._rows - 1
self._reset_next_step = True
def reset(self):
"""Returns the first `TimeStep` of a new episode."""
self._reset_next_step = False
self._ball_x = self._rng.randint(self._columns)
self._ball_y = 0
self._paddle_x = self._columns // 2
return dm_env.restart(self._observation())
def step(self, action):
"""Updates the environment according to the action."""
if self._reset_next_step:
return self.reset()
# Move the paddle.
dx = _ACTIONS[action]
self._paddle_x = np.clip(self._paddle_x + dx, 0, self._columns - 1)
# Drop the ball.
self._ball_y += 1
# Check for termination.
if self._ball_y == self._paddle_y:
reward = 1. if self._paddle_x == self._ball_x else -1.
self._reset_next_step = True
return dm_env.termination(reward=reward, observation=self._observation())
else:
return dm_env.transition(reward=0., observation=self._observation())
def observation_spec(self):
"""Returns the observation spec."""
return specs.BoundedArray(shape=self._board.shape, dtype=self._board.dtype,
name="board", minimum=0, maximum=1)
def action_spec(self):
"""Returns the action spec."""
return specs.DiscreteArray(
dtype=int, num_values=len(_ACTIONS), name="action")
def _observation(self):
self._board.fill(0.)
self._board[self._ball_y, self._ball_x] = 1.
self._board[self._paddle_y, self._paddle_x] = 1.
return self._board.copy()
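# A minimal interaction sketch for the environment above; the random policy
# and helper name are illustrative only.
def _play_one_random_episode(seed: int = 0):
  """Sketch: run a single episode of Catch with uniformly random actions."""
  env = Catch(rows=10, columns=5, seed=seed)
  rng = np.random.RandomState(seed)
  timestep = env.reset()
  episode_return = 0.
  while not timestep.last():
    action = rng.randint(env.action_spec().num_values)
    timestep = env.step(action)
    episode_return += timestep.reward
  return episode_return  # +1 if the paddle caught the ball, -1 otherwise.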
| envlogger-main | envlogger/testing/catch_env.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| envlogger-main | envlogger/testing/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple binary to run catch for a while and record its trajectories.
"""
import time
from absl import app
from absl import flags
from absl import logging
import envlogger
from envlogger.testing import catch_env
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_episodes', 1000, 'Number of episodes to log.')
flags.DEFINE_string('trajectories_dir', '/tmp/catch_data/',
'Path in a filesystem to record trajectories.')
def main(unused_argv):
logging.info('Creating Catch environment...')
env = catch_env.Catch()
logging.info('Done creating Catch environment.')
def step_fn(unused_timestep, unused_action, unused_env):
return {'timestamp': time.time()}
logging.info('Wrapping environment with EnvironmentLogger...')
with envlogger.EnvLogger(
env,
data_directory=FLAGS.trajectories_dir,
max_episodes_per_file=1000,
metadata={
'agent_type': 'random',
'env_type': type(env).__name__,
'num_episodes': FLAGS.num_episodes,
},
step_fn=step_fn) as env:
logging.info('Done wrapping environment with EnvironmentLogger.')
logging.info('Training a random agent for %r episodes...',
FLAGS.num_episodes)
for i in range(FLAGS.num_episodes):
logging.info('episode %r', i)
timestep = env.reset()
while not timestep.last():
action = np.random.randint(low=0, high=3)
timestep = env.step(action)
logging.info('Done training a random agent for %r episodes.',
FLAGS.num_episodes)
if __name__ == '__main__':
app.run(main)
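# A sketch of reading the logged trajectories back. This assumes envlogger's
# `Reader` API (a `reader.Reader` exposing `.episodes`), as described in the
# project README; adjust the import and attributes if your version differs.
def _read_trajectories_sketch(trajectories_dir: str = '/tmp/catch_data/'):
  from envlogger import reader  # pylint: disable=g-import-not-at-top
  with reader.Reader(data_directory=trajectories_dir) as r:
    for episode in r.episodes:
      for step in episode:
        # Each step holds the logged timestep, the action taken and any
        # custom metadata produced by `step_fn` (the timestamp above).
        del step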
| envlogger-main | envlogger/examples/random_agent_catch.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| envlogger-main | envlogger/examples/__init__.py |
# coding=utf-8
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple binary to run catch for a while and record its trajectories.
"""
import time
from absl import app
from absl import flags
from absl import logging
import envlogger
from envlogger.backends import tfds_backend_writer
from envlogger.testing import catch_env
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_episodes', 1000, 'Number of episodes to log.')
flags.DEFINE_string('trajectories_dir', '/tmp/catch_data/',
'Path in a filesystem to record trajectories.')
def main(unused_argv):
logging.info('Creating Catch environment...')
env = catch_env.Catch()
logging.info('Done creating Catch environment.')
def step_fn(unused_timestep, unused_action, unused_env):
return {'timestamp': time.time()}
dataset_config = tfds.rlds.rlds_base.DatasetConfig(
name='catch_example',
observation_info=tfds.features.Tensor(
shape=(10, 5), dtype=tf.float32,
encoding=tfds.features.Encoding.ZLIB),
action_info=tf.int64,
reward_info=tf.float64,
discount_info=tf.float64,
step_metadata_info={'timestamp': tf.float32})
logging.info('Wrapping environment with EnvironmentLogger...')
with envlogger.EnvLogger(
env,
step_fn=step_fn,
backend = tfds_backend_writer.TFDSBackendWriter(
data_directory=FLAGS.trajectories_dir,
split_name='train',
max_episodes_per_file=500,
ds_config=dataset_config),
) as env:
logging.info('Done wrapping environment with EnvironmentLogger.')
logging.info('Training a random agent for %r episodes...',
FLAGS.num_episodes)
for i in range(FLAGS.num_episodes):
logging.info('episode %r', i)
timestep = env.reset()
while not timestep.last():
action = np.random.randint(low=0, high=3)
timestep = env.step(action)
logging.info('Done training a random agent for %r episodes.',
FLAGS.num_episodes)
if __name__ == '__main__':
app.run(main)
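# A sketch of loading the logged RLDS dataset back with TFDS. This assumes the
# standard `tfds.builder_from_directory` API; the helper name is illustrative.
def _load_logged_dataset_sketch(trajectories_dir: str = '/tmp/catch_data/'):
  builder = tfds.builder_from_directory(trajectories_dir)
  episodes = builder.as_dataset(split='train')
  for episode in episodes.take(1):
    # Each RLDS episode holds a nested `steps` dataset of transitions.
    for step in episode['steps'].take(3):
      del step  # step['observation'], step['action'], step['reward'], ...
  return builder.info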
| envlogger-main | envlogger/examples/tfds_random_agent_catch.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
import os
from setuptools import find_namespace_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open(os.path.join(_CURRENT_DIR, 'optax', '__init__.py')) as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `optax/__init__.py`')
def _parse_requirements(path):
with open(os.path.join(_CURRENT_DIR, path)) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
setup(
name='optax',
version=_get_version(),
url='https://github.com/deepmind/optax',
license='Apache 2.0',
author='DeepMind',
description=('A gradient processing and optimisation library in JAX.'),
long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
long_description_content_type='text/markdown',
author_email='[email protected]',
keywords='reinforcement-learning python machine learning',
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')),
tests_require=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements-test.txt')),
zip_safe=False, # Required for full installation.
python_requires='>=3.9',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| optax-master | setup.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration file for the Sphinx documentation builder."""
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# pylint: disable=g-bad-import-order
# pylint: disable=g-import-not-at-top
import inspect
import os
import sys
import typing
# The following typenames are re-written for public-facing type annotations.
TYPE_REWRITES = [
('~optax._src.base.GradientTransformation', 'optax.GradientTransformation'),
('~optax._src.base.Params', 'optax.Params'),
('~optax._src.base.Updates', 'optax.Updates'),
('~optax._src.base.OptState', 'optax.OptState'),
('base.GradientTransformation', 'optax.GradientTransformation'),
('base.Params', 'optax.Params'),
('base.Updates', 'optax.Updates'),
('base.OptState', 'optax.OptState'),
]
def _add_annotations_import(path):
"""Appends a future annotations import to the file at the given path."""
with open(path) as f:
contents = f.read()
if contents.startswith('from __future__ import annotations'):
# If we run sphinx multiple times then we will append the future import
# multiple times too.
return
assert contents.startswith('#'), (path, contents.split('\n')[0])
with open(path, 'w') as f:
    # NOTE: This is subtle and not unit tested: we're prefixing the first line
    # in each Python file with this future import. It is important to prefix
    # rather than insert a newline, so that source code locations stay
    # accurate (we link to GitHub). The assertion above ensures that the
    # first line in the file is a comment, so it is safe to prefix it.
f.write('from __future__ import annotations ')
f.write(contents)
def _recursive_add_annotations_import():
for path, _, files in os.walk('../optax/'):
for file in files:
if file.endswith('.py'):
_add_annotations_import(os.path.abspath(os.path.join(path, file)))
def _monkey_patch_doc_strings():
"""Rewrite function signatures to match the public API.
This is a bit of a dirty hack, but it helps ensure that the public-facing
docs have the correct type names and crosslinks.
Since all optax code lives in a `_src` directory, and since all function
annotations use types within that private directory, the public facing
annotations are given relative to private paths.
This means that the normal documentation generation process does not give
the correct import paths, and the paths it does give cannot cross link to
other parts of the documentation.
Do we really need to use the _src structure for optax?
Note, class members are not fixed by this patch, only function
  parameters. We should find a way to generalize this solution.
"""
import sphinx_autodoc_typehints
original_process_docstring = sphinx_autodoc_typehints.process_docstring
def new_process_docstring(app, what, name, obj, options, lines):
result = original_process_docstring(app, what, name, obj, options, lines)
for i in range(len(lines)):
l = lines[i]
for before, after in TYPE_REWRITES:
l = l.replace(before, after)
lines[i] = l
return result
sphinx_autodoc_typehints.process_docstring = new_process_docstring
if 'READTHEDOCS' in os.environ:
_recursive_add_annotations_import()
_monkey_patch_doc_strings()
# TODO(b/254461517) Remove the annotation filtering when we drop Python 3.8
# support.
# We remove `None` type annotations as this breaks Sphinx under Python 3.7 and
# 3.8 with error `AssertionError: Invalid annotation [...] None is not a class.`
filter_nones = lambda x: dict((k, v) for k, v in x.items() if v is not None)
typing.get_type_hints = lambda obj, *unused: filter_nones(obj.__annotations__)
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('ext'))
import optax
from sphinxcontrib import katex
# -- Project information -----------------------------------------------------
project = 'Optax'
copyright = '2021, DeepMind' # pylint: disable=redefined-builtin
author = 'Optax Contributors'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.napoleon',
'sphinxcontrib.bibtex',
'sphinxcontrib.katex',
'sphinx_autodoc_typehints',
'sphinx_book_theme',
'coverage_check',
'myst_nb', # This is used for the .ipynb notebooks
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for autodoc -----------------------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
'special-members': True,
'exclude-members': '__repr__, __str__, __weakref__',
}
# -- Options for bibtex ------------------------------------------------------
bibtex_bibfiles = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_book_theme'
html_theme_options = {
'logo_only': True,
'show_toc_level': 2,
}
html_logo = 'images/logo.svg'
html_favicon = 'images/favicon.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for myst -------------------------------------------------------
jupyter_execute_notebooks = 'force'
execution_allow_errors = False
# -- Options for katex ------------------------------------------------------
# See: https://sphinxcontrib-katex.readthedocs.io/en/0.4.1/macros.html
latex_macros = r"""
\def \d #1{\operatorname{#1}}
"""
# Translate LaTeX macros to KaTeX and add to options for HTML builder
katex_macros = katex.latex_defs_to_katex_macros(latex_macros)
katex_options = '{displayMode: true, fleqn: true, macros: {' + katex_macros + '}}'
# Add LaTeX macros for LATEX builder
latex_elements = {'preamble': latex_macros}
# -- Source code links -------------------------------------------------------
def linkcode_resolve(domain, info):
"""Resolve a GitHub URL corresponding to Python object."""
if domain != 'py':
return None
try:
mod = sys.modules[info['module']]
except ImportError:
return None
obj = mod
try:
for attr in info['fullname'].split('.'):
obj = getattr(obj, attr)
except AttributeError:
return None
else:
obj = inspect.unwrap(obj)
try:
filename = inspect.getsourcefile(obj)
except TypeError:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
return None
# TODO(slebedev): support tags after we release an initial version.
return 'https://github.com/deepmind/optax/tree/master/optax/%s#L%d#L%d' % (
os.path.relpath(filename, start=os.path.dirname(
optax.__file__)), lineno, lineno + len(source) - 1)
# -- Intersphinx configuration -----------------------------------------------
intersphinx_mapping = {
'jax': ('https://jax.readthedocs.io/en/latest/', None),
}
source_suffix = ['.rst', '.md', '.ipynb']
| optax-master | docs/conf.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Asserts all public symbols are covered in the docs."""
from typing import Any, Mapping
import optax
from optax._src import test_utils
from sphinx import application
from sphinx import builders
from sphinx import errors
def optax_public_symbols():
names = set()
for module_name, module in test_utils.find_internal_python_modules(optax):
for name in module.__all__:
names.add(module_name + "." + name)
return names
class OptaxCoverageCheck(builders.Builder):
"""Builder that checks all public symbols are included."""
name = "coverage_check"
def get_outdated_docs(self) -> str:
return "coverage_check"
def write(self, *ignored: Any) -> None:
pass
def finish(self) -> None:
documented_objects = frozenset(self.env.domaindata["py"]["objects"])
undocumented_objects = set(optax_public_symbols()) - documented_objects
if undocumented_objects:
undocumented_objects = tuple(sorted(undocumented_objects))
raise errors.SphinxError(
"All public symbols must be included in our documentation, did you "
"forget to add an entry to `api.rst`?\n"
f"Undocumented symbols: {undocumented_objects}")
def setup(app: application.Sphinx) -> Mapping[str, Any]:
app.add_builder(OptaxCoverageCheck)
return dict(version=optax.__version__, parallel_read_safe=True)
| optax-master | docs/ext/coverage_check.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optax: composable gradient processing and optimization, in JAX."""
from optax import contrib
from optax import experimental
from optax._src.alias import adabelief
from optax._src.alias import adafactor
from optax._src.alias import adagrad
from optax._src.alias import adam
from optax._src.alias import adamax
from optax._src.alias import adamaxw
from optax._src.alias import adamw
from optax._src.alias import amsgrad
from optax._src.alias import dpsgd
from optax._src.alias import fromage
from optax._src.alias import lamb
from optax._src.alias import lars
from optax._src.alias import lion
from optax._src.alias import MaskOrFn
from optax._src.alias import noisy_sgd
from optax._src.alias import novograd
from optax._src.alias import optimistic_gradient_descent
from optax._src.alias import radam
from optax._src.alias import rmsprop
from optax._src.alias import ScalarOrSchedule
from optax._src.alias import sgd
from optax._src.alias import sm3
from optax._src.alias import yogi
from optax._src.base import EmptyState
from optax._src.base import GradientTransformation
from optax._src.base import GradientTransformationExtraArgs
from optax._src.base import identity
from optax._src.base import OptState
from optax._src.base import Params
from optax._src.base import Schedule
from optax._src.base import set_to_zero
from optax._src.base import stateless
from optax._src.base import stateless_with_tree_map
from optax._src.base import TransformInitFn
from optax._src.base import TransformUpdateExtraArgsFn
from optax._src.base import TransformUpdateFn
from optax._src.base import Updates
from optax._src.base import with_extra_args_support
from optax._src.clipping import adaptive_grad_clip
from optax._src.clipping import AdaptiveGradClipState
from optax._src.clipping import clip
from optax._src.clipping import clip_by_block_rms
from optax._src.clipping import clip_by_global_norm
from optax._src.clipping import ClipByGlobalNormState
from optax._src.clipping import ClipState
from optax._src.clipping import per_example_global_norm_clip
from optax._src.combine import chain
from optax._src.combine import multi_transform
from optax._src.combine import MultiTransformState
from optax._src.constrain import keep_params_nonnegative
from optax._src.constrain import NonNegativeParamsState
from optax._src.constrain import zero_nans
from optax._src.constrain import ZeroNansState
from optax._src.control_variates import control_delta_method
from optax._src.control_variates import control_variates_jacobians
from optax._src.control_variates import moving_avg_baseline
from optax._src.factorized import FactoredState
from optax._src.factorized import scale_by_factored_rms
from optax._src.linear_algebra import global_norm
from optax._src.linear_algebra import matrix_inverse_pth_root
from optax._src.linear_algebra import power_iteration
from optax._src.lookahead import lookahead
from optax._src.lookahead import LookaheadParams
from optax._src.lookahead import LookaheadState
from optax._src.loss import convex_kl_divergence
from optax._src.loss import cosine_distance
from optax._src.loss import cosine_similarity
from optax._src.loss import ctc_loss
from optax._src.loss import ctc_loss_with_forward_probs
from optax._src.loss import hinge_loss
from optax._src.loss import huber_loss
from optax._src.loss import kl_divergence
from optax._src.loss import l2_loss
from optax._src.loss import log_cosh
from optax._src.loss import sigmoid_binary_cross_entropy
from optax._src.loss import smooth_labels
from optax._src.loss import softmax_cross_entropy
from optax._src.loss import softmax_cross_entropy_with_integer_labels
from optax._src.loss import squared_error
from optax._src.numerics import safe_int32_increment
from optax._src.numerics import safe_norm
from optax._src.numerics import safe_root_mean_squares
from optax._src.privacy import differentially_private_aggregate
from optax._src.privacy import DifferentiallyPrivateAggregateState
from optax._src.schedule import constant_schedule
from optax._src.schedule import cosine_decay_schedule
from optax._src.schedule import cosine_onecycle_schedule
from optax._src.schedule import exponential_decay
from optax._src.schedule import inject_hyperparams
from optax._src.schedule import InjectHyperparamsState
from optax._src.schedule import join_schedules
from optax._src.schedule import linear_onecycle_schedule
from optax._src.schedule import linear_schedule
from optax._src.schedule import piecewise_constant_schedule
from optax._src.schedule import piecewise_interpolate_schedule
from optax._src.schedule import polynomial_schedule
from optax._src.schedule import sgdr_schedule
from optax._src.schedule import warmup_cosine_decay_schedule
from optax._src.schedule import warmup_exponential_decay_schedule
from optax._src.second_order import fisher_diag
from optax._src.second_order import hessian_diag
from optax._src.second_order import hvp
from optax._src.state_utils import tree_map_params
from optax._src.stochastic_gradient_estimators import measure_valued_jacobians
from optax._src.stochastic_gradient_estimators import pathwise_jacobians
from optax._src.stochastic_gradient_estimators import score_function_jacobians
from optax._src.transform import add_decayed_weights
from optax._src.transform import add_noise
from optax._src.transform import AddDecayedWeightsState
from optax._src.transform import AddNoiseState
from optax._src.transform import apply_every
from optax._src.transform import ApplyEvery
from optax._src.transform import bias_correction
from optax._src.transform import centralize
from optax._src.transform import ema
from optax._src.transform import EmaState
from optax._src.transform import scale
from optax._src.transform import scale_by_adam
from optax._src.transform import scale_by_adamax
from optax._src.transform import scale_by_amsgrad
from optax._src.transform import scale_by_belief
from optax._src.transform import scale_by_distance_over_gradients
from optax._src.transform import scale_by_lion
from optax._src.transform import scale_by_novograd
from optax._src.transform import scale_by_optimistic_gradient
from optax._src.transform import scale_by_param_block_norm
from optax._src.transform import scale_by_param_block_rms
from optax._src.transform import scale_by_radam
from optax._src.transform import scale_by_rms
from optax._src.transform import scale_by_rss
from optax._src.transform import scale_by_schedule
from optax._src.transform import scale_by_sm3
from optax._src.transform import scale_by_stddev
from optax._src.transform import scale_by_trust_ratio
from optax._src.transform import scale_by_yogi
from optax._src.transform import ScaleByAdamState
from optax._src.transform import ScaleByAmsgradState
from optax._src.transform import ScaleByBeliefState
from optax._src.transform import ScaleByLionState
from optax._src.transform import ScaleByNovogradState
from optax._src.transform import ScaleByRmsState
from optax._src.transform import ScaleByRssState
from optax._src.transform import ScaleByRStdDevState
from optax._src.transform import ScaleByScheduleState
from optax._src.transform import ScaleBySM3State
from optax._src.transform import ScaleByTrustRatioState
from optax._src.transform import ScaleState
from optax._src.transform import trace
from optax._src.transform import TraceState
from optax._src.transform import update_infinity_moment
from optax._src.transform import update_moment
from optax._src.transform import update_moment_per_elem_norm
from optax._src.update import apply_updates
from optax._src.update import incremental_update
from optax._src.update import periodic_update
from optax._src.utils import multi_normal
from optax._src.utils import scale_gradient
from optax._src.wrappers import apply_if_finite
from optax._src.wrappers import ApplyIfFiniteState
from optax._src.wrappers import flatten
from optax._src.wrappers import masked
from optax._src.wrappers import MaskedNode
from optax._src.wrappers import MaskedState
from optax._src.wrappers import maybe_update
from optax._src.wrappers import MaybeUpdateState
from optax._src.wrappers import MultiSteps
from optax._src.wrappers import MultiStepsState
from optax._src.wrappers import ShouldSkipUpdateFunction
from optax._src.wrappers import skip_large_updates
from optax._src.wrappers import skip_not_finite
__version__ = "0.1.8.dev"
__all__ = (
"adabelief",
"adafactor",
"adagrad",
"adam",
"adamax",
"adamaxw",
"adamw",
"adaptive_grad_clip",
"AdaptiveGradClipState",
"add_decayed_weights",
"add_noise",
"AddDecayedWeightsState",
"AddNoiseState",
"amsgrad",
"apply_every",
"apply_if_finite",
"apply_updates",
"ApplyEvery",
"ApplyIfFiniteState",
"centralize",
"chain",
"clip_by_block_rms",
"clip_by_global_norm",
"clip",
"ClipByGlobalNormState",
"ClipState",
"constant_schedule",
"ctc_loss",
"ctc_loss_with_forward_probs",
"control_delta_method",
"control_variates_jacobians",
"convex_kl_divergence",
"cosine_decay_schedule",
"cosine_distance",
"cosine_onecycle_schedule",
"cosine_similarity",
"differentially_private_aggregate",
"DifferentiallyPrivateAggregateState",
"dpsgd",
"ema",
"EmaState",
"EmptyState",
"exponential_decay",
"FactoredState",
"fisher_diag",
"flatten",
"fromage",
"global_norm",
"GradientTransformation",
"GradientTransformationExtraArgs",
"hinge_loss",
"hessian_diag",
"huber_loss",
"hvp",
"identity",
"incremental_update",
"inject_hyperparams",
"InjectHyperparamsState",
"join_schedules",
"keep_params_nonnegative",
"kl_divergence",
"l2_loss",
"lamb",
"lars",
"lion",
"linear_onecycle_schedule",
"linear_schedule",
"log_cosh",
"lookahead",
"LookaheadParams",
"LookaheadState",
"masked",
"MaskOrFn",
"MaskedState",
"matrix_inverse_pth_root",
"maybe_update",
"MaybeUpdateState",
"measure_valued_jacobians",
"moving_avg_baseline",
"multi_normal",
"multi_transform",
"MultiSteps",
"MultiStepsState",
"MultiTransformState",
"noisy_sgd",
"novograd",
"NonNegativeParamsState",
"OptState",
"Params",
"pathwise_jacobians",
"periodic_update",
"per_example_global_norm_clip",
"piecewise_constant_schedule",
"piecewise_interpolate_schedule",
"polynomial_schedule",
"power_iteration",
"radam",
"rmsprop",
"safe_int32_increment",
"safe_norm",
"safe_root_mean_squares",
"ScalarOrSchedule",
"scale_by_adam",
"scale_by_adamax",
"scale_by_amsgrad",
"scale_by_belief",
"scale_by_lion",
"scale_by_factored_rms",
"scale_by_novograd",
"scale_by_param_block_norm",
"scale_by_param_block_rms",
"scale_by_radam",
"scale_by_rms",
"scale_by_rss",
"scale_by_schedule",
"scale_by_sm3",
"scale_by_stddev",
"scale_by_trust_ratio",
"scale_by_yogi",
"scale_gradient",
"scale",
"ScaleByAdamState",
"ScaleByAmsgradState",
"ScaleByBeliefState",
"ScaleByLionState",
"ScaleByNovogradState",
"ScaleByRmsState",
"ScaleByRssState",
"ScaleByRStdDevState",
"ScaleByScheduleState",
"ScaleBySM3State",
"ScaleByTrustRatioState",
"ScaleState",
"Schedule",
"score_function_jacobians",
"set_to_zero",
"sgd",
"sgdr_schedule",
"ShouldSkipUpdateFunction",
"sigmoid_binary_cross_entropy",
"skip_large_updates",
"skip_not_finite",
"sm3",
"smooth_labels",
"softmax_cross_entropy",
"softmax_cross_entropy_with_integer_labels",
"stateless",
"stateless_with_tree_map",
"trace",
"TraceState",
"TransformInitFn",
"TransformUpdateFn",
"TransformUpdateExtraArgsFn",
"Updates",
"warmup_cosine_decay_schedule",
"warmup_exponential_decay_schedule",
"yogi",
"zero_nans",
"ZeroNansState",
)
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the Optax public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
| optax-master | optax/__init__.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optax."""
from absl.testing import absltest
import optax
class OptaxTest(absltest.TestCase):
"""Test optax can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(optax, 'GradientTransformation'))
if __name__ == '__main__':
absltest.main()
| optax-master | optax/optax_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental features in Optax.
Features may be removed or modified at any time.
"""
from optax._src.experimental.complex_valued import split_real_and_imaginary
from optax._src.experimental.complex_valued import SplitRealAndImaginaryState
from optax._src.experimental.extra_args import GradientTransformationWithExtraArgs
from optax._src.experimental.extra_args import named_chain
| optax-master | optax/experimental/__init__.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear algebra utilities used in optimisation."""
import chex
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
from optax._src import base
from optax._src import numerics
def global_norm(updates: base.PyTree) -> chex.Array:
"""Compute the global norm across a nested structure of tensors."""
return jnp.sqrt(sum(
jnp.sum(numerics.abs_sq(x)) for x in jax.tree_util.tree_leaves(updates)))
def power_iteration(matrix: chex.Array,
num_iters: int = 100,
error_tolerance: float = 1e-6,
precision: lax.Precision = lax.Precision.HIGHEST):
r"""Power iteration algorithm.
The power iteration algorithm takes a symmetric PSD matrix `A`, and produces
  a scalar `\lambda`, which is the greatest (in absolute value) eigenvalue
of `A`, and a vector v, which is the corresponding eigenvector of `A`.
References:
[Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration)
Args:
matrix: the symmetric PSD matrix.
num_iters: Number of iterations.
error_tolerance: Iterative exit condition.
precision: precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise);
b) lax.Precision.HIGH (increased precision, slower);
c) lax.Precision.HIGHEST (best possible precision, slowest).
Returns:
    The dominant eigenvector and the corresponding eigenvalue.
"""
matrix_size = matrix.shape[-1]
def _iter_condition(state):
i, unused_v, unused_s, unused_s_v, run_step = state
return jnp.logical_and(i < num_iters, run_step)
def _iter_body(state):
"""One step of power iteration."""
i, new_v, s, s_v, unused_run_step = state
new_v = new_v / jnp.linalg.norm(new_v)
s_v = jnp.einsum('ij,j->i', matrix, new_v, precision=precision)
s_new = jnp.einsum('i,i->', new_v, s_v, precision=precision)
return (i + 1, s_v, s_new, s_v,
jnp.greater(jnp.abs(s_new - s), error_tolerance))
# Figure out how to use step as seed for random.
v_0 = np.random.uniform(-1.0, 1.0, matrix_size).astype(matrix.dtype)
init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True])
_, v_out, s_out, _, _ = lax.while_loop(
_iter_condition, _iter_body, init_state)
v_out = v_out / jnp.linalg.norm(v_out)
return v_out, s_out
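# A minimal usage sketch for power_iteration; the matrix and helper name are
# illustrative only.
def _power_iteration_example():
  """Sketch: dominant eigenpair of a small symmetric PSD matrix."""
  matrix = jnp.array([[2.0, 1.0], [1.0, 3.0]], dtype=jnp.float32)
  eigvec, eigval = power_iteration(matrix, num_iters=100)
  # `eigval` approximates the largest eigenvalue (about 3.618 here), and
  # `matrix @ eigvec` is close to `eigval * eigvec`.
  return eigvec, eigval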
def matrix_inverse_pth_root(matrix: chex.Array,
p: int,
num_iters: int = 100,
ridge_epsilon: float = 1e-6,
error_tolerance: float = 1e-6,
precision: lax.Precision = lax.Precision.HIGHEST):
"""Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the Coupled newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
    matrix: the symmetric PSD matrix whose power is to be computed.
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise);
b) lax.Precision.HIGH (increased precision, slower);
c) lax.Precision.HIGHEST (best possible precision, slowest).
Returns:
matrix^(-1/p)
"""
# We use float32 for the matrix inverse pth root.
# Switch to f64 if you have hardware that supports it.
matrix_size = matrix.shape[0]
alpha = jnp.asarray(-1.0 / p, jnp.float32)
identity = jnp.eye(matrix_size, dtype=jnp.float32)
_, max_ev = power_iteration(
matrix=matrix, num_iters=100,
error_tolerance=1e-6, precision=precision)
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-16)
def _unrolled_mat_pow_1(mat_m):
"""Computes mat_m^1."""
return mat_m
def _unrolled_mat_pow_2(mat_m):
"""Computes mat_m^2."""
return jnp.matmul(mat_m, mat_m, precision=precision)
def _unrolled_mat_pow_4(mat_m):
"""Computes mat_m^4."""
mat_pow_2 = _unrolled_mat_pow_2(mat_m)
return jnp.matmul(
mat_pow_2, mat_pow_2, precision=precision)
def _unrolled_mat_pow_8(mat_m):
"""Computes mat_m^4."""
mat_pow_4 = _unrolled_mat_pow_4(mat_m)
return jnp.matmul(
mat_pow_4, mat_pow_4, precision=precision)
def mat_power(mat_m, p):
"""Computes mat_m^p, for p == 1, 2, 4 or 8.
Args:
mat_m: a square matrix
p: a positive integer
Returns:
mat_m^p
"""
# We unrolled the loop for performance reasons.
exponent = jnp.round(jnp.log2(p))
return lax.switch(
jnp.asarray(exponent, jnp.int32), [
_unrolled_mat_pow_1,
_unrolled_mat_pow_2,
_unrolled_mat_pow_4,
_unrolled_mat_pow_8,
], (mat_m))
def _iter_condition(state):
(i, unused_mat_m, unused_mat_h, unused_old_mat_h, error,
run_step) = state
error_above_threshold = jnp.logical_and(
error > error_tolerance, run_step)
return jnp.logical_and(i < num_iters, error_above_threshold)
def _iter_body(state):
(i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state
mat_m_i = (1 - alpha) * identity + alpha * mat_m
new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision)
new_error = jnp.max(jnp.abs(new_mat_m - identity))
# sometimes error increases after an iteration before decreasing and
# converging. 1.2 factor is used to bound the maximal allowed increase.
return (i + 1, new_mat_m, new_mat_h, mat_h, new_error,
new_error < error * 1.2)
if matrix_size == 1:
resultant_mat_h = (matrix + ridge_epsilon)**alpha
error = 0
else:
damped_matrix = matrix + ridge_epsilon * identity
z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix))
new_mat_m_0 = damped_matrix * z
new_error = jnp.max(jnp.abs(new_mat_m_0 - identity))
new_mat_h_0 = identity * jnp.power(z, 1.0 / p)
init_state = tuple(
[0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True])
_, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop(
_iter_condition, _iter_body, init_state)
error = jnp.max(jnp.abs(mat_m - identity))
is_converged = jnp.asarray(convergence, old_mat_h.dtype)
resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h
resultant_mat_h = jnp.asarray(resultant_mat_h, matrix.dtype)
return resultant_mat_h, error
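# A minimal usage sketch for matrix_inverse_pth_root; the matrix and helper
# name are illustrative only.
def _matrix_inverse_pth_root_example():
  """Sketch: inverse square root (p=2) of a small symmetric PSD matrix."""
  matrix = jnp.array([[4.0, 0.0], [0.0, 9.0]], dtype=jnp.float32)
  inv_sqrt, error = matrix_inverse_pth_root(matrix, p=2)
  # Up to the small ridge regularizer, `inv_sqrt` is approximately
  # diag(1/2, 1/3); `error` reports how far the iteration is from convergence.
  return inv_sqrt, error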
| optax-master | optax/_src/linear_algebra.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Apply transformed gradient updates to parameters."""
import chex
import jax
import jax.numpy as jnp
from optax._src import base
def apply_updates(params: base.Params, updates: base.Updates) -> base.Params:
"""Applies an update to the corresponding parameters.
  This is a utility function that applies an update to a set of parameters
  and then returns the updated parameters to the caller. As an example, the
  update may be a gradient transformed by a sequence of
  `GradientTransformations`. This function is exposed for convenience, but it
  just adds updates and parameters; you may also apply updates to parameters
  manually, using `tree_map` (e.g. if you want to manipulate updates in
  custom ways before applying them).
Args:
params: a tree of parameters.
updates: a tree of updates, the tree structure and the shape of the leaf
nodes must match that of `params`.
Returns:
Updated parameters, with same structure, shape and type as `params`.
"""
return jax.tree_util.tree_map(
lambda p, u: jnp.asarray(p + u).astype(jnp.asarray(p).dtype),
params, updates)
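# A minimal usage sketch for apply_updates; the loss and step size below are
# illustrative only. Any GradientTransformation could produce the updates.
def _apply_updates_example():
  """Sketch: a plain gradient-descent step applied with apply_updates."""
  params = {'w': jnp.ones((2,)), 'b': jnp.zeros(())}
  loss = lambda p: jnp.sum(p['w'] ** 2) + p['b'] ** 2
  grads = jax.grad(loss)(params)
  updates = jax.tree_util.tree_map(lambda g: -0.1 * g, grads)  # sgd-like step.
  return apply_updates(params, updates)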
def incremental_update(
new_tensors: base.Params,
old_tensors: base.Params,
step_size: chex.Numeric
) -> base.Params:
"""Incrementally update parameters via polyak averaging.
Polyak averaging tracks an (exponential moving) average of the past
parameters of a model, for use at test/evaluation time.
References:
[Polyak et al, 1991](https://epubs.siam.org/doi/10.1137/0330046)
Args:
new_tensors: the latest value of the tensors.
old_tensors: a moving average of the values of the tensors.
step_size: the step_size used to update the polyak average on each step.
Returns:
an updated moving average `step_size*new+(1-step_size)*old` of the params.
"""
return jax.tree_util.tree_map(
lambda new, old: step_size * new + (1.0 - step_size) * old,
new_tensors, old_tensors)
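# A minimal usage sketch for incremental_update (polyak averaging); the values
# below are illustrative only.
def _polyak_average_example():
  """Sketch: keep a slow moving average of parameters for evaluation."""
  online_params = {'w': jnp.full((3,), 2.0)}
  average_params = {'w': jnp.zeros((3,))}
  # Move the average 1% of the way towards the latest parameters.
  average_params = incremental_update(online_params, average_params,
                                      step_size=0.01)
  return average_params  # {'w': array of 0.02s}.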
def periodic_update(
new_tensors: base.Params,
old_tensors: base.Params,
steps: chex.Array,
update_period: int
) -> base.Params:
"""Periodically update all parameters with new values.
A slow copy of a model's parameters, updated every K actual updates, can be
used to implement forms of self-supervision (in supervised learning), or to
stabilise temporal difference learning updates (in reinforcement learning).
References:
[Grill et al., 2020](https://arxiv.org/abs/2006.07733)
[Mnih et al., 2015](https://arxiv.org/abs/1312.5602)
Args:
new_tensors: the latest value of the tensors.
old_tensors: a slow copy of the model's parameters.
steps: number of update steps on the "online" network.
update_period: every how many steps to update the "target" network.
Returns:
a slow copy of the model's parameters, updated every `update_period` steps.
"""
return jax.lax.cond(
jnp.mod(steps, update_period) == 0,
lambda _: new_tensors,
lambda _: old_tensors,
None)
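# A minimal usage sketch for periodic_update; the parameters and period are
# illustrative only.
def _target_network_update_example(steps):
  """Sketch: refresh a "target" copy of the parameters every 100 steps."""
  online_params = {'w': jnp.ones((4,))}
  target_params = {'w': jnp.zeros((4,))}
  return periodic_update(online_params, target_params,
                         jnp.asarray(steps), update_period=100)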
| optax-master | optax/_src/update.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for base.py."""
from absl.testing import absltest
import chex
import jax
import jax.numpy as jnp
import numpy as np
from optax._src import base
# pylint:disable=no-value-for-parameter
class BaseTest(chex.TestCase):
def test_typing(self):
"""Ensure that the type annotations work for the update function."""
def f(updates, opt_state, params=None):
del params
return updates, opt_state
def g(f: base.TransformUpdateFn):
updates = np.zeros([])
params = np.zeros([])
opt_state = np.zeros([])
f(updates, opt_state)
f(updates, opt_state, params)
f(updates, opt_state, params=params)
g(f)
@chex.all_variants
def test_set_to_zero_returns_tree_of_correct_zero_arrays(self):
"""Tests that zero transform returns a tree of zeros of correct shape."""
grads = ({'a': np.ones((3, 4)), 'b': 1.}, np.ones((1, 2, 3)))
updates, _ = self.variant(base.set_to_zero().update)(grads,
base.EmptyState())
correct_zeros = ({'a': np.zeros((3, 4)), 'b': 0.}, np.zeros((1, 2, 3)))
chex.assert_trees_all_close(updates, correct_zeros, rtol=0)
@chex.all_variants(with_pmap=False)
def test_set_to_zero_is_stateless(self):
"""Tests that the zero transform returns an empty state."""
self.assertEqual(
self.variant(base.set_to_zero().init)(params=None), base.EmptyState())
class ExtraArgsTest(chex.TestCase):
def test_isinstance(self):
"""Locks in behaviour for comparing transformations."""
def init_fn(params):
del params
return {}
def update_fn(updates, state, params=None):
del params
return updates, state
t1 = base.GradientTransformation(init_fn, update_fn)
self.assertIsInstance(t1, base.GradientTransformation)
self.assertNotIsInstance(t1, base.GradientTransformationExtraArgs)
t2 = base.with_extra_args_support(t1)
self.assertIsInstance(t2, base.GradientTransformation)
self.assertIsInstance(t2, base.GradientTransformationExtraArgs)
with self.subTest('args_correctly_ignored'):
state = t2.init({})
t2.update({}, state, ignored_arg='hi')
t3 = base.with_extra_args_support(t2)
self.assertIsInstance(t3, base.GradientTransformation)
self.assertIsInstance(t3, base.GradientTransformationExtraArgs)
def test_extra_args_with_callback(self):
"""An example of using extra args to log the learning rate."""
def init_fn(params):
del params
return {}
def update_fn(updates, state, *, metrics_logger=None, **extra_args):
del extra_args
if metrics_logger:
metrics_logger('learning_rate', 0.3)
return updates, state
t = base.GradientTransformationExtraArgs(init_fn, update_fn)
@jax.jit
def f(params):
state = t.init(params)
metrics = {}
def metrics_logger(name, value):
metrics[name] = value
t.update(params, state, metrics_logger=metrics_logger)
return metrics
metrics = f({'a': 1})
self.assertEqual(metrics['learning_rate'], 0.3)
class StatelessTest(chex.TestCase):
"""Tests for the stateless transformation."""
@chex.all_variants
def test_stateless(self):
params = {'a': jnp.zeros((1, 2)), 'b': jnp.ones((1,))}
updates = {'a': jnp.ones((1, 2)), 'b': jnp.full((1,), 2.0)}
@base.stateless
def opt(g, p):
return jax.tree_util.tree_map(lambda g_, p_: g_ + 0.1 * p_, g, p)
state = opt.init(params)
update_fn = self.variant(opt.update)
new_updates, _ = update_fn(updates, state, params)
expected_updates = {'a': jnp.ones((1, 2)), 'b': jnp.array([2.1])}
chex.assert_trees_all_close(new_updates, expected_updates)
@chex.all_variants
def test_stateless_no_params(self):
updates = {'linear': jnp.full((5, 3), 3.0)}
@base.stateless
def opt(g, _):
return jax.tree_util.tree_map(lambda g_: g_ * 2, g)
state = opt.init(None) # pytype: disable=wrong-arg-types # numpy-scalars
update_fn = self.variant(opt.update)
new_updates, _ = update_fn(updates, state)
expected_updates = {'linear': jnp.full((5, 3), 6.0)}
chex.assert_trees_all_close(new_updates, expected_updates)
def test_init_returns_emptystate(self):
def weight_decay(g, p):
return jax.tree_util.tree_map(lambda g_, p_: g_ + 0.1 * p_, g, p)
opt = base.stateless(weight_decay)
state = opt.init(None) # pytype: disable=wrong-arg-types # numpy-scalars
self.assertIsInstance(state, base.EmptyState)
class StatelessWithTreeMapTest(chex.TestCase):
"""Tests for the stateless_with_tree_map transformation."""
@chex.all_variants
def test_stateless_with_tree_map(self):
params = {'a': jnp.zeros((1, 2)), 'b': jnp.ones((1,))}
updates = {'a': jnp.ones((1, 2)), 'b': jnp.full((1,), 2.0)}
opt = base.stateless_with_tree_map(lambda g, p: g + 0.1 * p)
state = opt.init(params)
update_fn = self.variant(opt.update)
new_updates, _ = update_fn(updates, state, params)
expected_updates = {'a': jnp.ones((1, 2)), 'b': jnp.array([2.1])}
chex.assert_trees_all_close(new_updates, expected_updates)
@chex.all_variants
def test_stateless_with_tree_map_no_params(self):
updates = {'linear': jnp.full((5, 3), 3.0)}
opt = base.stateless_with_tree_map(lambda g, _: g * 2.0)
state = opt.init(None) # pytype: disable=wrong-arg-types # numpy-scalars
update_fn = self.variant(opt.update)
new_updates, _ = update_fn(updates, state)
expected_updates = {'linear': jnp.full((5, 3), 6.0)}
chex.assert_trees_all_close(new_updates, expected_updates)
def test_init_returns_emptystate(self):
opt = base.stateless_with_tree_map(lambda g, p: g + 0.1 * p)
state = opt.init(None) # pytype: disable=wrong-arg-types # numpy-scalars
self.assertIsInstance(state, base.EmptyState)
if __name__ == '__main__':
absltest.main()
| optax-master | optax/_src/base_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities for Optax."""
import inspect
import types
from typing import Sequence, Tuple
def find_internal_python_modules(
root_module: types.ModuleType,
) -> Sequence[Tuple[str, types.ModuleType]]:
"""Returns `(name, module)` for all Optax submodules under `root_module`."""
modules = set([(root_module.__name__, root_module)])
visited = set()
to_visit = [root_module]
while to_visit:
mod = to_visit.pop()
visited.add(mod)
for name in dir(mod):
obj = getattr(mod, name)
if inspect.ismodule(obj) and obj not in visited:
if obj.__name__.startswith('optax'):
if '_src' not in obj.__name__:
to_visit.append(obj)
modules.add((obj.__name__, obj))
return sorted(modules)
| optax-master | optax/_src/test_utils.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Aliases for popular optimizers."""
from typing import Any, Callable, Optional, Union
import jax
import jax.numpy as jnp
from optax._src import base
from optax._src import clipping
from optax._src import combine
from optax._src import factorized
from optax._src import privacy
from optax._src import transform
from optax._src import wrappers
ScalarOrSchedule = Union[float, jax.Array, base.Schedule]
MaskOrFn = Optional[Union[Any, Callable[[base.Params], Any]]]
def _scale_by_learning_rate(learning_rate: ScalarOrSchedule, flip_sign=True):
m = -1 if flip_sign else 1
if callable(learning_rate):
return transform.scale_by_schedule(lambda count: m * learning_rate(count))
return transform.scale(m * learning_rate)
def adabelief(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-16,
eps_root: float = 1e-16) -> base.GradientTransformation:
"""The AdaBelief optimizer.
AdaBelief is an adaptive learning rate optimizer that focuses on fast
convergence, generalization, and stability. It adapts the step size depending
on its "belief" in the gradient direction — the optimizer adaptively scales
the step size by the difference between the predicted and observed gradients.
AdaBelief is a modified version of Adam and contains the same number of
parameters.
References:
Zhuang et al, 2020: https://arxiv.org/abs/2010.07468
Args:
learning_rate: A fixed global scaling factor.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the second moment of past gradients.
eps: Term added to the denominator to improve numerical stability.
eps_root: Term added to the second moment of the prediction error to
improve numerical stability. If backpropagating gradients through the
gradient transformation (e.g. for meta-learning), this must be non-zero.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_belief(b1=b1, b2=b2, eps=eps, eps_root=eps_root),
_scale_by_learning_rate(learning_rate),
)
def adafactor(
learning_rate: Optional[ScalarOrSchedule] = None,
min_dim_size_to_factor: int = 128,
decay_rate: float = 0.8,
decay_offset: int = 0,
    multiply_by_parameter_scale: bool = True,
clipping_threshold: Optional[float] = 1.0,
momentum: Optional[float] = None,
dtype_momentum: Any = jnp.float32,
weight_decay_rate: Optional[float] = None,
eps: float = 1e-30,
factored: bool = True,
weight_decay_mask: MaskOrFn = None,
) -> base.GradientTransformation:
"""The Adafactor optimizer.
Adafactor is an adaptive learning rate optimizer that focuses on fast
training of large scale neural networks. It saves memory by using a factored
estimate of the second order moments used to scale gradients.
References:
Shazeer and Stern, 2018: https://arxiv.org/abs/1804.04235
Args:
    learning_rate: A fixed global scaling factor. Note: the natural scale for
      Adafactor's learning rate is markedly different from Adam's; one does not
      use the 1/sqrt(hidden) correction for this optimizer with attention-based
      models.
min_dim_size_to_factor: Only factor the statistics if two array dimensions
have at least this size.
decay_rate: Controls second-moment exponential decay schedule.
decay_offset: For fine-tuning, one may set this to the starting step
number of the fine-tuning phase.
multiply_by_parameter_scale: If True, then scale learning_rate by
parameter norm. If False, provided learning_rate is absolute step size.
clipping_threshold: Optional clipping threshold. Must be >= 1. If None,
clipping is disabled.
momentum: Optional value between 0 and 1, enables momentum and uses extra
memory if non-None! None by default.
dtype_momentum: Data type of momentum buffers.
weight_decay_rate: Optional rate at which to decay weights.
eps: Regularization constant for root mean squared gradient.
factored: Whether to use factored second-moment estimates.
weight_decay_mask: A tree with same structure as (or a prefix of)
the params PyTree, or a Callable that returns such a pytree given
the params/updates. The leaves should be booleans, `True`
for leaves/subtrees you want to apply the transformation to,
and `False` for those you want to skip.
Returns:
The corresponding `GradientTransformation`.
"""
# The core of the algorithm is a procedure for rescaling gradients
# by a factored estimate of the root mean squared gradients.
# This reduces memory compared to algorithms such as Adam or RmsProp,
# by not having to hold a separate estimate for each weight.
tx = [
factorized.scale_by_factored_rms(
factored, decay_rate, decay_offset, min_dim_size_to_factor, eps)]
# This basic rescaling is typically combined with one or more of the following
  # transformations (all can be disabled via adafactor's constructor args).
if clipping_threshold is not None:
tx.append(clipping.clip_by_block_rms(clipping_threshold))
if learning_rate is not None:
tx.append(_scale_by_learning_rate(learning_rate, flip_sign=False))
if multiply_by_parameter_scale:
tx.append(transform.scale_by_param_block_rms())
if momentum is not None:
tx.append(
transform.ema(momentum, debias=False, accumulator_dtype=dtype_momentum))
if weight_decay_rate is not None:
tx.append(transform.add_decayed_weights(
weight_decay_rate, mask=weight_decay_mask))
# In gradient "descent" we follow the negative gradient.
tx.append(transform.scale(-1))
return combine.chain(*tx)
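# Illustrative usage sketch (added for exposition, not part of the original
# source): Adafactor is often run without an explicit learning rate, relying
# on `multiply_by_parameter_scale`; the parameter pytree below is hypothetical.
def _example_adafactor_usage():
  params = {'w': jnp.ones((256, 256))}
  tx = adafactor(learning_rate=None, multiply_by_parameter_scale=True)
  opt_state = tx.init(params)
  grads = jax.tree_util.tree_map(jnp.ones_like, params)
  updates, opt_state = tx.update(grads, opt_state, params)
  return jax.tree_util.tree_map(lambda p, u: p + u, params, updates)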
def adagrad(
learning_rate: ScalarOrSchedule,
initial_accumulator_value: float = 0.1,
eps: float = 1e-7
) -> base.GradientTransformation:
"""The Adagrad optimizer.
Adagrad is an algorithm for gradient based optimization that anneals the
learning rate for each parameter during the course of training.
WARNING: Adagrad's main limit is the monotonic accumulation of squared
gradients in the denominator: since all terms are >0, the sum keeps growing
during training and the learning rate eventually becomes vanishingly small.
References:
Duchi et al, 2011: https://jmlr.org/papers/v12/duchi11a.html
Args:
learning_rate: A fixed global scaling factor.
initial_accumulator_value: Initial value for the accumulator.
eps: A small constant applied to denominator inside of the square root
(as in RMSProp) to avoid dividing by zero when rescaling.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_rss(
initial_accumulator_value=initial_accumulator_value, eps=eps),
_scale_by_learning_rate(learning_rate),
)
def adam(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
mu_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
r"""The classic Adam optimizer.
Adam is an SGD variant with gradient scaling adaptation. The scaling
used for each parameter is computed from estimates of first and second-order
moments of the gradients (using suitable exponential moving averages).
Let :math:`\alpha_t` represent the learning rate and :math:`\beta_1, \beta_2`,
:math:`\varepsilon`, :math:`\bar{\varepsilon}` represent the arguments
  ``b1``, ``b2``, ``eps`` and ``eps_root`` respectively. The learning rate is
indexed by :math:`t` since the learning rate may also be provided by a
schedule function.
The ``init`` function of this optimizer initializes an internal state
:math:`S_0 := (m_0, v_0) = (0, 0)`, representing initial estimates for the
first and second moments. In practice these values are stored as pytrees
containing all zeros, with the same shape as the model updates.
At step :math:`t`, the ``update`` function of this optimizer takes as
arguments the incoming gradients :math:`g_t` and optimizer state :math:`S_t`
and computes updates :math:`u_t` and new state :math:`S_{t+1}`. Thus, for
:math:`t > 0`, we have,
.. math::
\begin{align*}
m_t &\leftarrow \beta_1 \cdot m_{t-1} + (1-\beta_1) \cdot g_t \\
v_t &\leftarrow \beta_2 \cdot v_{t-1} + (1-\beta_2) \cdot {g_t}^2 \\
\hat{m}_t &\leftarrow m_t / {(1-\beta_1^t)} \\
\hat{v}_t &\leftarrow v_t / {(1-\beta_2^t)} \\
u_t &\leftarrow \alpha_t \cdot \hat{m}_t / \left({\sqrt{\hat{v}_t +
\bar{\varepsilon}} + \varepsilon} \right)\\
S_t &\leftarrow (m_t, v_t).
\end{align*}
References:
Kingma et al, 2014: https://arxiv.org/abs/1412.6980
Args:
learning_rate: A fixed global scaling factor.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the second moment of past gradients.
eps: A small constant applied to denominator outside of the square root
(as in the Adam paper) to avoid dividing by zero when rescaling.
eps_root: A small constant applied to denominator inside the square root (as
in RMSProp), to avoid dividing by zero when rescaling. This is needed for
example when computing (meta-)gradients through Adam.
mu_dtype: Optional `dtype` to be used for the first order accumulator; if
`None` then the `dtype` is inferred from `params` and `updates`.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_adam(
b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),
_scale_by_learning_rate(learning_rate),
)
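# Illustrative usage sketch (added for exposition, not part of the original
# source): a single optimization step with the `adam` alias. The quadratic
# loss and the parameter pytree are hypothetical placeholders.
def _example_adam_usage():
  def loss_fn(params, x):
    return jnp.sum((params['w'] * x) ** 2)

  params = {'w': jnp.ones((3,))}
  tx = adam(learning_rate=1e-3)
  opt_state = tx.init(params)
  grads = jax.grad(loss_fn)(params, jnp.array([1.0, 2.0, 3.0]))
  updates, opt_state = tx.update(grads, opt_state)
  # Applying the updates additively; equivalent to optax.apply_updates.
  return jax.tree_util.tree_map(lambda p, u: p + u, params, updates)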
def adamw(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
mu_dtype: Optional[Any] = None,
weight_decay: float = 1e-4,
mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None,
) -> base.GradientTransformation:
"""Adam with weight decay regularization.
AdamW uses weight decay to regularize learning towards small weights, as
this leads to better generalization. In SGD you can also use L2 regularization
to implement this as an additive loss term, however L2 regularization
does not behave as intended for adaptive gradient algorithms such as Adam.
References:
Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101
Args:
learning_rate: A fixed global scaling factor.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the second moment of past gradients.
eps: A small constant applied to denominator outside of the square root
(as in the Adam paper) to avoid dividing by zero when rescaling.
eps_root: A small constant applied to denominator inside the square root (as
in RMSProp), to avoid dividing by zero when rescaling. This is needed for
instance when computing (meta-)gradients through Adam.
mu_dtype: Optional `dtype` to be used for the first order accumulator; if
`None` then the `dtype` is inferred from `params` and `updates`.
weight_decay: Strength of the weight decay regularization. Note that this
weight decay is multiplied with the learning rate. This is consistent
with other frameworks such as PyTorch, but different from
(Loshchilov et al, 2019) where the weight decay is only multiplied with
the "schedule multiplier", but not the base learning rate.
mask: A tree with same structure as (or a prefix of) the params PyTree,
or a Callable that returns such a pytree given the params/updates.
The leaves should be booleans, `True` for leaves/subtrees you want to
apply the weight decay to, and `False` for those you want to skip. Note
that the Adam gradient transformations are applied to all parameters.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_adam(
b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),
transform.add_decayed_weights(weight_decay, mask),
_scale_by_learning_rate(learning_rate),
)
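# Illustrative sketch (added for exposition, not part of the original source):
# using `mask` so that weight decay is applied only to multi-dimensional
# parameters (a common convention for skipping biases); the pytree below is
# hypothetical. Note that `update` must be given `params` because of the
# decayed-weights term.
def _example_adamw_mask_usage():
  params = {'w': jnp.ones((4, 4)), 'b': jnp.zeros((4,))}
  decay_mask = jax.tree_util.tree_map(lambda p: p.ndim > 1, params)
  tx = adamw(learning_rate=1e-3, weight_decay=1e-2, mask=decay_mask)
  opt_state = tx.init(params)
  grads = jax.tree_util.tree_map(jnp.ones_like, params)
  updates, opt_state = tx.update(grads, opt_state, params)
  return jax.tree_util.tree_map(lambda p, u: p + u, params, updates)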
def lion(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.99,
mu_dtype: Optional[Any] = None,
weight_decay: float = 1e-3,
mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None,
) -> base.GradientTransformation:
"""The Lion optimizer.
  Lion was discovered by symbolic program search. Unlike most adaptive optimizers
such as AdamW, Lion only tracks momentum, making it more memory-efficient.
The update of Lion is produced through the sign operation, resulting in a
larger norm compared to updates produced by other optimizers such as SGD and
AdamW. A suitable learning rate for Lion is typically 3-10x smaller than that
for AdamW, the weight decay for Lion should be in turn 3-10x larger than that
for AdamW to maintain a similar strength (lr * wd).
References:
Chen et al, 2023: https://arxiv.org/abs/2302.06675
Args:
learning_rate: A fixed global scaling factor.
b1: Rate to combine the momentum and the current gradient.
b2: Exponential decay rate to track the momentum of past gradients.
mu_dtype: Optional `dtype` to be used for the first order accumulator; if
`None` then the `dtype` is inferred from `params` and `updates`.
weight_decay: Strength of the weight decay regularization. Note that this
weight decay is multiplied with the learning rate. This is consistent
with other frameworks such as PyTorch, but different from
(Loshchilov et al, 2019) where the weight decay is only multiplied with
the "schedule multiplier", but not the base learning rate.
mask: A tree with same structure as (or a prefix of) the params PyTree,
or a Callable that returns such a pytree given the params/updates.
The leaves should be booleans, `True` for leaves/subtrees you want to
apply the weight decay to, and `False` for those you want to skip. Note
      that the Lion gradient transformations are applied to all parameters.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_lion(b1=b1, b2=b2, mu_dtype=mu_dtype),
transform.add_decayed_weights(weight_decay, mask),
_scale_by_learning_rate(learning_rate),
)
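# Illustrative sketch (added for exposition, not part of the original source):
# following the rule of thumb from the docstring, a hypothetical AdamW
# configuration with lr=3e-4 and wd=1e-2 maps to a Lion configuration with a
# roughly 3x smaller learning rate and a correspondingly larger weight decay,
# keeping the product lr * wd roughly constant.
def _example_lion_vs_adamw_config():
  adamw_tx = adamw(learning_rate=3e-4, weight_decay=1e-2)
  lion_tx = lion(learning_rate=1e-4, weight_decay=3e-2)
  return adamw_tx, lion_tx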
def amsgrad(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
mu_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
"""The AMSGrad optimiser.
The original Adam can fail to converge to the optimal solution in some cases.
AMSGrad guarantees convergence by using a long-term memory of past gradients.
References:
Reddi et al, 2018: https://openreview.net/forum?id=ryQu7f-RZ
Args:
learning_rate: A fixed global scaling factor.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the second moment of past gradients.
eps: A small constant applied to denominator outside of the square root
(as in the Adam paper) to avoid dividing by zero when rescaling.
eps_root: A small constant applied to denominator inside the square root (as
in RMSProp), to avoid dividing by zero when rescaling. This is needed for
instance when computing (meta-)gradients through Adam.
mu_dtype: Optional `dtype` to be used for the first order accumulator; if
`None` then the `dtype` is inferred from `params` and `updates`.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_amsgrad(
b1=b1, b2=b2, eps=eps, eps_root=eps_root, mu_dtype=mu_dtype),
_scale_by_learning_rate(learning_rate),
)
def fromage(
learning_rate: float,
min_norm: float = 1e-6
) -> base.GradientTransformation:
"""The Frobenius matched gradient descent (Fromage) optimizer.
Fromage is a learning algorithm that does not require learning rate tuning.
The optimizer is based on modeling neural network gradients via deep relative
trust (a distance function on deep neural networks). Fromage is similar to the
LARS optimizer and can work on a range of standard neural network benchmarks,
such as natural language Transformers and generative adversarial networks.
References:
Bernstein et al, 2020: https://arxiv.org/abs/2002.03432
Args:
learning_rate: A fixed global scaling factor.
min_norm: A minimum value that the norm of the gradient updates and the norm
      of the layer parameters can be clipped to, to avoid dividing by zero when
computing the trust ratio (as in the LARS paper).
Returns:
The corresponding `GradientTransformation`.
"""
mult = 1 / jnp.sqrt(1 + learning_rate ** 2)
return combine.chain(
transform.scale_by_trust_ratio(min_norm),
_scale_by_learning_rate(learning_rate * mult),
transform.add_decayed_weights((mult - 1)),
)
def lars(
learning_rate: ScalarOrSchedule,
weight_decay: float = 0.,
weight_decay_mask: MaskOrFn = True,
trust_coefficient: float = 0.001,
eps: float = 0.,
trust_ratio_mask: MaskOrFn = True,
momentum: float = 0.9,
nesterov: bool = False,
) -> base.GradientTransformation:
"""The LARS optimizer.
LARS is a layer-wise adaptive optimizer introduced to help scale SGD to
larger batch sizes. LARS later inspired the LAMB optimizer.
References:
You et al, 2017: https://arxiv.org/abs/1708.03888
Args:
learning_rate: A fixed global scaling factor.
weight_decay: Strength of the weight decay regularization.
weight_decay_mask: A tree with same structure as (or a prefix of) the params
PyTree, or a Callable that returns such a pytree given the params/updates.
The leaves should be booleans, `True` for leaves/subtrees you want to
apply the transformation to, and `False` for those you want to skip.
trust_coefficient: A multiplier for the trust ratio.
eps: Optional additive constant in the trust ratio denominator.
trust_ratio_mask: A tree with same structure as (or a prefix of) the params
PyTree, or a Callable that returns such a pytree given the params/updates.
The leaves should be booleans, `True` for leaves/subtrees you want to
apply the transformation to, and `False` for those you want to skip.
momentum: Decay rate for momentum.
nesterov: Whether to use Nesterov momentum.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.add_decayed_weights(weight_decay, mask=weight_decay_mask),
wrappers.masked(
inner=transform.scale_by_trust_ratio(
trust_coefficient=trust_coefficient, eps=eps),
mask=trust_ratio_mask),
_scale_by_learning_rate(learning_rate),
transform.trace(decay=momentum, nesterov=nesterov),
)
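# Illustrative sketch (added for exposition, not part of the original source):
# restricting the layer-wise trust ratio to weight matrices via
# `trust_ratio_mask`; the parameter pytree is hypothetical. `params` must be
# passed to `update` since both weight decay and the trust ratio depend on it.
def _example_lars_usage():
  params = {'w': jnp.ones((8, 8)), 'b': jnp.zeros((8,))}
  mask = jax.tree_util.tree_map(lambda p: p.ndim > 1, params)
  tx = lars(learning_rate=0.1, weight_decay=1e-4, trust_ratio_mask=mask)
  opt_state = tx.init(params)
  grads = jax.tree_util.tree_map(jnp.ones_like, params)
  updates, opt_state = tx.update(grads, opt_state, params)
  return updates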
def lamb(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-6,
eps_root: float = 0.0,
weight_decay: float = 0.,
mask: MaskOrFn = None,
) -> base.GradientTransformation:
"""The LAMB optimizer.
LAMB is a general purpose layer-wise adaptive large batch optimizer designed
to provide consistent training performance across a wide range of tasks,
including those that use attention-based models (such as Transformers) and
ResNet-50. The optimizer is able to work with small and large batch sizes.
LAMB was inspired by the LARS learning algorithm.
References:
You et al, 2019: https://arxiv.org/abs/1904.00962
Args:
learning_rate: A fixed global scaling factor.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the second moment of past gradients.
eps: A small constant applied to denominator outside of the square root
(as in the Adam paper) to avoid dividing by zero when rescaling.
eps_root: A small constant applied to denominator inside the square root (as
in RMSProp), to avoid dividing by zero when rescaling. This is needed for
instance when computing (meta-)gradients through Adam.
weight_decay: Strength of the weight decay regularization.
mask: A tree with same structure as (or a prefix of) the params PyTree,
or a Callable that returns such a pytree given the params/updates.
The leaves should be booleans, `True` for leaves/subtrees you want to
apply the transformation to, and `False` for those you want to skip.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),
transform.add_decayed_weights(weight_decay=weight_decay, mask=mask),
transform.scale_by_trust_ratio(),
_scale_by_learning_rate(learning_rate),
)
def noisy_sgd(
learning_rate: ScalarOrSchedule,
eta: float = 0.01,
gamma: float = 0.55,
seed: int = 0
) -> base.GradientTransformation:
r"""A variant of SGD with added noise.
It has been found that adding noise to the gradients can improve
both the training error and the generalization error in very deep networks.
References:
    Neelakantan et al, 2015: https://arxiv.org/abs/1511.06807
Args:
learning_rate: A fixed global scaling factor.
eta: Initial variance for the Gaussian noise added to gradients.
gamma: A parameter controlling the annealing of noise over time, the
variance decays according to `(1+t)^-\gamma`.
seed: Seed for the pseudo-random generation process.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.add_noise(eta, gamma, seed),
_scale_by_learning_rate(learning_rate),
)
def novograd(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.25,
eps: float = 1e-6,
eps_root: float = 0.0,
weight_decay: float = 0.,
) -> base.GradientTransformation:
"""NovoGrad optimizer.
NovoGrad is more robust to the initial learning rate and
weight initialization than other methods. For example,
NovoGrad works well without LR warm-up, while other methods require it.
NovoGrad performs exceptionally well for large batch training, e.g. it
outperforms other methods for ResNet-50 for all batches up to 32K.
In addition, NovoGrad requires half the memory compared to Adam.
It was introduced together with Jasper ASR model.
References:
Ginsburg et al, 2019: https://arxiv.org/abs/1905.11286
Li et al, 2019: https://arxiv.org/abs/1904.03288
Args:
learning_rate: A fixed global scaling factor.
b1: An exponential decay rate to track the first moment of past gradients.
b2: An exponential decay rate to track the second moment of past gradients.
eps: A small constant applied to denominator outside of the square root (as
in the Adam paper) to avoid dividing by zero when rescaling.
eps_root: A small constant applied to denominator inside
the square root (as in RMSProp), to avoid dividing by zero when rescaling.
This is needed for instance when computing (meta-)gradients through Adam.
weight_decay: Strength of the weight decay regularization.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_novograd(
b1=b1, b2=b2, eps=eps, eps_root=eps_root, weight_decay=weight_decay),
_scale_by_learning_rate(learning_rate),
)
def optimistic_gradient_descent(
learning_rate: ScalarOrSchedule,
alpha: ScalarOrSchedule = 1.0,
beta: ScalarOrSchedule = 1.0
) -> base.GradientTransformation:
"""An Optimistic Gradient Descent optimizer.
Optimistic gradient descent is an approximation of extra-gradient methods
which require multiple gradient calls to compute the next update. It has
strong formal guarantees for last-iterate convergence in min-max games, for
which standard gradient descent can oscillate or even diverge.
References:
Mokhtari et al, 2019: https://arxiv.org/abs/1901.08511v2
Args:
learning_rate: A fixed global scaling factor.
alpha: Coefficient for generalized OGD.
beta: Coefficient for generalized OGD negative momentum.
Returns:
A `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_optimistic_gradient(alpha=alpha, beta=beta),
_scale_by_learning_rate(learning_rate)
)
def radam(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
threshold: float = 5.0
) -> base.GradientTransformation:
"""The Rectified Adam optimizer.
The adaptive learning rate in Adam has undesirably large variance in early
stages of training, due to the limited number of training samples used to
estimate the optimizer's statistics. Rectified Adam addresses this issue
by analytically reducing the large variance.
References:
    Liu et al, 2019: https://arxiv.org/abs/1908.03265
Args:
learning_rate: A fixed global scaling factor.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the second moment of past gradients.
eps: A small constant applied to denominator outside of the square root
(as in the Adam paper) to avoid dividing by zero when rescaling.
eps_root: A small constant applied to denominator inside the square root (as
in RMSProp), to avoid dividing by zero when rescaling. This is needed for
instance when computing (meta-)gradients through Adam.
threshold: Threshold for variance tractability.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_radam(
b1=b1, b2=b2, eps=eps, eps_root=eps_root, threshold=threshold),
_scale_by_learning_rate(learning_rate),
)
def rmsprop(
learning_rate: ScalarOrSchedule,
decay: float = 0.9,
eps: float = 1e-8,
initial_scale: float = 0.,
centered: bool = False,
momentum: Optional[float] = None,
nesterov: bool = False
) -> base.GradientTransformation:
# pylint: disable=line-too-long
"""A flexible RMSProp optimizer.
RMSProp is an SGD variant with learning rate adaptation. The `learning_rate`
used for each weight is scaled by a suitable estimate of the magnitude of the
gradients on previous steps. Several variants of RMSProp can be found
in the literature. This alias provides an easy to configure RMSProp
optimizer that can be used to switch between several of these variants.
References:
Tieleman and Hinton, 2012: http://www.cs.toronto.edu/~hinton/coursera/lecture6/lec6.pdf
Graves, 2013: https://arxiv.org/abs/1308.0850
Args:
learning_rate: A fixed global scaling factor.
decay: Decay used to track the magnitude of previous gradients.
eps: A small numerical constant to avoid dividing by zero when rescaling.
initial_scale: Initial value of accumulators tracking the magnitude of
previous updates. PyTorch uses `0`, TF1 uses `1`. When reproducing results
from a paper, verify the value used by the authors.
centered: Whether the second moment or the variance of the past gradients is
used to rescale the latest gradients.
    momentum: Decay rate used by the momentum term; when it is set to `None`,
      momentum is not used at all.
nesterov: Whether Nesterov momentum is used.
Returns:
The corresponding `GradientTransformation`.
"""
# pylint: enable=line-too-long
if centered:
return combine.chain(
transform.scale_by_stddev(
decay=decay, eps=eps, initial_scale=initial_scale),
_scale_by_learning_rate(learning_rate),
(transform.trace(decay=momentum, nesterov=nesterov)
if momentum is not None else base.identity())
)
return combine.chain(
transform.scale_by_rms(
decay=decay, eps=eps, initial_scale=initial_scale),
_scale_by_learning_rate(learning_rate),
(transform.trace(decay=momentum, nesterov=nesterov)
if momentum is not None else base.identity())
)
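# Illustrative sketch (added for exposition, not part of the original source):
# two common RMSProp variants expressible with this alias: the plain version,
# and the centered version with heavy-ball momentum. Hyperparameter values are
# hypothetical.
def _example_rmsprop_variants():
  plain_tx = rmsprop(learning_rate=1e-3)
  centered_tx = rmsprop(
      learning_rate=1e-3, decay=0.9, centered=True, momentum=0.9)
  return plain_tx, centered_tx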
def sgd(
learning_rate: ScalarOrSchedule,
momentum: Optional[float] = None,
nesterov: bool = False,
accumulator_dtype: Optional[Any] = None,
) -> base.GradientTransformation:
"""A canonical Stochastic Gradient Descent optimizer.
This implements stochastic gradient descent. It also includes support for
momentum, and nesterov acceleration, as these are standard practice when
using stochastic gradient descent to train deep neural networks.
References:
Sutskever et al, 2013: http://proceedings.mlr.press/v28/sutskever13.pdf
Args:
learning_rate: A fixed global scaling factor.
    momentum: Decay rate used by the momentum term; when it is set to `None`,
      momentum is not used at all.
nesterov: Whether Nesterov momentum is used.
accumulator_dtype: Optional `dtype` to be used for the accumulator; if
`None` then the `dtype` is inferred from `params` and `updates`.
Returns:
A `GradientTransformation`.
"""
return combine.chain(
(transform.trace(decay=momentum, nesterov=nesterov,
accumulator_dtype=accumulator_dtype)
if momentum is not None else base.identity()),
_scale_by_learning_rate(learning_rate)
)
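# Illustrative usage sketch (added for exposition, not part of the original
# source): SGD with Nesterov momentum driven by a schedule. The step-decay
# schedule below is a hypothetical example.
def _example_sgd_usage():
  lr_schedule = lambda count: 0.1 * (0.5 ** (count // 100))
  tx = sgd(learning_rate=lr_schedule, momentum=0.9, nesterov=True)
  params = jnp.zeros((5,))
  opt_state = tx.init(params)
  grads = jnp.ones((5,))
  updates, opt_state = tx.update(grads, opt_state)
  return params + updates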
def sm3(
learning_rate: float,
momentum: float = 0.9
) -> base.GradientTransformation:
"""The SM3 optimizer.
SM3 (Square-root of Minima of Sums of Maxima of Squared-gradients Method) is a
memory-efficient adaptive optimizer designed to decrease memory overhead when
training very large models, such as the Transformer for machine translation,
BERT for language modeling, and AmoebaNet-D for image classification. SM3: 1)
applies to tensors of arbitrary dimensions and any predefined cover of the
parameters; 2) adapts the learning rates in an adaptive and data-driven manner
(like Adagrad and unlike Adafactor); and 3) comes with rigorous convergence
guarantees in stochastic convex optimization settings.
References:
Anil et al, 2019: https://arxiv.org/abs/1901.11150
Args:
learning_rate: A fixed global scaling factor.
    momentum: Decay rate used by the momentum term.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_sm3(momentum),
transform.scale(-learning_rate),
)
def yogi(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-3,
) -> base.GradientTransformation:
# pylint: disable=line-too-long
"""The Yogi optimizer.
Yogi is an adaptive optimizer, which provides control in tuning the effective
learning rate to prevent it from increasing. By doing so, it focuses on
addressing the issues of convergence and generalization in exponential moving
average-based adaptive methods (such as Adam and RMSprop). Yogi is a
modification of Adam and uses the same parameters.
References:
Zaheer et al, 2018: https://proceedings.neurips.cc/paper/2018/file/90365351ccc7437a1309dc64e4db32a3-Paper.pdf
Args:
learning_rate: A fixed global scaling factor.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the second moment of past gradients.
eps: A small constant applied to denominator outside of the square root
(as in the Adam paper) to avoid dividing by zero when rescaling.
Returns:
The corresponding `GradientTransformation`.
"""
# pylint: enable=line-too-long
return combine.chain(
transform.scale_by_yogi(b1=b1, b2=b2, eps=eps),
_scale_by_learning_rate(learning_rate),
)
def dpsgd(
learning_rate: ScalarOrSchedule,
l2_norm_clip: float,
noise_multiplier: float,
seed: int,
momentum: Optional[float] = None,
nesterov: bool = False
) -> base.GradientTransformation:
"""The DPSGD optimizer.
Differential privacy is a standard for privacy guarantees of algorithms
learning from aggregate databases including potentially sensitive information.
DPSGD offers protection against a strong adversary with full knowledge of the
training mechanism and access to the model’s parameters.
WARNING: This `GradientTransformation` expects input updates to have a batch
dimension on the 0th axis. That is, this function expects per-example
gradients as input (which are easy to obtain in JAX using `jax.vmap`).
References:
Abadi et al, 2016: https://arxiv.org/abs/1607.00133
Args:
learning_rate: A fixed global scaling factor.
l2_norm_clip: Maximum L2 norm of the per-example gradients.
noise_multiplier: Ratio of standard deviation to the clipping norm.
seed: Initial seed used for the jax.random.PRNGKey
    momentum: Decay rate used by the momentum term; when it is set to `None`,
      momentum is not used at all.
nesterov: Whether Nesterov momentum is used.
Returns:
A `GradientTransformation`.
"""
return combine.chain(
privacy.differentially_private_aggregate(
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
seed=seed),
(transform.trace(decay=momentum, nesterov=nesterov)
if momentum is not None else base.identity()),
_scale_by_learning_rate(learning_rate)
)
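# Illustrative usage sketch (added for exposition, not part of the original
# source): dpsgd consumes *per-example* gradients, which carry an extra batch
# axis on the 0th dimension and are obtained here with `jax.vmap`. The loss
# function and data are hypothetical.
def _example_dpsgd_usage():
  def per_example_loss(params, x):
    return jnp.sum((params * x) ** 2)

  params = jnp.ones((3,))
  batch = jnp.arange(12.0).reshape((4, 3))  # a hypothetical batch of 4 examples
  per_example_grads = jax.vmap(
      jax.grad(per_example_loss), in_axes=(None, 0))(params, batch)
  tx = dpsgd(learning_rate=0.1, l2_norm_clip=1.0, noise_multiplier=1.1, seed=0)
  opt_state = tx.init(params)
  # The returned updates have the same shape as `params` (batch axis removed).
  updates, opt_state = tx.update(per_example_grads, opt_state)
  return params + updates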
def adamax(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
) -> base.GradientTransformation:
"""A variant of the Adam optimizer that uses the infinity norm.
References:
Kingma et al, 2014: https://arxiv.org/abs/1412.6980
Args:
learning_rate: A fixed global scaling factor.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the maximum of past gradients.
eps: A small constant applied to denominator to avoid dividing by zero when
rescaling.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_adamax(b1=b1, b2=b2, eps=eps,),
_scale_by_learning_rate(learning_rate),
)
def adamaxw(
learning_rate: ScalarOrSchedule,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
weight_decay: float = 1e-4,
mask: Optional[Union[Any, Callable[[base.Params], Any]]] = None,
) -> base.GradientTransformation:
"""Adamax with weight decay regularization.
AdamaxW uses weight decay to regularize learning towards small weights, as
this leads to better generalization. In SGD you can also use L2 regularization
to implement this as an additive loss term, however L2 regularization
does not behave as intended for adaptive gradient algorithms such as Adam.
WARNING: Sometimes you may want to skip weight decay for BatchNorm scale or
for the bias parameters. You can use `optax.masked` to make your own AdamaxW
variant where `additive_weight_decay` is applied only to a subset of `params`.
References:
Loshchilov et al, 2019: https://arxiv.org/abs/1711.05101
Args:
learning_rate: A fixed global scaling factor.
b1: Exponential decay rate to track the first moment of past gradients.
b2: Exponential decay rate to track the maximum of past gradients.
eps: A small constant applied to denominator to avoid dividing by zero when
rescaling.
weight_decay: Strength of the weight decay regularization. Note that this
weight decay is multiplied with the learning rate. This is consistent
with other frameworks such as PyTorch, but different from
(Loshchilov et al, 2019) where the weight decay is only multiplied with
the "schedule multiplier", but not the base learning rate.
mask: A tree with same structure as (or a prefix of) the params PyTree,
or a Callable that returns such a pytree given the params/updates.
The leaves should be booleans, `True` for leaves/subtrees you want to
apply the weight decay to, and `False` for those you want to skip. Note
that the Adamax gradient transformations are applied to all parameters.
Returns:
The corresponding `GradientTransformation`.
"""
return combine.chain(
transform.scale_by_adamax(b1=b1, b2=b2, eps=eps),
transform.add_decayed_weights(weight_decay, mask),
_scale_by_learning_rate(learning_rate),
)
| optax-master | optax/_src/alias.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optax._src.numerics."""
import functools
import itertools
import re
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import numpy as np
from optax._src import numerics
_ALL_ORDS = [None, np.inf, -np.inf, 'fro', 'nuc', 0, 1, 2, -2, -2, -1.5, 1.5]
int32_array = lambda i: jnp.array(i, dtype=jnp.int32)
float32_array = lambda i: jnp.array(i, dtype=jnp.float32)
def _invalid_ord_axis_inputs(ord_axis_keepdims):
ord_, axis = ord_axis_keepdims[0], ord_axis_keepdims[1]
return any(((ord_ == 0 and axis is None),
(isinstance(ord_, float) and axis is None),
(isinstance(ord_, str) and axis is not None)))
class NumericsTest(chex.TestCase):
@chex.all_variants
def test_safe_int32_increments(self):
inc_fn = self.variant(numerics.safe_int32_increment)
# increment small numbers correctly.
base = int32_array(3)
incremented = inc_fn(base)
np.testing.assert_array_equal(incremented, int32_array(4))
# avoid overflow when incrementing maxint.
base = int32_array(np.iinfo(np.int32).max)
incremented = inc_fn(base)
np.testing.assert_array_equal(incremented, base)
@chex.all_variants
@parameterized.parameters(
itertools.filterfalse(
_invalid_ord_axis_inputs,
itertools.product(_ALL_ORDS, [None, 0, 1], [False, True])))
def test_safe_norm(self, ord, axis, keepdims): # pylint: disable=redefined-builtin
dnorm_dx = self.variant(
jax.jacfwd(
functools.partial(
numerics.safe_norm, ord=ord, axis=axis, keepdims=keepdims),
argnums=0))
# Test gradient is 0. in 0. when zero min norm is used.
g = dnorm_dx(float32_array(jnp.zeros((3, 4))), float32_array(0.))
np.testing.assert_array_equal(g, jnp.zeros_like(g))
# Test gradient is 0. in 0. when non zero min norm is used.
g = dnorm_dx(float32_array(jnp.zeros((3, 4))), float32_array(3.))
np.testing.assert_array_equal(g, jnp.zeros_like(g))
@chex.all_variants
def test_safe_rms(self):
drms_dx = self.variant(jax.grad(numerics.safe_root_mean_squares))
# Test gradient is 0. in 0. when zero min rms is used.
g = drms_dx(float32_array(0.), float32_array(0.))
np.testing.assert_array_equal(g, jnp.zeros_like(g))
# Test gradient is 0. in 0. when non zero min rms is used.
g = drms_dx(float32_array(0.), float32_array(3.))
np.testing.assert_array_equal(g, jnp.zeros_like(g))
def test_complex_vs_real_abs_sqr(self):
# Tests that JAX generates the same HLO from `numerics.abs_sq`,
# `jnp.square(x)`, `x * x`, and `x**2`.
real_sq_fns = (lambda x: x**2, lambda x: x * x, jnp.square)
def _get_hlo_repr(f, x):
hlo_string = jax.jit(f).lower(x).compiler_ir(dialect='hlo').as_hlo_text()
return re.sub('HloModule.*?\n', '',
re.sub('ENTRY.*?{', 'ENTRY XXXX', hlo_string))
# Real arg (same HLO).
for real_sq_fn in real_sq_fns:
for real_x in (3, 3.0, np.array([4, 5.2])):
self.assertEqual(
_get_hlo_repr(real_sq_fn, real_x),
_get_hlo_repr(numerics.abs_sq, real_x))
# Complex arg (different HLOs).
for real_sq_fn in real_sq_fns:
for complex_x in (1j, 3. + 1j, np.array([4 + 1j, 5.2 + 1j])):
self.assertNotEqual(
_get_hlo_repr(real_sq_fn, complex_x),
_get_hlo_repr(numerics.abs_sq, complex_x))
if __name__ == '__main__':
absltest.main()
| optax-master | optax/_src/numerics_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `schedule.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import numpy as np
from optax._src import clipping
from optax._src import schedule
from optax._src import state_utils
from optax._src import transform
from optax._src import wrappers
class ConstantTest(chex.TestCase):
@chex.all_variants
def test_constant(self):
"""Check constant schedule."""
# Get schedule function.
const_value = 10
num_steps = 15
schedule_fn = self.variant(schedule.constant_schedule(const_value))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(num_steps):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array([const_value] * num_steps, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class PolynomialTest(chex.TestCase):
@chex.all_variants
def test_linear(self):
"""Check linear schedule."""
# Get schedule function.
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=10., end_value=20., power=1, transition_steps=10))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(15):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array(list(range(10, 20)) + [20] * 5, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants
def test_zero_steps_schedule(self):
# Get schedule function.
initial_value = 10.
end_value = 20.
for num_steps in [-1, 0]:
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=initial_value, end_value=end_value,
power=1, transition_steps=num_steps))
for count in range(15):
np.testing.assert_allclose(schedule_fn(count), initial_value)
@chex.all_variants
def test_nonlinear(self):
"""Check non-linear (quadratic) schedule."""
# Get schedule function.
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=25., end_value=10., power=2, transition_steps=10))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(15):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array(
[10. + 15. * (1. - n / 10)**2 for n in range(10)] + [10] * 5,
dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants
def test_with_decay_begin(self):
"""Check quadratic schedule with non-zero schedule begin."""
# Get schedule function.
schedule_fn = self.variant(
schedule.polynomial_schedule(
init_value=30., end_value=10., power=2,
transition_steps=10, transition_begin=4))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(20):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array(
[30.] * 4 + [10. + 20. * (1. - n / 10)**2 for n in range(10)] +
[10] * 6,
dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class PiecewiseConstantTest(chex.TestCase):
@chex.all_variants
def test_positive(self):
"""Check piecewise constant schedule of positive values."""
# Get schedule function.
schedule_fn = self.variant(
schedule.piecewise_constant_schedule(0.1, {3: 2., 6: 0.5}))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(10):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants
def test_negative(self):
"""Check piecewise constant schedule of negative values."""
# Get schedule function.
schedule_fn = self.variant(
schedule.piecewise_constant_schedule(-0.1, {3: 2., 6: 0.5}))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(10):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_vals = -1 * np.array(
[0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class ExponentialTest(chex.TestCase):
@chex.all_variants
@parameterized.parameters(False, True)
def test_constant_schedule(self, staircase):
"""Checks constant schedule for exponential decay schedule."""
num_steps = 15
# Get schedule function.
init_value = 1.
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=num_steps,
decay_rate=1., staircase=staircase))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(num_steps):
generated_vals.append(schedule_fn(count))
expected_vals = np.array([init_value] * num_steps, dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants
@parameterized.parameters(False, True)
def test_nonvalid_transition_steps(self, staircase):
"""Checks nonvalid decay steps results in a constant schedule."""
init_value = 1.
for transition_steps in [-1, 0]:
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=transition_steps,
decay_rate=1., staircase=staircase))
for count in range(15):
np.testing.assert_allclose(schedule_fn(count), init_value)
@chex.all_variants
@parameterized.parameters(False, True)
def test_nonvalid_decay_rate(self, staircase):
"""Checks nonvalid decay steps results in a constant schedule."""
init_value = 1.
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=2,
decay_rate=0., staircase=staircase))
for count in range(15):
np.testing.assert_allclose(schedule_fn(count), init_value)
@chex.all_variants
@parameterized.parameters((False, 0), (True, 0), (False, 5), (True, 5))
def test_exponential(self, staircase, transition_begin):
"""Checks non-linear (quadratic) schedule."""
# Get schedule function.
init_value = 1.
num_steps = 15
transition_steps = 2
decay_rate = 2.
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=transition_steps,
decay_rate=decay_rate, transition_begin=transition_begin,
staircase=staircase))
# Test that generated values equal the expected schedule values.
def _staircased(count):
p = count / transition_steps
if staircase:
p = np.floor(p)
return p
generated_vals = []
for count in range(num_steps + transition_begin):
generated_vals.append(schedule_fn(count))
expected_vals = np.array(
[init_value] * transition_begin + [
init_value * np.power(decay_rate, _staircased(count))
for count in range(num_steps)
],
dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants
@parameterized.parameters(
(0.2, 0.1, False), (1.0, 0.1, False), (2.0, 3.0, False),
(0.2, 0.1, True), (1.0, 0.1, True), (2.0, 3.0, True))
def test_end_value_with_staircase(self, decay_rate, end_value, staircase):
# Get schedule function.
init_value = 1.
num_steps = 11
transition_steps = 2
transition_begin = 3
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=transition_steps,
decay_rate=decay_rate, transition_begin=transition_begin,
staircase=staircase, end_value=end_value))
# Test that generated values equal the expected schedule values.
def _staircased(count):
p = count / transition_steps
if staircase:
p = np.floor(p)
return p
generated_vals = []
for count in range(num_steps + transition_begin):
generated_vals.append(schedule_fn(count))
expected_vals = np.array(
[init_value] * transition_begin + [
init_value * np.power(decay_rate, _staircased(count))
for count in range(num_steps)
],
dtype=np.float32)
if decay_rate < 1.0:
expected_vals = np.maximum(expected_vals, end_value)
else:
expected_vals = np.minimum(expected_vals, end_value)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
@chex.all_variants
def test_immutable_count(self):
"""Checks constant schedule for exponential decay schedule."""
num_steps = 5
# Get schedule function.
init_value = 32.
schedule_fn = self.variant(
schedule.exponential_decay(
init_value=init_value, transition_steps=1,
decay_rate=0.5))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(num_steps):
# Jax arrays are read-only in ChexVariantType.WITHOUT_DEVICE.
immutable_count = jnp.array(count, dtype=jnp.float32)
generated_vals.append(schedule_fn(immutable_count))
expected_vals = np.array([32, 16, 8, 4, 2], dtype=np.float32)
np.testing.assert_allclose(
expected_vals, np.array(generated_vals), atol=1e-3)
class CosineDecayTest(chex.TestCase):
@chex.all_variants
def test_decay_count_smaller_count(self):
"""Check cosine schedule decay for the entire training schedule."""
initial_value = 0.1
schedule_fn = self.variant(
schedule.cosine_decay_schedule(initial_value, 10, 0.0))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(10):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_multipliers = np.array(
0.5 + 0.5 * np.cos(
np.pi * np.array(
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])))
np.testing.assert_allclose(
initial_value * expected_multipliers,
np.array(generated_vals), atol=1e-3)
@chex.all_variants
def test_decay_count_greater_count(self):
"""Check cosine schedule decay for a part of the training schedule."""
initial_value = 0.1
schedule_fn = self.variant(
schedule.cosine_decay_schedule(initial_value, 5, 0.0))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(12):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_multipliers = np.array(
0.5 + 0.5 * np.cos(
np.pi * np.array(
[0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.])))
np.testing.assert_allclose(
initial_value * expected_multipliers,
np.array(generated_vals), atol=1e-3)
@chex.all_variants
def test_decay_count_greater_count_with_alpha(self):
"""Check cosine schedule decay for a part of the training schedule."""
# Get schedule function.
initial_value = 0.1
schedule_fn = self.variant(
schedule.cosine_decay_schedule(initial_value, 5, 0.1))
# Test that generated values equal the expected schedule values.
generated_vals = []
for count in range(12):
# Compute next value.
generated_vals.append(schedule_fn(count))
# Test output.
expected_multipliers = np.array(
0.5 + 0.5 * np.cos(
np.pi * np.array(
[0.0, 0.2, 0.4, 0.6, 0.8, 1., 1., 1., 1., 1., 1., 1.])))
expected_multipliers = 0.9 * expected_multipliers + 0.1
np.testing.assert_allclose(
initial_value * expected_multipliers,
np.array(generated_vals), atol=1e-3)
@chex.all_variants
def test_with_exponent(self):
"""Check cosine schedule decay with exponent on."""
schedule_fn = self.variant(
schedule.cosine_decay_schedule(init_value=0.1,
decay_steps=100,
alpha=0.0,
exponent=2))
output = schedule_fn(np.array([0, 10, 50, 75, 100]))
np.testing.assert_allclose(
output,
np.array([0.1, 0.09516553580760956, 0.025, 0.0021446612663567066, 0.0]),
rtol=1e-6, atol=1e-8)
class WarmupCosineDecayTest(chex.TestCase):
@chex.all_variants
@parameterized.named_parameters(
('with end value', 10, 0.5, 1e-4),
('without end value', 5, 3, 0.),)
def test_limits(self, init_value, peak_value, end_value):
"""Check cosine schedule decay for the entire training schedule."""
schedule_fn = self.variant(schedule.warmup_cosine_decay_schedule(
init_value=init_value,
peak_value=peak_value,
warmup_steps=100,
decay_steps=1000,
end_value=end_value,
))
np.testing.assert_allclose(init_value, schedule_fn(0))
np.testing.assert_allclose(peak_value, schedule_fn(100))
np.testing.assert_allclose(end_value, schedule_fn(1000), rtol=1e-3)
@chex.all_variants
def test_with_exponent(self):
"""Check that we get correct results when running with exponent on."""
schedule_fn = self.variant(schedule.warmup_cosine_decay_schedule(
init_value=0.2,
peak_value=1.21,
end_value=-3.0,
warmup_steps=50,
decay_steps=100,
exponent=2))
output = schedule_fn(np.array([0, 10, 50, 75, 100]))
np.testing.assert_allclose(
output, np.array([0.20000004768371582,
0.4020000100135803,
1.2100000381469727,
-1.947500228881836,
-3.000000238418579]),
rtol=1e-6, atol=1e-8
)
class SGDRTest(chex.TestCase):
@chex.all_variants
@parameterized.named_parameters(
('with step decay', 1.6, 0.8, 0.4),
('without step_decay', 1.6, 1.6, 1.6),)
def test_limits(self, lr0, lr1, lr2):
"""Check cosine schedule decay for the entire training schedule."""
lr_kwargs = []
for step, lr in zip([2e3, 3e3, 5e3], [lr0, lr1, lr2]):
lr_kwargs += [dict(decay_steps=int(step), peak_value=lr,
init_value=0, end_value=0.0, warmup_steps=500)]
schedule_fn = self.variant(schedule.sgdr_schedule(lr_kwargs))
np.testing.assert_allclose(lr0, schedule_fn(500))
np.testing.assert_allclose(lr1, schedule_fn(2500))
np.testing.assert_allclose(lr2, schedule_fn(5500))
class PiecewiseInterpolateTest(chex.TestCase):
@chex.all_variants
def test_linear_piecewise(self):
schedule_fn = self.variant(schedule.piecewise_interpolate_schedule(
'linear', 200., {5: 1.5, 10: 0.25}))
generated_vals = [schedule_fn(step) for step in range(13)]
expected_vals = [200., 220., 240., 260., 280., 300., 255., 210., 165.,
120., 75., 75., 75.]
np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3)
@chex.all_variants
def test_cos_piecewise(self):
schedule_fn = self.variant(schedule.piecewise_interpolate_schedule(
'cosine', 400., {5: 1.2, 3: 0.6, 7: 1.}))
generated_vals = [schedule_fn(step) for step in range(9)]
expected_vals = [400., 360., 280., 240., 264., 288., 288., 288., 288.]
np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3)
@chex.all_variants
def test_empty_dict(self):
schedule_fn = self.variant(schedule.piecewise_interpolate_schedule(
'linear', 13., {}))
generated_vals = [schedule_fn(step) for step in range(5)]
expected_vals = [13., 13., 13., 13., 13.]
np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3)
@chex.all_variants
def test_no_dict(self):
schedule_fn = self.variant(schedule.piecewise_interpolate_schedule(
'cosine', 17.))
generated_vals = [schedule_fn(step) for step in range(3)]
expected_vals = [17., 17., 17.]
np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3)
def test_invalid_type(self):
# pytype: disable=wrong-arg-types
with self.assertRaises(ValueError):
schedule.piecewise_interpolate_schedule('linar', 13.)
with self.assertRaises(ValueError):
schedule.piecewise_interpolate_schedule('', 13., {5: 3.})
with self.assertRaises(ValueError):
schedule.piecewise_interpolate_schedule(None, 13., {})
# pytype: enable=wrong-arg-types
def test_invalid_scale(self):
with self.assertRaises(ValueError):
schedule.piecewise_interpolate_schedule('linear', 13., {5: -3})
class OneCycleTest(chex.TestCase):
@chex.all_variants
def test_linear(self):
schedule_fn = self.variant(schedule.linear_onecycle_schedule(
transition_steps=10,
peak_value=1000,
pct_start=0.3,
pct_final=0.7,
div_factor=10.,
final_div_factor=100.))
generated_vals = [schedule_fn(step) for step in range(12)]
expected_vals = [100., 400., 700., 1000., 775., 550., 325., 100., 67.,
34., 1., 1.]
np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3)
@chex.all_variants
def test_cosine(self):
schedule_fn = self.variant(schedule.cosine_onecycle_schedule(
transition_steps=5,
peak_value=1000.,
pct_start=0.4,
div_factor=10.,
final_div_factor=100.))
generated_vals = [schedule_fn(step) for step in range(7)]
expected_vals = [100., 550., 1000., 750.25, 250.75, 1., 1.]
np.testing.assert_allclose(generated_vals, expected_vals, atol=1e-3)
def test_nonpositive_transition_steps(self):
with self.assertRaises(ValueError):
schedule.cosine_onecycle_schedule(transition_steps=0, peak_value=5.)
with self.assertRaises(ValueError):
schedule.linear_onecycle_schedule(transition_steps=0, peak_value=5.)
class InjectHyperparamsTest(chex.TestCase):
"""Tests for the inject_hyperparams wrapper."""
@chex.all_variants
def test_updates(self):
optim = schedule.inject_hyperparams(transform.scale)( # stateless
step_size=schedule.piecewise_constant_schedule(
3.0, {1: 5, 7: 2, 12: 1.5}))
params = [jnp.zeros([], dtype=jnp.float32)]
state = self.variant(optim.init)(params)
# A no-op change, to verify that tree map works.
state = state_utils.tree_map_params(optim, lambda v: v, state)
update_fn = self.variant(optim.update)
expected_step_size = [3.0]*2 + [15.0]*6 + [30.0]*5 + [45.0]*3
grads = [jnp.ones([], dtype=jnp.float32)]
for i in range(15):
updates, state = update_fn(grads, state, params=params)
np.testing.assert_almost_equal(updates[0], expected_step_size[i+1])
@chex.all_variants
def test_hyperparams_state(self):
optim = schedule.inject_hyperparams(transform.trace)( # stateful
decay=schedule.piecewise_constant_schedule(
0.8, {3: 0.5, 9: 1.25}),
nesterov=True)
params = [jnp.zeros([2, 3]) for _ in range(3)]
state = self.variant(optim.init)(params)
update_fn = self.variant(optim.update)
expected_mom = [0.8]*4 + [0.4]*6 + [0.5]*2
grads = jax.tree_util.tree_map(jnp.ones_like, params)
for i in range(12):
np.testing.assert_almost_equal(state.hyperparams['decay'],
expected_mom[i])
_, state = update_fn(grads, state)
np.testing.assert_almost_equal(state.hyperparams['decay'],
expected_mom[-1])
@chex.all_variants
def test_constant_hyperparams(self):
optim = schedule.inject_hyperparams(transform.scale_by_adam)(b1=0., b2=0.)
params = [jnp.zeros([2, 3]) for _ in range(3)]
state = self.variant(optim.init)(params)
update_fn = self.variant(optim.update)
grads = jax.tree_util.tree_map(jnp.ones_like, params)
for _ in range(5):
updates, state = update_fn(grads, state, params)
np.testing.assert_almost_equal(state.hyperparams['b1'], 0.0)
np.testing.assert_almost_equal(state.hyperparams['b2'], 0.0)
np.testing.assert_almost_equal(state.hyperparams['eps'], 1e-8)
np.testing.assert_almost_equal(state.hyperparams['eps_root'], 0.0)
assert 'eps' in state.hyperparams
chex.assert_trees_all_close(updates, grads)
@chex.all_variants
def test_overriding_hyperparam(self):
optim = schedule.inject_hyperparams(clipping.clip_by_global_norm)(0.1)
params = jnp.zeros((3, 5, 7))
state = self.variant(optim.init)(params)
update_fn = self.variant(optim.update)
grads = jnp.ones_like(params)
for i in range(5):
state.hyperparams['max_norm'] = i
updates, state = update_fn(grads, state)
assert np.isclose(jnp.linalg.norm(updates.ravel()), i)
@chex.all_variants
@parameterized.named_parameters(('string', 'mask'), ('list', ['mask']))
def test_static_args(self, static_args):
@functools.partial(schedule.inject_hyperparams, static_args=static_args)
def custom_optim(learning_rate, mask):
return wrappers.masked(transform.scale(-learning_rate), mask)
optim = custom_optim(
0.1, functools.partial(jax.tree_util.tree_map, lambda x: x.ndim > 1))
params = [jnp.ones((1, 2)), jnp.ones(2), jnp.ones((1, 1, 1))]
grads = params
state = self.variant(optim.init)(params)
updates, state = self.variant(optim.update)(grads, state)
expected_updates = jax.tree_util.tree_map(
lambda x: -0.1 * x if x.ndim > 1 else x, grads)
assert set(state.hyperparams.keys()) == {'learning_rate'}, state.hyperparams
chex.assert_trees_all_close(updates, expected_updates)
@chex.all_variants
@parameterized.named_parameters(('one_arg', 'b1'), ('two_arg', ['b1', 'b2']))
def test_numeric_static_args(self, static_args):
optim = schedule.inject_hyperparams(
transform.scale_by_adam, static_args=static_args)(b1=0.9, b2=0.95)
params = [jnp.ones((1, 2)), jnp.ones(2), jnp.ones((1, 1, 1))]
grads = params
state = self.variant(optim.init)(params)
_, state = self.variant(optim.update)(grads, state)
assert not set(state.hyperparams.keys()).intersection(set(static_args))
@chex.all_variants
@parameterized.named_parameters(
('bf16hyp f32param bf16grad', jnp.bfloat16, jnp.float32, jnp.bfloat16),
('bf16hyp f32param f32_grads', jnp.bfloat16, jnp.float32, jnp.float32),
('f32hyp bf16param bf16grad', jnp.float32, jnp.bfloat16, jnp.bfloat16),
('f32hyp f32param bf16grad', jnp.float32, jnp.float32, jnp.bfloat16),
('f32hyp bf16param f32grad', jnp.float32, jnp.bfloat16, jnp.float32),
)
def test_hyperparam_dtypes(self,
hyperparam_dtype,
param_dtype,
grad_dtype):
"""Tests that hyperparam dtype override works as desired."""
optim = schedule.inject_hyperparams(
transform.scale_by_adam,
hyperparam_dtype=hyperparam_dtype)(b1=0.9, b2=0.95)
params = [jnp.ones((1, 2), dtype=param_dtype),
jnp.ones(2, dtype=param_dtype),
jnp.ones((1, 1, 1), dtype=param_dtype)]
    grads = jax.tree_util.tree_map(lambda x: x.astype(grad_dtype), params)
state = self.variant(optim.init)(params)
    # Check that the hyperparams are overridden.
self.assertEqual(state.hyperparams['b1'].dtype, hyperparam_dtype)
self.assertEqual(state.hyperparams['b2'].dtype, hyperparam_dtype)
_, state = self.variant(optim.update)(grads, state)
self.assertEqual(state.hyperparams['b1'].dtype, hyperparam_dtype)
self.assertEqual(state.hyperparams['b2'].dtype, hyperparam_dtype)
@parameterized.named_parameters(('string', 'lr'), ('list', ['lr']))
def test_static_args_error(self, static_args):
with self.assertRaises(ValueError):
schedule.inject_hyperparams(transform.scale, static_args=static_args)
@chex.all_variants
def test_inject_hyperparams_starts_with_step_count_zero(self):
"""Checks that inject_hyperparams uses step count 0 in the first update."""
# See also: https://github.com/deepmind/optax/issues/415.
opt = schedule.inject_hyperparams(transform.scale)(lambda count: count)
params = jnp.zeros(3)
grads = jnp.array([-1, 0, 1])
updates, _ = self.variant(opt.update)(grads, opt.init(params))
np.testing.assert_array_equal(updates, np.zeros(3))


if __name__ == '__main__':
absltest.main()
| optax-master | optax/_src/schedule_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flexibly compose gradient transformations."""
from typing import Callable, NamedTuple, Union, Mapping, Hashable
import jax
from optax._src import base
from optax._src import wrappers


def chain(
*args: base.GradientTransformation,
) -> base.GradientTransformationExtraArgs:
"""Applies a list of chainable update transformations.
Given a sequence of chainable transforms, `chain` returns an `init_fn`
that constructs a `state` by concatenating the states of the individual
transforms, and returns an `update_fn` which chains the update transformations
feeding the appropriate state to each.
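
  For illustration, a minimal sketch (assuming the standard top-level optax
  aliases ``optax.clip_by_global_norm``, ``optax.scale_by_adam`` and
  ``optax.scale`` are available, as in the public API)::

    tx = optax.chain(
        optax.clip_by_global_norm(1.0),  # clip the global gradient norm
        optax.scale_by_adam(),           # rescale using Adam statistics
        optax.scale(-1e-3))              # apply a (negative) learning rate
    state = tx.init(params)
    updates, state = tx.update(grads, state, params)
    new_params = optax.apply_updates(params, updates)
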
Args:
*args: a sequence of chainable (init_fn, update_fn) tuples.
Returns:
A ``GradientTransformationExtraArgs``, created by chaining the input
transformations. Note that independent of the argument types, the resulting
transformation always supports extra args. Any extra arguments passed to the
returned transformation will be passed only to those transformations in the
chain that support extra args.
"""
transforms = [base.with_extra_args_support(t) for t in args]
init_fns, update_fns = zip(*transforms)
def init_fn(params):
return tuple(fn(params) for fn in init_fns)
def update_fn(updates, state, params=None, **extra_args):
if len(update_fns) != len(state):
raise ValueError('The number of updates and states has to be the same in '
'chain! Make sure you have called init first!')
new_state = []
for s, fn in zip(state, update_fns):
updates, new_s = fn(updates, s, params, **extra_args)
new_state.append(new_s)
return updates, tuple(new_state)
# We opt to always return the GradientTransformationExtraArgs type here,
# instead of selecting the return type based on the arguments, since it works
# much better with the currently available type checkers. It also means that
# users will not get unexpected signature errors if they remove all of the
# transformations in a chain accepting extra args.
return base.GradientTransformationExtraArgs(init_fn, update_fn)


class MultiTransformState(NamedTuple):
inner_states: Mapping[Hashable, base.OptState]


def multi_transform(
transforms: Mapping[Hashable, base.GradientTransformation],
param_labels: Union[base.PyTree, Callable[[base.PyTree], base.PyTree]]
) -> base.GradientTransformationExtraArgs:
"""Partitions params and applies a different transformation to each subset.
Below is an example where we apply Adam to the weights and SGD to the biases
of a 2-layer neural network::
import optax
import jax
import jax.numpy as jnp
def map_nested_fn(fn):
'''Recursively apply `fn` to the key-value pairs of a nested dict'''
def map_fn(nested_dict):
return {k: (map_fn(v) if isinstance(v, dict) else fn(k, v))
for k, v in nested_dict.items()}
return map_fn
params = {'linear_1': {'w': jnp.zeros((5, 6)), 'b': jnp.zeros(5)},
'linear_2': {'w': jnp.zeros((6, 1)), 'b': jnp.zeros(1)}}
gradients = jax.tree_util.tree_map(jnp.ones_like, params) # dummy gradients
label_fn = map_nested_fn(lambda k, _: k)
tx = optax.multi_transform({'w': optax.adam(1.0), 'b': optax.sgd(1.0)},
label_fn)
state = tx.init(params)
updates, new_state = tx.update(gradients, state, params)
new_params = optax.apply_updates(params, updates)
Instead of providing a ``label_fn``, you may provide a PyTree of labels
directly. Also, this PyTree may be a prefix of the parameters PyTree. This
is demonstrated in the GAN pseudocode below::
generator_params = ...
discriminator_params = ...
all_params = (generator_params, discriminator_params)
param_labels = ('generator', 'discriminator')
tx = optax.multi_transform(
{'generator': optax.adam(0.1), 'discriminator': optax.adam(0.5)},
param_labels)
If you would like to not optimize some parameters, you may wrap
``optax.multi_transform`` with :func:`optax.masked`.
Args:
transforms: A mapping from labels to transformations. Each transformation
      will only be applied to parameters with the same label.
param_labels: A PyTree that is the same shape or a prefix of the
parameters/updates (or a function that returns one given the parameters as
input). The leaves of this PyTree correspond to the keys of the transforms
(therefore the values at the leaves must be a subset of the keys).
Returns:
An ``optax.GradientTransformation``.
"""
transforms = {
k: base.with_extra_args_support(v)
for k, v in transforms.items()
}
def make_mask(labels, group):
return jax.tree_util.tree_map(lambda label: label == group, labels)
def init_fn(params):
labels = param_labels(params) if callable(param_labels) else param_labels
label_set = set(jax.tree_util.tree_leaves(labels))
if not label_set.issubset(transforms.keys()):
raise ValueError('Some parameters have no corresponding transformation.\n'
f'Parameter labels: {list(sorted(label_set))} \n'
f'Transforms keys: {list(sorted(transforms.keys()))} \n')
inner_states = {
group: wrappers.masked(tx, make_mask(labels, group)).init(params)
for group, tx in transforms.items()
}
return MultiTransformState(inner_states)
def update_fn(updates, state, params=None, **extra_args):
labels = param_labels(updates) if callable(param_labels) else param_labels
new_inner_state = {}
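    # Each group's transform is wrapped in `masked`, so it only sees (and
    # updates state for) the leaves whose label matches that group.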
for group, tx in transforms.items():
masked_tx = wrappers.masked(tx, make_mask(labels, group))
updates, new_inner_state[group] = masked_tx.update(
updates, state.inner_states[group], params, **extra_args)
return updates, MultiTransformState(new_inner_state)
return base.GradientTransformationExtraArgs(init_fn, update_fn)
| optax-master | optax/_src/combine.py |