python_code (string, length 0 to 780k) | repo_name (string, length 7 to 38) | file_path (string, length 5 to 103)
---|---|---|
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| eigengame-main | eigengame/examples/synthetic_dataset_pca/__init__.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of eigengame PCA on a trivial dataset."""
import functools
from typing import Callable, Dict, Iterator, Tuple
from absl import app
from absl import flags
import chex
from eigengame import eg_experiment
from eigengame import eg_objectives
from eigengame import eg_utils
from eigengame.examples.synthetic_dataset_pca import data_pipeline
import jax
import jax.numpy as jnp
from jaxline import platform
import ml_collections
FLAGS = flags.FLAGS
class Experiment(eg_experiment.AbstractEigenGameExperiment):
"""Run PCA on low dimensional synthetic data."""
NON_BROADCAST_CHECKPOINT_ATTRS = {
'_covariance': 'covariance',
'_target_eigenvalues': 'target_eigenvalues',
'_target_eigenvectors': 'target_eigenvectors',
**eg_experiment.AbstractEigenGameExperiment.NON_BROADCAST_CHECKPOINT_ATTRS
}
def build_dataset(
self,
dataset_config: ml_collections.ConfigDict,
) -> Iterator[chex.ArrayTree]:
"""Initialize ground truths and returns iterator of samples."""
# Initialize the ground truths
key = jax.random.PRNGKey(dataset_config.seed)
if jax.host_count() > 1:
# In the case of multihost training, we want each host to get a different
# sample.
key = jax.random.split(key, jax.host_count())[jax.host_id()]
(
self._covariance,
self._target_eigenvalues,
self._target_eigenvectors,
) = data_pipeline.get_sharded_ground_truths(
key,
dataset_config.eigenvector_count,
dataset_config.dim,
)
global_batch_size = dataset_config.global_batch_size
per_device_batch_size = global_batch_size // jax.device_count()
def data_iterator(key: chex.PRNGKey):
"""Function to create the iterator which samples from the distribution."""
sample_from_key = jax.pmap(
functools.partial(
data_pipeline.generate_data,
dim=dataset_config.dim,
covariance=self._covariance,
batch_size=per_device_batch_size,
),)
while True:
key, *sharded_keys = jax.random.split(key, jax.local_device_count() + 1)
yield sample_from_key(jnp.asarray(sharded_keys))
# We need a separate function call here since otherwise the
# initialization of the ground truths would be executed the first time
# next() is called instead of when build_dataset is called.
return data_iterator(key)
def build_preprocess_function(
self,
preprocess_config: ml_collections.ConfigDict,
) -> Callable[[chex.ArrayTree, chex.PRNGKey], chex.ArrayTree]:
"""No need to do any preprocessing."""
return lambda batch, _: batch
@functools.partial(
jax.pmap,
in_axes=0,
out_axes=0,
axis_name='devices',
static_broadcasted_argnums=0,
)
def _eval_similarity(
self,
eigenvectors: chex.Array,
target_vectors: chex.Array,
) -> Tuple[chex.Array, chex.Array]:
"""pmaps the cosine similarity function."""
cosine_similarity = eg_objectives.cosine_similarity(
eigenvectors,
target_vectors,
)
return cosine_similarity # pytype: disable=bad-return-type # numpy-scalars
def evaluate(
self,
global_step: int,
rng: chex.Array,
**unused_kwargs,
) -> Dict[str, chex.Array]:
"""Override the evaluate function to return cosine similarity."""
replicated_cosine_similarity = self._eval_similarity(
self._eigenvectors, self._target_eigenvectors)
cosine_similarities = eg_utils.get_first(replicated_cosine_similarity)
return eg_utils.per_vector_metric_log( # pytype: disable=bad-return-type # numpy-scalars
'cosine_similarity',
cosine_similarities,
)
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(
platform.main, Experiment,
checkpointer_factory=eg_experiment.create_checkpointer))
| eigengame-main | eigengame/examples/synthetic_dataset_pca/experiment.py |
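The build_dataset method above splits one PRNG key across hosts and local devices and pmaps the sampler so that every device draws an independent batch. Below is a minimal standalone sketch of that per-device sampling pattern; the sample_batch helper and its Gaussian sampler are illustrative assumptions, not part of the eigengame code.
import functools
import jax
import jax.numpy as jnp
def sample_batch(key, covariance, batch_size):
  # Placeholder sampler: one per-device batch drawn from N(0, covariance).
  dim = covariance.shape[0]
  return jax.random.multivariate_normal(
      key, jnp.zeros(dim), covariance, shape=(batch_size,))
def per_device_iterator(key, covariance, per_device_batch_size):
  # pmap the sampler once; covariance and batch size are closed over.
  sample = jax.pmap(functools.partial(
      sample_batch, covariance=covariance, batch_size=per_device_batch_size))
  while True:
    # One fresh key per local device plus a carry-over key for the next draw.
    key, *device_keys = jax.random.split(key, jax.local_device_count() + 1)
    yield sample(jnp.asarray(device_keys))
Each yielded array has shape [local_device_count, per_device_batch_size, dim], the same device-leading layout the build_dataset iterator above produces.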
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setuptools installation script."""
from setuptools import find_packages
from setuptools import setup
description = """dm_construction is a set of "Construction" tasks requiring
agents to stack blocks to achieve a goal, engaging compositional and physical
reasoning.
"""
setup(
name="dm_construction",
version="1.0.0.dev",
description="DeepMind Construction tasks",
long_description=description,
author="DeepMind",
license="Apache License, Version 2.0",
keywords=["machine learning"],
url="https://github.com/deepmind/dm_construction",
packages=find_packages(),
# Additional docker requirements should be installed separately (See README)
install_requires=[
"absl-py",
"dm_env",
"dm_env_rpc==1.0.2",
"docker",
"grpcio",
"numpy",
"portpicker",
"scipy",
"setuptools",
"shapely",
],
extras_require={"demos": ["matplotlib", "jupyter"]},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| dm_construction-master | setup.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DeepMind Construction tasks."""
from dm_construction import _environment_factory
ALL_TASKS = _environment_factory.ALL_TASKS
ALL_WRAPPERS = _environment_factory.ALL_WRAPPERS
get_unity_environment = _environment_factory.get_unity_environment
get_task_environment = _environment_factory.get_task_environment
get_wrapped_environment = _environment_factory.get_wrapped_environment
get_environment = _environment_factory.get_environment
| dm_construction-master | dm_construction/__init__.py |
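A hedged usage sketch of this factory API, mirroring the smoke tests further down; the docker backend and the difficulty value are illustrative choices (the tests use env.core_env.max_difficulty).
import dm_construction
# Start the Unity backend once (a Docker container by default) and reuse it.
unity_env = dm_construction.get_unity_environment("docker")
# Build a wrapped task environment on top of the shared Unity process.
env = dm_construction.get_environment(
    "covering",
    unity_environment=unity_env,
    wrapper_type="discrete_relative")
timestep = env.reset(difficulty=0)  # difficulty=0 is an assumed easy setting.
print(env.action_spec())
unity_env.close()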
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests the open source construction environments."""
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import dm_construction
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string("backend", "docker", "")
def _make_random_action(action_spec, observation):
"""Makes a random action given an action spec and observation."""
# Sample the random action.
action = {}
for name, spec in action_spec.items():
if name == "Index":
value = np.random.randint(observation["n_edge"])
elif spec.dtype in (np.int32, np.int64, int):
value = np.random.randint(spec.minimum, spec.maximum + 1)
else:
value = np.random.uniform(spec.minimum, spec.maximum)
action[name] = value
return action
def _random_unroll(env, seed=1234, num_steps=10, difficulty=5,
random_choice_before_reset=False):
"""Take random actions in the given environment."""
np.random.seed(seed)
action_spec = env.action_spec()
if random_choice_before_reset:
np.random.choice([8], p=[1.])
timestep = env.reset(difficulty=difficulty)
trajectory = [timestep]
actions = [None]
for _ in range(num_steps):
if timestep.last():
if random_choice_before_reset:
np.random.choice([8], p=[1.])
timestep = env.reset(difficulty=difficulty)
action = _make_random_action(action_spec, timestep.observation)
timestep = env.step(action)
trajectory.append(timestep)
actions.append(action)
return trajectory, actions
class TestEnvironments(parameterized.TestCase):
def _make_environment(
self, problem_type, curriculum_sample, wrapper_type, backend_type=None):
"""Make the new version of the construction task."""
if backend_type is None:
backend_type = FLAGS.backend
return dm_construction.get_environment(
problem_type,
unity_environment=self._unity_envs[backend_type],
wrapper_type=wrapper_type,
curriculum_sample=curriculum_sample)
@classmethod
def setUpClass(cls):
super(TestEnvironments, cls).setUpClass()
# Construct the unity environment.
cls._unity_envs = {
"docker": dm_construction.get_unity_environment("docker"),
}
@classmethod
def tearDownClass(cls):
super(TestEnvironments, cls).tearDownClass()
for env in cls._unity_envs.values():
env.close()
@parameterized.named_parameters(
("covering", "covering"),
("covering_hard", "covering_hard"),
("connecting", "connecting"),
("silhouette", "silhouette"),
("marble_run", "marble_run"))
def test_discrete_relative_environments_curriculum_sample(self, name):
"""Smoke test for discrete relative wrapper with curriculum_sample=True."""
env = self._make_environment(name, True, "discrete_relative")
_random_unroll(env, difficulty=env.core_env.max_difficulty)
@parameterized.named_parameters(
("covering", "covering"),
("covering_hard", "covering_hard"),
("connecting", "connecting"),
("silhouette", "silhouette"),
("marble_run", "marble_run"))
def test_continuous_absolute_environments_curriculum_sample(self, name):
"""Smoke test for continuous absolute wrapper w/ curriculum_sample=True."""
env = self._make_environment(name, True, "continuous_absolute")
_random_unroll(env, difficulty=env.core_env.max_difficulty)
@parameterized.named_parameters(
("connecting_additional_layer", "connecting", "additional_layer"),
("connecting_mixed_height_targets", "connecting", "mixed_height_targets"),
("silhouette_double_the_targets", "silhouette", "double_the_targets"),)
def test_generalization_modes(self, name, generalization_mode):
"""Smoke test for discrete relative wrapper with curriculum_sample=True."""
env = self._make_environment(name, False, "discrete_relative")
_random_unroll(env, difficulty=generalization_mode)
if __name__ == "__main__":
absltest.main()
| dm_construction-master | dm_construction/environments_test.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helpers to construct an inner enviromment from a config."""
from dm_construction.environments import connecting
from dm_construction.environments import covering
from dm_construction.environments import marble_run
from dm_construction.environments import silhouette
from dm_construction.unity import docker
from dm_construction.unity import environment
from dm_construction.wrappers import continuous_absolute
from dm_construction.wrappers import discrete_relative
_TASK_CLASSES = {
"connecting": connecting.ConstructionConnecting,
"covering_hard": covering.ConstructionCoveringHard,
"covering": covering.ConstructionCovering,
"marble_run": marble_run.ConstructionMarbleRun,
"silhouette": silhouette.ConstructionSilhouette
}
ALL_TASKS = sorted(_TASK_CLASSES.keys())
_WRAPPER_CLASSES = {
"continuous_absolute": continuous_absolute.ContinuousAbsoluteImageWrapper,
"discrete_relative": discrete_relative.DiscreteRelativeGraphWrapper
}
ALL_WRAPPERS = sorted(_WRAPPER_CLASSES.keys())
_LOADERS = {
"docker": docker.loader,
}
_DEFAULT_LOADER = "docker"
def get_unity_environment(backend=_DEFAULT_LOADER, **config):
return environment.UnityConstructionEnv(loader=_LOADERS[backend], **config)
def get_task_environment(unity_environment, problem_type, **env_kwargs):
"""Returns a configured instance of a task environment."""
env_cls = _TASK_CLASSES[problem_type]
task_env = env_cls(unity_environment=unity_environment, **env_kwargs)
return task_env
def get_wrapped_environment(task_environment, wrapper_type):
"""Wraps the environment with appropriate observations and actions."""
wrapper_cls = _WRAPPER_CLASSES[wrapper_type]
wrapped_environment = wrapper_cls(task_environment)
return wrapped_environment
def get_environment(
problem_type, unity_environment=None, wrapper_type="discrete_relative",
unity_backend=_DEFAULT_LOADER, **env_kwargs):
"""Returns fully configured and wrapped environments."""
if not unity_environment:
unity_environment = get_unity_environment(backend=unity_backend)
task_environment = get_task_environment(
unity_environment, problem_type, **env_kwargs)
agent_environment = get_wrapped_environment(task_environment, wrapper_type)
return agent_environment
| dm_construction-master | dm_construction/_environment_factory.py |
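The factory is split into three steps so that one (expensive to start) Unity process can be reused across tasks, as the smoke tests above do. A sketch of that decomposed path, with illustrative task and difficulty choices:
import dm_construction
# One Unity process, reused sequentially for several tasks.
unity_env = dm_construction.get_unity_environment("docker")
for task in ("covering", "silhouette"):
  # Configure the task, then wrap it with agent-facing observations/actions.
  task_env = dm_construction.get_task_environment(unity_env, task)
  env = dm_construction.get_wrapped_environment(task_env, "discrete_relative")
  env.reset(difficulty=0)  # difficulty=0 is an illustrative assumption.
  # ... run an episode with `env` ...
unity_env.close()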
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Constants for environment.py .
"""
import types
BLOCK_SIZE = 36
CONTACT_SIZE = 6
NUM_LAYERS = 4
ID_FEATURE_INDEX = 0
POSITION_FEATURE_SLICE = slice(1, 3)
POSITION_3D_FEATURE_SLICE = slice(1, 4)
POSITION_X_FEATURE_INDEX = POSITION_FEATURE_SLICE.start
POSITION_Y_FEATURE_INDEX = POSITION_FEATURE_SLICE.start + 1
POSITION_Z_FEATURE_INDEX = POSITION_FEATURE_SLICE.start + 2
ORIENTATION_FEATURE_SLICE = slice(4, 6)
COSINE_ANGLE_FEATURE_INDEX = ORIENTATION_FEATURE_SLICE.start
SINE_ANGLE_FEATURE_INDEX = ORIENTATION_FEATURE_SLICE.start + 1
SIZE_FEATURE_SLICE = slice(6, 9)
WIDTH_FEATURE_INDEX = SIZE_FEATURE_SLICE.start
HEIGHT_FEATURE_INDEX = SIZE_FEATURE_SLICE.start + 1
DEPTH_FEATURE_INDEX = SIZE_FEATURE_SLICE.start + 2
COLOR_FEATURE_SLICE = slice(9, 13)
RED_CHANNEL_FEATURE_INDEX = COLOR_FEATURE_SLICE.start
GREEN_CHANNEL_FEATURE_INDEX = COLOR_FEATURE_SLICE.start + 1
BLUE_CHANNEL_FEATURE_INDEX = COLOR_FEATURE_SLICE.start + 2
ALPHA_CHANNEL_FEATURE_INDEX = COLOR_FEATURE_SLICE.start + 3
LINEAR_VELOCITY_FEATURE_SLICE = slice(13, 15)
VELOCITY_X_FEATURE_INDEX = LINEAR_VELOCITY_FEATURE_SLICE.start
VELOCITY_Y_FEATURE_INDEX = LINEAR_VELOCITY_FEATURE_SLICE.start + 1
ANGULAR_VELOCITY_FEATURE_INDEX = 15
PHYSICAL_OBJECT_FEATURE_INDEX = 16
COLLISION_MASK_FEATURE_SLICE = slice(17, 17 + NUM_LAYERS)
DENSITY_FEATURE_INDEX = 21
BOUNCINESS_FEATURE_INDEX = 22
FRICTION_FEATURE_INDEX = 23
LINEAR_DRAG_FEATURE_INDEX = 24
ANGULAR_DRAG_FEATURE_INDEX = 25
FREE_OBJECT_FEATURE_INDEX = 26
GLUEABLE_FEATURE_INDEX = 27
STICKY_FEATURE_INDEX = 28
GLUED_FEATURE_INDEX = 29
REMAINING_GLUE_FEATURE_INDEX = 30
SHAPE_FEATURE_SLICE = slice(31, 34)
IS_BOX_FEATURE_INDEX = SHAPE_FEATURE_SLICE.start
IS_BALL_FEATURE_INDEX = SHAPE_FEATURE_SLICE.start + 1
IS_RAMP_FEATURE_INDEX = SHAPE_FEATURE_SLICE.start + 2
START_TIME_FEATURE_INDEX = 34
COLLISION_COUNT_FEATURE_INDEX = 35
BOX_SHAPE = 0
BALL_SHAPE = 1
RAMP_SHAPE = 2
ACTION_DEFAULTS = types.MappingProxyType(dict(
GravityY=-9.8,
Friction=0.4,
AngularDrag=0.05,
Glueable=1.,
Timestep=0.02,
Density=1.,
Depth=1.,
Shape=BOX_SHAPE,
PhysicalBody=1.,
CollisionMask=1.,
CameraPosY=5.,
CameraHeight=16.,
A=1.,
# Every time we send an action, we have to manually set this to 1.
# The user however is not allowed to set this action.
IsAction=1.,
))
OTHER_ACTION_DEFAULT = 0.0
ADDITIONAL_HELPER_ACTIONS = (
"R", "G", "B", "A", "RGB",
)
class RestoreVerificationError(Exception):
"""Exception to raise if verification of restored observations fails."""
pass
class MetaEnvironmentError(Exception):
"""Exception to raise when the metaenvironment is in a bad state."""
pass
| dm_construction-master | dm_construction/unity/constants.py |
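These indices are meant for slicing rows of the "Blocks" observation produced by the meta environment (documented in environment.py later in this dump). A small illustrative sketch of decoding a block row with them; the zero-filled blocks array is a placeholder, not real data.
import numpy as np
from dm_construction.unity import constants
def describe_block(block_row):
  # block_row is one row of the "Blocks" observation (length BLOCK_SIZE).
  angle = np.arctan2(block_row[constants.SINE_ANGLE_FEATURE_INDEX],
                     block_row[constants.COSINE_ANGLE_FEATURE_INDEX])
  return {
      "id": block_row[constants.ID_FEATURE_INDEX],
      "position_xy": block_row[constants.POSITION_FEATURE_SLICE],
      "angle": angle,
      "width": block_row[constants.WIDTH_FEATURE_INDEX],
      "height": block_row[constants.HEIGHT_FEATURE_INDEX],
      "is_sticky": bool(block_row[constants.STICKY_FEATURE_INDEX] > 0.5),
  }
# Placeholder observation with two (all-zero) blocks.
blocks = np.zeros([2, constants.BLOCK_SIZE], dtype=np.float32)
print([describe_block(row) for row in blocks])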
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_construction-master | dm_construction/unity/__init__.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helper functions for loading the construction tasks through Docker."""
import codecs
import json
import re
import time
from absl import logging
from dm_construction.unity import utils
from dm_env import specs as dm_env_specs
import docker
import grpc
import numpy as np
import portpicker
from dm_env_rpc.v1 import connection as dm_env_rpc_connection
from dm_env_rpc.v1 import dm_env_adaptor
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import error
from dm_env_rpc.v1 import tensor_utils
# Maximum number of times to attempt gRPC connection.
_MAX_CONNECTION_ATTEMPTS = 10
# Port to expect the docker environment to internally listen on.
_DOCKER_INTERNAL_GRPC_PORT = 10000
# The name of the Docker image to pull.
_DEFAULT_DOCKER_IMAGE_NAME = (
"gcr.io/deepmind-environments/dm_construction:v1.0.0")
class _ConstructionEnv(dm_env_adaptor.DmEnvAdaptor):
"""An implementation of dm_env_rpc.DmEnvAdaptor for construction tasks."""
def __init__(self, connection, specs, channel, observations, container):
"""Initialize the docker Unity environment."""
super().__init__(connection, specs, observations, nested_tensors=False)
self._channel = channel
self._observation_names = observations
self._container = container
self._action_names_and_bounds = utils.get_action_names_and_bounds(
self.action_spec())
def close(self):
"""Close the Unity environment."""
if self._container:
super().close()
self._channel.close()
try:
self._container.kill()
except (docker.errors.NotFound, docker.errors.APIError):
pass # Ignore, container has already been closed.
self._container = None
def read_property(self, name):
"""Read a property of the Unity environment."""
properties = self._connection.send(
dm_env_rpc_pb2.ReadPropertyRequest(keys=[name])).properties
return tensor_utils.unpack_tensor(properties[name])
def write_property(self, name, value):
"""Write a property of the Unity environment."""
properties = {name: tensor_utils.pack_tensor(value)}
self._connection.send(
dm_env_rpc_pb2.WritePropertyRequest(properties=properties))
def action_spec(self):
"""Build the action spec based on the underlying environment."""
# Get the list of action names, in order.
raw_actions = self._dm_env_rpc_specs.actions
names = [raw_actions[i].name for i in range(len(raw_actions))]
# Get the inner action spec, which is a dictionary.
inner_action_spec = super().action_spec()
# Convert the dictionary of specs to a single BoundedArray.
minimums = []
maximums = []
dtypes = []
for name in names:
spec = inner_action_spec[name]
minimums.append(spec.minimum)
maximums.append(spec.maximum)
dtypes.append(spec.dtype)
shape = [len(names)]
names = "|".join(names)
dtypes = list(set(dtypes))
assert len(dtypes) == 1
minimums = np.array(minimums, dtype=dtypes[0])
maximums = np.array(maximums, dtype=dtypes[0])
return [dm_env_specs.BoundedArray(
shape=shape, dtype=dtypes[0], name=names, minimum=minimums,
maximum=maximums)]
def observation_spec(self):
"""Build the observation spec based on the underlying environment."""
# Get the inner observation spec, which is a dictionary.
inner_obs_spec = super().observation_spec()
# Convert it to a tuple of specs, in order of the observation names.
flat_spec = []
for name in self._observation_names:
spec = inner_obs_spec[name]
# For numerical specs, make sure they are an array and not a scalar.
if spec.dtype != np.dtype("<U") and not spec.shape:
spec = dm_env_specs.Array(shape=(1,), dtype=spec.dtype, name=spec.name)
flat_spec.append(spec)
return tuple(flat_spec)
def step(self, flat_actions):
"""Step the Unity environment."""
# Convert the action to a dictionary.
action_dict = {}
for name, (lower, upper) in self._action_names_and_bounds:
action = flat_actions[0][lower:upper]
if len(action) == 1:
action = action[0]
action_dict[name] = action
else:
for i in range(len(action)):
action_dict["{}.{}".format(name, i)] = action[i]
# Step the environment.
time_step = super().step(action_dict)
# Pack the observation into a tuple.
flat_observation = []
spec = self.observation_spec()
for i, name in enumerate(self._observation_names):
obs = time_step.observation[name]
if spec[i].dtype == np.float64:
obs = np.array([obs], dtype=spec[i].dtype)
flat_observation.append(obs)
flat_observation = tuple(flat_observation)
return time_step._replace(observation=flat_observation)
def reset(self):
"""Implements dm_env.Environment.reset."""
response = self._connection.send(dm_env_rpc_pb2.ResetRequest())
if self._dm_env_rpc_specs != response.specs:
raise RuntimeError("Environment changed spec after reset")
self._last_state = dm_env_rpc_pb2.EnvironmentStateType.INTERRUPTED
def _check_grpc_channel_ready(channel):
"""Helper function to check the gRPC channel is ready N times."""
for _ in range(_MAX_CONNECTION_ATTEMPTS - 1):
try:
return grpc.channel_ready_future(channel).result(timeout=1)
except grpc.FutureTimeoutError:
pass
return grpc.channel_ready_future(channel).result(timeout=1)
def _can_send_message(connection):
"""Returns if `connection` is healthy and able to process requests."""
try:
# This should return a response with an error unless the server isn't yet
# receiving requests.
connection.send(dm_env_rpc_pb2.StepRequest())
except error.DmEnvRpcError:
return True
except grpc.RpcError:
return False
def _create_channel_and_connection(port):
"""Returns a tuple of `(channel, connection)`."""
for _ in range(_MAX_CONNECTION_ATTEMPTS):
channel = grpc.secure_channel("localhost:{}".format(port),
grpc.local_channel_credentials())
_check_grpc_channel_ready(channel)
connection = dm_env_rpc_connection.Connection(channel)
if _can_send_message(connection):
break
else:
# A gRPC server running within Docker sometimes reports that the channel
# is ready but transitively returns an error (status code 14) on first
# use. Giving the server some time to breath and retrying often fixes the
# problem.
connection.close()
channel.close()
time.sleep(1.0)
return channel, connection
def _parse_exception_message(message):
"""Returns a human-readable version of a dm_env_rpc json error message."""
try:
match = re.match(r"^message\:\ \"(.*)\"$", message)
group = match.group(1) # pytype: disable=attribute-error
json_data = codecs.decode(group, "unicode-escape") # pytype: disable=wrong-arg-types
parsed_json_data = json.loads(json_data)
return ValueError(json.dumps(parsed_json_data, indent=4))
except: # pylint: disable=bare-except
return message
def _wrap_send(send):
"""Wraps `send` in order to reformat exceptions."""
try:
return send()
except ValueError as e:
e.args = [_parse_exception_message(e.args[0])]
raise
def _connect_to_environment(port, settings):
"""Helper function for connecting to a running dm_construction environment."""
channel, connection = _create_channel_and_connection(port)
original_send = connection.send
connection.send = lambda request: _wrap_send(lambda: original_send(request))
all_settings = {
key: tensor_utils.pack_tensor(val) for key, val in settings.items()}
create_settings = {
"levelName": all_settings["levelName"],
"seed": tensor_utils.pack_tensor(0),
"episodeId": tensor_utils.pack_tensor(0)
}
world_name = connection.send(
dm_env_rpc_pb2.CreateWorldRequest(settings=create_settings)).world_name
join_settings = all_settings.copy()
del join_settings["levelName"]
specs = connection.send(
dm_env_rpc_pb2.JoinWorldRequest(
world_name=world_name, settings=join_settings)).specs
return channel, connection, specs
def loader(settings, observations, version, local_path=None):
"""Creates a construction unity environment connecting to docker."""
del version # unused
client = docker.from_env()
port = portpicker.pick_unused_port()
if local_path:
image_name = local_path
else:
image_name = _DEFAULT_DOCKER_IMAGE_NAME
try:
client.images.get(image_name)
except docker.errors.ImageNotFound:
logging.info("Downloading docker image '%s'...", image_name)
client.images.pull(image_name)
logging.info("Download finished.")
container = client.containers.run(
image_name,
auto_remove=True,
detach=True,
ports={_DOCKER_INTERNAL_GRPC_PORT: port})
channel, connection, specs = _connect_to_environment(port, settings)
return _ConstructionEnv(
connection=connection,
specs=specs,
channel=channel,
observations=observations,
container=container)
| dm_construction-master | dm_construction/unity/docker.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utitilies for dealing with the underlying Unity environment."""
def get_action_names_and_bounds(action_spec):
"""Extracts action names and their sizes from a Unity action spec."""
scalar_action_names = action_spec[0].name.split("|")
names_and_bounds = []
bound_0 = 0
while scalar_action_names:
current_name_split = scalar_action_names.pop(0).split(".")
current_name = current_name_split[0]
bound_1 = bound_0 + 1
if len(current_name_split) == 2:
current_name = current_name_split[0]
while scalar_action_names:
next_name = scalar_action_names.pop(0)
if next_name.split(".")[0] == current_name:
bound_1 += 1
else:
scalar_action_names.insert(0, next_name)
break
names_and_bounds.append((current_name, (bound_0, bound_1)))
bound_0 = bound_1
return names_and_bounds
| dm_construction-master | dm_construction/unity/utils.py |
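A worked example of how this parsing behaves, using a hypothetical flat action spec whose packed name mixes a scalar action with a 4-component "RGBA" action; the names and bounds here are illustrative, the real spec comes from the Unity backend.
import numpy as np
from dm_env import specs as dm_env_specs
from dm_construction.unity import utils
# Hypothetical flat spec: one scalar action followed by a 4-vector action.
fake_spec = [dm_env_specs.BoundedArray(
    shape=[5], dtype=np.float32,
    name="SpawnBlock|RGBA.0|RGBA.1|RGBA.2|RGBA.3",
    minimum=np.zeros(5, dtype=np.float32),
    maximum=np.ones(5, dtype=np.float32))]
print(utils.get_action_names_and_bounds(fake_spec))
# -> [('SpawnBlock', (0, 1)), ('RGBA', (1, 5))]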
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Wrapper for the true underlying unity meta environment.
The meta environment allows to place blocks in arbitrary positions.
Observation consists of variable size (0 in action spec means variable size)
"Blocks", "Balls" and "Contacts". It also contains a "ElapsedTime"
double containing simulation steps, "SpawnCollisionCount" with the number
of spawned collisions since the last Reset, and "CollisionStop" indicating
whether the last physics simulation ended due to a collision (only if
StopOnCollision is set).
It also includes an "RGB" image from a front ortographic camera with
configurable size and position and a "Segmentation" image containing
a 2d-array with the id of the object at each location. An additional
"ObserverRGB" image can be used for higher resolution 3d rendering.
Block Features:
-[0]: Object id.
-[1/2/3]: Horizontal/Vertical/Depth position.
-[4/5]: Cos(angle)/Sin(angle).
-[6/7/8]: Width/Height/Depth. Width/Height are scaling parameters and can be
negative.
-[9/10/11/12]: R/G/B/A.
-[13/14]: Horizontal/Vertical velocity.
-[15]: Angular velocity.
-[16]: One hot indicator if it is a physical object.
-[17/18/19/20]: Collision mask.
-[21]: Density.
-[22]: Bounciness.
-[23]: Friction.
-[24]: Linear drag.
-[25]: Angular drag.
-[26]: One hot indicator if it is a free object.
-[27]: One hot indicator if it is glueable.
-[28]: One hot indicator if it is sticky.
-[29]: One hot indicator if it is glued (sticky only on the first contact).
-[30]: One hot indicator indicating if the object is still sticky.
-[31/32/33]: One hot indicator of the type of the shape.
-[34]: Simulation time when the block was spawned.
-[35]: Number of collisions that the block has experienced.
Contact Features:
-[0/1]: Object ids of the two blocks involved in the contact.
-[2]: One hot indicator if the contact is glued.
-[3/4/5]: Relative position of the contact point with respect to the parent
object, evaluated when the objects enter the collision.
Actions (all of them float32 scalars; the ranges in brackets are not enforced):
-SimulationSteps [0., 10000.]: Number of simulation steps (0.02s) to run.
Rounded to int.
-Delete [0., 1.]: if > 0.5 Removes object with the "SelectId" id or at
"Select" coordinates, if SelectId == 0.
-Reset [0., 1.]: if > 0.5 removes all objects from the scene.
-SpawnBlock [0., 1.]: if > 0.5 spawns one block at the "Set" coordinates.
-SetId [int]: Id of the spawned object. If == 0, a sequential negative Id is
given. If > 0 the value (or next available value) will be used
as Id. If < 0 the value (or previous available value) will be used
as Id.
-SetPosX [-7.5, 7.5]: Horizontal position of the spawned object.
-SetPosY [0., 15.]: Vertical position of the spawned object.
-SetPosZ [-2., 2.]: Depth position of the spawned object.
-SetAngle [-pi, pi]: Angle of the spawned object.
-SetVelX [-10., 10.]: Horizontal velocity of the spawned moving block.
-SetVelY [-10., 10.]: Vertical velocity of the spawned moving block.
-SetVelAngle [-10., 10.]: Angular velocity of the spawned moving block.
-Shape [0., 1., 2.]: 0: box, 1: ball, 2: ramp.
-Width [-10., 10.]: Width of the spawned object. As it is a scaling parameter,
negative values can be used to mirror the shapes horizontally.
-Height [-10., 10.]: Height of the spawned object, similar to width.
As it is a scaling parameter, negative values can be used to mirror the
shapes vertically. Unused for balls.
-Depth [0.1, 10.]: Depth of the spawned object, similar to width.
Unused for balls. This is just for visualization purposes
and does not affect the physical properties of the bodies.
-PhysicalBody [0., 1.]: if > 0.5, the spawned object is subject to physics,
otherwise it does not interact with other bodies or gravity.
-CollisionMask [0b0000, 0b1111]: Bitmap with collision map, two bodies will
collide if the bitwise "and" is positive ((mask_1 & mask_2) > 0).
-Density [0.001, 10000.]: Density of the created body.
-Bounciness [0., 1.]: Restitution coefficient of created body.
-Friction [0., 1.]: Friction of the material of the created body.
-LinearDrag [0., 100.]: Translation drag of the created body.
-AngularDrag [0., 100.]: Rotation drag of the created body.
-SelectId [int]: Id of selected objects.
-SelectPosX [-7.5, 7.5]: Horizontal position of selected objects.
-SelectPosY [0., 15.]: Vertical position of selected objects.
-RGBA [0., 1.]: Color components of the spawned element. Size 4.
-Glueable [0., 1.]: If 1. the object will be affected by glue.
-Sticky [0., 1.]: if > 0.5, the spawned block is sticky. (Only if Glueable).
-FreeBody [0., 1.]: if > 0.5, the spawned block moves freely, otherwise fixed.
-UseGlue [0., 1.]: if > 0.5, the spawned free block gets glued on the
first collision. (Only if Glueable).
-GravityX/GravityY [-100., 100.]: Vector corresponding to the gravity.
-Timestep [0., 10.]: Duration of each simulation timestep.
-StopOnCollision [0., 1.]: If > 0.5 the simulation will stop on the first
collision, even if the number of simulation steps has not been reached.
-CameraPosX/CameraPosY [-100., 100.]: Position of the center of the camera view.
-CameraHeight [0., 1000.]: Height of the camera view.
Constants for block features and default actions are provided in constants.py.
"""
import time
from absl import logging
from dm_construction.unity import constants
from dm_construction.unity import utils
import dm_env
from dm_env import specs
import numpy as np
ACTION_DEFAULTS = constants.ACTION_DEFAULTS
OTHER_ACTION_DEFAULT = constants.OTHER_ACTION_DEFAULT
# Update together with new versions that are released.
LAST_PRODUCTION_LABEL = "production_v26"
class UnityConstructionEnv(dm_env.Environment):
"""Wrapper for the raw unity env that deserializes observations/actions."""
def __init__(self, loader, version=None, include_agent_camera=True,
width=128, height=128, include_segmentation_camera=False,
num_segmentation_layers=3, include_observer_camera=False,
observer_width=None, observer_height=None,
observer_3d=False, show_set_cursor=True, show_select_cursor=True,
max_simulation_substeps=0, local_path=None,
max_collisions_per_simulation_step=10):
"""Inits the environment.
Args:
loader: a function that loads the environment, e.g. as a Docker image.
This function should take the following arguments:
- settings: a dictionary of settings for the Unity env
- observations: a list of requested observations
- version: the version to load
- local_path: a path to a local version of the environment
And it should return the loaded Unity environment.
version: Label of the version of the construction mpm to be used. If None,
the latest version of the mpm known to this module and stored in
LAST_PRODUCTION_LABEL will be used.
include_agent_camera: If True, an "RGB" field will contain a camera render
as part of the observation.
width: Horizontal resolution of the agent camera observation.
height: Vertical resolution of the agent camera observation.
include_segmentation_camera: If True, a "Segmentation" camera will be
provided as part of the observation.
num_segmentation_layers: Maximum number of objects per pixel allowed
in the segmented observation.
include_observer_camera: If True, a separate "ObserverRGB" field will
contain a second camera with potentially different resolution.
observer_width: Horizontal resolution of the observer camera observation.
If None it will default to `width`.
observer_height: Vertical resolution of the observer camera observation.
If None it will default to `height`.
observer_3d: If True, the observer camera will render in 3d.
show_set_cursor: If True, the set cursor will be visible.
show_select_cursor: If True, the select cursor will be visible.
max_simulation_substeps: If 0, the number of "SimulationSteps" will be
executed at once together with the actions for maximum efficiency when
training agents.
If max_simulation_substeps > 0, It will proceed as follows:
1. Store the "SimulationSteps" as pending simulation steps.
2. Apply a first step to the environment with all passed actions except
overriding SimulationSteps to 0.
3. Keep applying environment steps with
SimulationSteps = max_simulation_substeps, until there are no pending
simulation steps. (Depending on rounding, the last environment step
may contain fewer than max_simulation_substeps.)
This makes it possible to visualize trajectories in between agent steps
by attaching an observer via the `add_observer` method, together with
this option.
local_path: If provided, it will use a local build of the unity
environment.
max_collisions_per_simulation_step: The maximum number of new collisions
that can happen within a single simulation step. A large number of new
collisions occurring in a very short period of time usually indicates
instability in the simulation. A MetaEnvironmentError is raised, and
the environment is reset to an empty safe state.
"""
if version is None:
version = LAST_PRODUCTION_LABEL
if observer_width is None:
observer_width = width
if observer_height is None:
observer_height = height
self._version = version
self._include_agent_camera = include_agent_camera
self._width = width
self._height = height
self._include_segmentation_camera = include_segmentation_camera
self._include_observer_camera = include_observer_camera
self._num_segmentation_layers = num_segmentation_layers
self._observer_width = observer_width
self._observer_height = observer_height
self._observer_3d = observer_3d
self._local_path = local_path
self._show_set_cursor = show_set_cursor
self._show_select_cursor = show_select_cursor
self._max_collisions_per_simulation_step = (
max_collisions_per_simulation_step)
self._load_env(loader)
self._raw_observations_observers = []
self._max_simulation_substeps = max_simulation_substeps
self._action_names_bounds = utils.get_action_names_and_bounds(
self._env.action_spec())
# The "IsAction" action is for internal use only.
self._valid_action_names = [name for name, _ in self._action_names_bounds]
self._valid_action_names.remove("IsAction")
self._valid_action_names += constants.ADDITIONAL_HELPER_ACTIONS
self._observation_spec = self._build_observation_spec()
self._action_spec = self._build_action_spec()
# Empirically we have observed that observations sometimes come empty
# but only the very first time and right after instantiating the
# environment. Sleeping and forcing reset seems to fix it.
time.sleep(1)
self.reset()
time.sleep(1)
def add_observer(self, observer):
"""Adds a raw observation observer.
The observer will be notified for each new observation obtained from the
mpm process. If the `max_simulation_substeps` argument is provided when
instantiating the class, the observer will also be notified of additional
intermediate observations corresponding to dynamics substeps within a
single `step` call.
Args:
observer: A callable that takes as argument an observation.
"""
self._raw_observations_observers.append(observer)
def remove_observer(self, observer):
"""Removes a raw observation observer that was previously added.
Args:
observer: A callable that takes as argument an observation.
"""
self._raw_observations_observers.remove(observer)
def _get_simulation_steps_action(self, previous_action, num_steps=1):
# We want to simply run simulation steps with cursors still pointing to
# the same location used by a previous action, and same gravity/timestep.
action_dict = {"SimulationSteps": float(num_steps)}
actions_to_repeat = ["SelectPosX", "SelectPosY", "SetPosX",
"SetPosY", "GravityX", "GravityY", "Timestep",
"StopOnCollision", "CameraPosX", "CameraPosY",
"CameraHeight"]
for action in actions_to_repeat:
if action in previous_action:
action_dict[action] = previous_action[action]
return self._flatten_action_dict(action_dict)
def _flatten_action_dict(self, action_dict):
for action_name in action_dict:
if action_name not in self._valid_action_names:
raise ValueError("Unrecognized action {}, valid actions are {}."
.format(action_name, self._valid_action_names))
action_dict = _replace_helper_actions(action_dict)
action_values = {
name: action_dict.get(
name, ACTION_DEFAULTS.get(name, OTHER_ACTION_DEFAULT))
for name, _ in self._action_names_bounds}
action_list = [
_prepare_action(name, action_values[name], bound_1-bound_0)
for name, (bound_0, bound_1) in self._action_names_bounds]
flat_actions = (np.concatenate(action_list, axis=0).astype(np.float32),)
return flat_actions
def _get_config_json(self):
config_json = {"levelName": "ConstructionMetaEnvironment",
"ShowSetCursor": self._show_set_cursor,
"ShowSelectCursor": self._show_select_cursor,
"MaxCollisionsPerSimulationStep": (
self._max_collisions_per_simulation_step)}
observations = [
"Blocks", "Contacts", "ElapsedTime",
"SpawnCollisionCount", "CollisionStop", "Message"]
if self._include_agent_camera or self._include_segmentation_camera:
height = self._height
width = self._width
config_json.update({
"AgentCameraWidth": width,
"AgentCameraHeight": height})
if self._include_agent_camera:
observations.append("AgentCamera")
if self._include_segmentation_camera:
config_json.update({
"NumSegmentationLayers": self._num_segmentation_layers})
observations.append("SegmentationCamera")
if self._include_observer_camera:
height = self._observer_height
width = self._observer_width
config_json.update({
"ObserverCameraWidth": width,
"ObserverCameraHeight": height,
"ObserverCamera3D": self._observer_3d})
observations.append("ObserverCamera")
self._obs_to_ind_map = {
obs: index for index, obs in enumerate(observations)}
return config_json, observations
def _load_env(self, loader):
config_json, observations = self._get_config_json()
self._env = loader(
config_json, observations, self._version, local_path=self._local_path)
# Verify that the version is consistent.
version = self._env.read_property("Version")
if version != self._version:
raise ValueError("Wrong version loaded: required `{}`, got `{}`."
.format(self._version, version))
else:
msg = ("Construction meta-environment running at version `%s`." %
version)
if self._local_path:
msg += " (Local build)"
logging.info(msg)
def __del__(self):
if hasattr(self, "_env"):
self._env.close()
def close(self):
return self._env.close()
def hard_reset(self):
# Perform a hard reset of the environment, which does not return anything.
# Then, perform a soft reset so we can actually get a timestep.
self._env.reset()
return self.reset()
def reset(self):
return self.step({"Reset": 1.})
@property
def last_observation(self):
"""Returns the last observation."""
return self._last_observation
def restore_state(self,
observation,
verify_restored_state=True,
verification_threshold_abs=1e-3,
verification_threshold_rel=1e-5,
verify_velocities=True):
"""Restores the environment to the state given by an observation.
Args:
observation: Environment observation.
verify_restored_state: If True, it will verify that the observation
after restoring is consistent with the observation set.
verification_threshold_abs: Maximum absolute difference disagreement
between features in the input observation and the restore observation
allowed when `verify_restored_state==True`.
verification_threshold_rel: Maximum relative difference disagreement
between features in the input observation and the restore observation
allowed when `verify_restored_state==True`.
verify_velocities: If False, the velocities will not be verified. This is
sometimes required in environments that make extensive use of glue,
as the velocities cannot always be set correctly for constraints.
Returns:
A timestep with the first observation after restoring the state. All
fields should be equal to those in the observation passed as argument
(except for numerical precision errors). Camera renders are just copied
from the input observation, since the observation is not informative enough
to tell the placement of the cameras, and cursors may also be in different
positions.
Raises:
ValueError if the shapes or values of the observation after restoring
are different than those being restored.
"""
serialized_blocks = _serialize_array(observation["Blocks"])
serialized_contacts = _serialize_array(observation["Contacts"])
string_spawn_collision_count = "%d" % observation["SpawnCollisionCount"]
string_elapsed_time = "%g" % observation["ElapsedTime"]
string_collision_stop = "%d" % int(observation["CollisionStop"])
empty_markers = "" # For deprecated marker behavior.
serialized_observation = "/".join([
empty_markers, serialized_blocks, serialized_contacts,
string_spawn_collision_count, string_elapsed_time,
string_collision_stop])
self._env.write_property(
"RestoreObservation", serialized_observation)
# We need to send a null action after setting the property, to apply the
# effects and get the observation back.
restored_timestep = self._one_step({})
if verify_restored_state:
_verify_restored_observation(
observation,
restored_timestep.observation,
difference_threshold_abs=verification_threshold_abs,
difference_threshold_rel=verification_threshold_rel,
verify_velocities=verify_velocities)
if self._include_agent_camera:
restored_timestep.observation["RGB"] = observation["RGB"].copy()
if self._include_observer_camera:
restored_timestep.observation["ObserverRGB"] = (
observation["ObserverRGB"].copy())
self._last_observation = restored_timestep.observation.copy()
return restored_timestep
def step(self, actions):
"""Applies the actions to the environment.
Args:
actions: Dictionary of actions containing an action set as indicated by
the action spec. Keys that are not specified will take the default
value as contained in ACTION_DEFAULTS. Limits of the actions are not
enforced for additional flexibility. Optionally, a list of dictionaries
may be passed instead, in which case the entire sequence of action sets
will be sent and processed by the unity backend in a single interaction,
speeding up the execution of the sequence.
Returns:
TimeStep with the final observation resulting from applying the action
set, or the entire action set list.
Raises:
ValueError: if the actions are not a dictionary or a list.
MetaEnvironmentError: if an error occurs in the underlying Unity
environment.
"""
if isinstance(actions, list):
return self._multiple_steps(actions)
elif isinstance(actions, dict):
return self._one_step(actions)
else:
raise ValueError("Unrecognized action type {}, should be a list or a dict"
.format(type(actions)))
def _multiple_steps(self, action_dict_list):
if action_dict_list:
# If we are storing timesteps, we actually apply the actions one by one.
if self._max_simulation_substeps > 0:
for action_dict in action_dict_list:
time_step = self._one_step(action_dict)
return time_step
# Otherwise, we pack all of the actions, and send them as one.
flat_actions_list = [self._flatten_action_dict(action_dict)[0]
for action_dict in action_dict_list]
serialized_action_sequence = _serialize_array(flat_actions_list)
self._env.write_property(
"ChainedActionSequence", serialized_action_sequence)
# We need to send a null action after setting the property with
# the sequence, to run the effects, and get the observation back.
# If the list is empty this will just return the observation with the
# current state.
return self._one_step({})
def _one_step(self, action_dict):
# If we want to explicitly run simulation steps, we set them to 0, and
# run them later in a loop.
if self._max_simulation_substeps:
action_dict = action_dict.copy()
num_pending_simulation_steps = action_dict.get("SimulationSteps", 0)
num_pending_simulation_steps = int(round(num_pending_simulation_steps))
action_dict["SimulationSteps"] = 0.
else:
num_pending_simulation_steps = 0
time_step = self._process_and_store_timestep(
self._env.step(self._flatten_action_dict(action_dict)))
# We simulate the pending `num_pending_simulation_steps` steps by sending
# multiple environment steps, each with at most self._max_simulation_substeps.
if num_pending_simulation_steps:
while (num_pending_simulation_steps > 0 and
not time_step.observation["CollisionStop"]):
if num_pending_simulation_steps >= self._max_simulation_substeps:
num_substeps = self._max_simulation_substeps
num_pending_simulation_steps -= self._max_simulation_substeps
else:
num_substeps = num_pending_simulation_steps
num_pending_simulation_steps = 0
time_step = self._process_and_store_timestep(
self._env.step(self._get_simulation_steps_action(
action_dict, num_substeps)))
return time_step
def _build_action_spec(self):
# Separate each of the actions into a dictionary.
flat_action_spec = self._env.action_spec()[0]
action_spec = {}
for name, (bound_0, bound_1) in self._action_names_bounds:
size = bound_1 - bound_0
if size <= 1:
shape = []
index = bound_0
else:
shape = [size]
index = slice(bound_0, bound_1)
action_spec[name] = specs.BoundedArray(
shape, dtype=np.float32,
minimum=flat_action_spec.minimum[index],
maximum=flat_action_spec.maximum[index])
del action_spec["IsAction"]
return action_spec
def action_spec(self, *args, **kwargs):
return self._action_spec
def _build_observation_spec(self):
parent_observation_spec = self._env.observation_spec()
observation_spec = {
"Blocks": specs.Array(
[0, constants.BLOCK_SIZE], dtype=np.float32, name="Blocks"),
"Contacts": specs.Array(
[0, constants.CONTACT_SIZE], dtype=np.float32, name="Contacts"),
"ElapsedTime": specs.Array(
(), dtype=np.float32,
name="ElapsedTime"),
"SpawnCollisionCount": specs.Array(
(), dtype=np.int32,
name="SpawnCollisionCount"),
"CollisionStop": specs.Array(
(), dtype=np.bool,
name="SpawnCollisionCount")
}
if self._include_agent_camera:
observation_spec["RGB"] = parent_observation_spec[
self._obs_to_ind_map["AgentCamera"]]
if self._include_observer_camera:
observation_spec["ObserverRGB"] = parent_observation_spec[
self._obs_to_ind_map["ObserverCamera"]]
if self._include_segmentation_camera:
raw_spec = parent_observation_spec[
self._obs_to_ind_map["SegmentationCamera"]]
observation_spec["Segmentation"] = specs.Array(
raw_spec.shape, dtype=np.int32, name="Segmentation")
return observation_spec
def observation_spec(self, *args, **kwargs):
return self._observation_spec
def _process_message(self, message):
messages = message.split(";")
for message in messages:
if not message:
continue
if message.startswith("E:"):
raise constants.MetaEnvironmentError(message)
else:
logging.info(message)
def _process_and_store_timestep(self, time_step):
"""Deserialize string observations into arrays, removing ignored ones."""
blocks = _deserialize_array(
time_step.observation[self._obs_to_ind_map["Blocks"]],
expected_size=constants.BLOCK_SIZE)
contacts = _deserialize_array(
time_step.observation[self._obs_to_ind_map["Contacts"]],
expected_size=constants.CONTACT_SIZE)
new_observation = {
"Blocks": blocks,
"Contacts": contacts,
"ElapsedTime": time_step.observation[
self._obs_to_ind_map["ElapsedTime"]][0].astype(np.float32),
"SpawnCollisionCount": np.array(
round(time_step.observation[
self._obs_to_ind_map["SpawnCollisionCount"]][0]),
dtype=np.int32),
"CollisionStop": np.array(
round(time_step.observation[
self._obs_to_ind_map["CollisionStop"]][0]), dtype=np.bool)}
if self._include_agent_camera:
new_observation["RGB"] = time_step.observation[
self._obs_to_ind_map["AgentCamera"]]
if self._include_observer_camera:
new_observation["ObserverRGB"] = time_step.observation[
self._obs_to_ind_map["ObserverCamera"]]
if self._include_segmentation_camera:
new_observation["Segmentation"] = time_step.observation[
self._obs_to_ind_map["SegmentationCamera"]].astype(np.int32)
message = str(time_step.observation[self._obs_to_ind_map["Message"]])
self._process_message(message)
for observer in self._raw_observations_observers:
observer(new_observation.copy())
self._last_observation = new_observation.copy()
return time_step._replace(observation=new_observation,
discount=np.array(time_step.discount,
dtype=np.float32))
def block_to_actions(block, delete_existing=False):
"""Converts a block vector representation into actions that create the block.
The idea here is that a block with the properties of `block` will be created
when the returned actions are executed in the unity environment.
Note that if delete_existing=False, and an object already exists with that id,
the actions will still create an object, but it will have a different id than
the one specified in `block`.
Args:
block: a vector of block properties
delete_existing: whether to delete an existing block with the given id
Returns:
action: a dictionary of actions to create the block
"""
action = {
"SpawnBlock": 1.,
"SetId": block[constants.ID_FEATURE_INDEX],
"SetPosX": block[constants.POSITION_X_FEATURE_INDEX],
"SetPosY": block[constants.POSITION_Y_FEATURE_INDEX],
"SetPosZ": block[constants.POSITION_Z_FEATURE_INDEX],
"SetAngle": np.arctan2(block[constants.SINE_ANGLE_FEATURE_INDEX],
block[constants.COSINE_ANGLE_FEATURE_INDEX]),
"Width": block[constants.WIDTH_FEATURE_INDEX],
"Height": block[constants.HEIGHT_FEATURE_INDEX],
"Depth": block[constants.DEPTH_FEATURE_INDEX],
"RGBA": np.asarray([block[constants.RED_CHANNEL_FEATURE_INDEX],
block[constants.GREEN_CHANNEL_FEATURE_INDEX],
block[constants.BLUE_CHANNEL_FEATURE_INDEX],
block[constants.ALPHA_CHANNEL_FEATURE_INDEX]]),
"SetVelX": block[constants.VELOCITY_X_FEATURE_INDEX],
"SetVelY": block[constants.VELOCITY_Y_FEATURE_INDEX],
"SetVelAngle": block[constants.ANGULAR_VELOCITY_FEATURE_INDEX],
"PhysicalBody": block[constants.PHYSICAL_OBJECT_FEATURE_INDEX],
"CollisionMask": (block[constants.COLLISION_MASK_FEATURE_SLICE]*
np.power(2, np.arange(constants.NUM_LAYERS))).sum(),
"Density": block[constants.DENSITY_FEATURE_INDEX],
"Bounciness": block[constants.BOUNCINESS_FEATURE_INDEX],
"Friction": block[constants.FRICTION_FEATURE_INDEX],
"LinearDrag": block[constants.LINEAR_DRAG_FEATURE_INDEX],
"AngularDrag": block[constants.ANGULAR_DRAG_FEATURE_INDEX],
"FreeBody": block[constants.FREE_OBJECT_FEATURE_INDEX],
"Glueable": block[constants.GLUEABLE_FEATURE_INDEX],
"Sticky": block[constants.STICKY_FEATURE_INDEX],
"UseGlue": block[constants.GLUED_FEATURE_INDEX],
"Shape": np.argmax(block[constants.SHAPE_FEATURE_SLICE]),
}
if delete_existing:
action["Delete"] = 1.
action["SelectId"] = action["SetId"]
return action
def _deserialize_array(string, expected_size=2, dtype=np.float32):
if not string:
return np.zeros([0, expected_size], dtype=dtype)
return np.array(
[[float(item_element)
for item_element in item.split(",")]
for item in str(string).split(";")],
dtype=dtype)
def _serialize_array(array):
return ";".join([",".join(["%g" % e for e in row]) for row in array])
def _verify_restored_observation(
input_observation, restored_observation,
difference_threshold_abs=1e-3, difference_threshold_rel=1e-5,
verify_velocities=True):
"""Verifies if a restored observation is equal to an input observation."""
observation_names = list(input_observation.keys())
error_messages = []
for observation_name in observation_names:
# We ignore cameras, as they are just copied from the inputs.
if observation_name in ["RGB", "ObserverRGB"]:
continue
input_ = input_observation[observation_name]
restored = restored_observation[observation_name]
# This can happen if there are a different number of contacts.
if input_.shape != restored.shape:
error_messages.append(
"Shape for the restored observation {} is different than the shape "
"for the input observation {} for observation `{}`."
.format(restored.shape, input_.shape, observation_name))
continue
if not input_.size:
continue
target = input_.copy().astype(np.float32)
comparison = restored.copy().astype(np.float32)
if not verify_velocities and observation_name == "Blocks":
idx = [
constants.VELOCITY_X_FEATURE_INDEX,
constants.VELOCITY_Y_FEATURE_INDEX,
constants.ANGULAR_VELOCITY_FEATURE_INDEX
]
target[:, idx] = 0
comparison[:, idx] = 0
threshold = (
difference_threshold_abs + difference_threshold_rel * np.abs(target))
too_far = np.abs(target - comparison) > threshold
if too_far.any():
difference = np.abs(target - comparison) * too_far
if difference.shape:
max_diff_index = np.unravel_index(
np.argmax(difference), difference.shape)
max_difference = difference[max_diff_index]
difference_threshold = threshold[max_diff_index]
input_value = input_[max_diff_index]
else:
max_diff_index = None
max_difference = difference
difference_threshold = threshold
input_value = input_
error_messages.append(
"Feature at index {} of `{}` observation with shape {} differs by "
"{} (more than {}) from the input observation with value {}."
.format(max_diff_index, observation_name, input_.shape,
max_difference, difference_threshold, input_value))
if error_messages:
raise constants.RestoreVerificationError("\n".join(error_messages))
def _prepare_action(name, value, size):
"""Adds a leading axis to scalars and verifies the size."""
value = np.asarray(value)
if not value.shape:
value = value[np.newaxis]
if value.shape[0] != size:
raise ValueError("Invalid size value for %s, expected %d, got %d"%
(name, size, value.shape[0]))
return value
def _verify_mutually_exclusive_actions(
action_dict, action_name, invalid_action_names):
for other_name in invalid_action_names:
if other_name in action_dict:
raise ValueError("Got %s action, but %d was already provided" %
(action_name, other_name))
def _replace_helper_actions(action_dict):
"""Replaces helper actions by the corresponding actions."""
_replace_color_helper_actions(action_dict)
return action_dict
def _replace_color_helper_actions(action_dict):
"""Replaces all color-related helper actions ensuring on RGBA is left."""
if "RGBA" in action_dict:
_verify_mutually_exclusive_actions(
action_dict, "RGBA", ["RGB", "R", "G", "B", "A"])
else:
if "A" in action_dict:
alpha = action_dict["A"]
del action_dict["A"]
else:
alpha = ACTION_DEFAULTS.get("A", OTHER_ACTION_DEFAULT)
if "RGB" in action_dict:
_verify_mutually_exclusive_actions(action_dict, "RGB", ["R", "G", "B"])
action_dict["RGBA"] = np.concatenate(
[action_dict["RGB"], np.asarray(alpha)[None]], axis=0)
del action_dict["RGB"]
else:
channel_values = []
for channel in ["R", "G", "B"]:
if channel in action_dict:
value = action_dict[channel]
del action_dict[channel]
else:
value = ACTION_DEFAULTS.get(channel, OTHER_ACTION_DEFAULT)
channel_values.append(value)
channel_values.append(alpha)
action_dict["RGBA"] = np.stack(channel_values, axis=0)
return action_dict
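# Illustrative example (not part of the original module) of how the color
# helper actions above are folded into a single "RGBA" action, with missing
# channels taken from ACTION_DEFAULTS.
def _example_color_helper_usage():
  action = {"SpawnBlock": 1.0, "R": 1.0, "G": 0.5, "A": 0.8}
  _replace_helper_actions(action)
  # `action` now holds a length-4 "RGBA" array and no "R"/"G"/"A" keys.
  return action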
| dm_construction-master | dm_construction/unity/environment.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for unity.environment."""
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
from dm_construction.unity import constants
from dm_construction.unity import docker
from dm_construction.unity import environment as unity_environment
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_boolean("skip_local", False, "")
flags.DEFINE_boolean("skip_mpm", False, "")
flags.DEFINE_string("local_path", None, "")
flags.DEFINE_string("loader", "docker", "")
FRAME_WIDTH = 30
FRAME_HEIGHT = 40
OBSERVER_WIDTH = 60
OBSERVER_HEIGHT = 80
_LOADERS = {
"docker": docker.loader,
}
# To make the tests run faster, we are going to have a single test that runs
# everything, loading the meta environment only once.
def print_func(fn):
"""Prints a method name function before calling it."""
fn_name = fn.__name__
def decorated_fn(*args, **kwargs):
logging.info(fn_name)
output = fn(*args, **kwargs)
logging.info("%s: done", fn_name)
return output
return decorated_fn
class CoreTest(parameterized.TestCase):
def _get_local_path(self):
if FLAGS.local_path is not None:
return FLAGS.local_path
else:
raise ValueError("local path is not defined")
def _get_loader(self):
return _LOADERS[FLAGS.loader]
@print_func
def _create_environment_features(self, is_local):
kwargs = {}
if is_local:
kwargs["local_path"] = self._get_local_path()
return unity_environment.UnityConstructionEnv(
loader=self._get_loader(), include_agent_camera=False, **kwargs)
@print_func
def _create_environment_pixels(self, is_local):
kwargs = {}
if is_local:
kwargs["local_path"] = self._get_local_path()
return unity_environment.UnityConstructionEnv(
loader=self._get_loader(),
include_agent_camera=True,
width=FRAME_WIDTH,
height=FRAME_HEIGHT,
**kwargs)
@print_func
def _create_environment_video(self, is_local):
kwargs = {}
if is_local:
kwargs["local_path"] = self._get_local_path()
return unity_environment.UnityConstructionEnv(
loader=self._get_loader(),
include_agent_camera=True,
width=FRAME_WIDTH,
height=FRAME_HEIGHT,
include_observer_camera=True,
observer_3d=True,
observer_width=OBSERVER_WIDTH,
observer_height=OBSERVER_HEIGHT,
max_simulation_substeps=20,
**kwargs)
@parameterized.named_parameters(
("LocalPath", True),
("MPM", False),
)
def test_meta_environment(self, use_local_path):
if FLAGS.skip_local and use_local_path:
logging.info("Skipping local test")
return
if FLAGS.skip_mpm and not use_local_path:
logging.info("Skipping mpm test")
return
self._unity_env_features = self._create_environment_features(use_local_path)
self._unity_env_pixels = self._create_environment_pixels(use_local_path)
self._unity_env_video = self._create_environment_video(use_local_path)
# Test for specific features.
self._stop_on_collision_feature_test(self._unity_env_features)
self._collision_masks_test(self._unity_env_features)
self._spawn_collision_test(self._unity_env_features)
# Test restoration of states.
self._restore_test(self._unity_env_features)
# Test that multiple modes give same results.
actions_setup, actions_dynamics = self._stop_on_collision_actions()
self._different_types_test(actions_setup + actions_dynamics)
self._action_list_test(actions_setup + actions_dynamics)
self._verify_restored_observation_test()
self._unity_env_features.close()
self._unity_env_pixels.close()
self._unity_env_video.close()
def _rollout_environment(self, environment, actions_list,
send_actions_as_list=False):
reset_observation = environment.reset().observation
if send_actions_as_list:
return [reset_observation,
environment.step(actions_list).observation]
else:
return ([reset_observation] +
[environment.step(action).observation
for action in actions_list])
@print_func
def _different_types_test(self, actions_list):
# Verify that observation sequence is consistent across environment modes.
observations_1 = self._rollout_environment(
self._unity_env_features, actions_list)
observations_2 = self._rollout_environment(
self._unity_env_pixels, actions_list)
observations_3 = self._rollout_environment(
self._unity_env_video, actions_list)
for obs_1, obs_2, obs_3 in zip(observations_1, observations_2,
observations_3):
unity_environment._verify_restored_observation(obs_1, obs_2)
unity_environment._verify_restored_observation(obs_1, obs_3)
@print_func
def _action_list_test(self, actions_list):
# Verify that final observation is the same regardless whether actions
# were sent one by one, or as a single list.
observations_1 = self._rollout_environment(
self._unity_env_features, actions_list, send_actions_as_list=False)
observations_2 = self._rollout_environment(
self._unity_env_pixels, actions_list, send_actions_as_list=False)
observations_3 = self._rollout_environment(
self._unity_env_video, actions_list, send_actions_as_list=False)
final_observation_1 = self._rollout_environment(
self._unity_env_features, actions_list, send_actions_as_list=True)[-1]
final_observation_2 = self._rollout_environment(
self._unity_env_pixels, actions_list, send_actions_as_list=True)[-1]
final_observation_3 = self._rollout_environment(
self._unity_env_video, actions_list, send_actions_as_list=True)[-1]
unity_environment._verify_restored_observation(
observations_1[-1], final_observation_1)
unity_environment._verify_restored_observation(
observations_2[-1], final_observation_2)
unity_environment._verify_restored_observation(
observations_3[-1], final_observation_3)
@print_func
def _collision_masks_test(self, unity_env):
"""Compares final position of the blocks after falling."""
actions = self._collision_masks_actions()
unity_env.reset()
for action in actions:
observation = unity_env.step(action).observation
expected_y_coordinates = (0., 2.5, 5.0, 7.5, 8.114995,
5.61499691, 3.11499906, 0.61499971)
self.assertEqual(8, observation["Blocks"].shape[0])
delta = 1e-4
for block_i in range(8):
self.assertAlmostEqual(
expected_y_coordinates[block_i],
observation["Blocks"][block_i, constants.POSITION_Y_FEATURE_INDEX],
delta=delta)
@print_func
def _restore_test(self, unity_env):
"""Compares final position of the blocks after falling."""
actions = self._multiple_balls_actions()
unity_env.reset()
observation_sequence = []
for action in actions:
observation = unity_env.step(action).observation
observation_sequence.append(observation)
# Restore the state from each of the observations, and compare the
# observation for all subsequent steps.
for restore_index, initial_observation in enumerate(observation_sequence):
unity_env.reset()
obs_restored = unity_env.restore_state(initial_observation)
restored_observations = [obs_restored.observation]
extra_actions = actions[restore_index+1:]
restored_observations += [unity_env.step(action).observation
for action in extra_actions]
for restored_observation, original_observation in zip(
restored_observations, observation_sequence[restore_index:]):
unity_environment._verify_restored_observation(
original_observation, restored_observation)
@print_func
def _stop_on_collision_feature_test(self, unity_env):
"""Compares the set of collisions."""
actions_setup, actions_dynamics = self._stop_on_collision_actions()
unity_env.reset()
for action in actions_setup:
observation = unity_env.step(action).observation
observation_sequence = (
[observation] +
[unity_env.step(action).observation for action in actions_dynamics])
expected_x_coordinates = [
(7.5, -7.5, -4.0, 0.0),
(7.5, -7.5, -0.489688, 0.0896859),
(7.5, -7.5, -0.489688, 7.11094),
(7.5, -7.5, -0.57874, -9.19681e-09),
(7.5, -7.5, -7.12189, -9.19681e-09),
(7.5, -7.5, -0.489688, 0.0677929),
(7.5, -7.5, -0.489688, 7.13284),
]
expected_y_coordinates = [
(5.0, 5.0, 5.5, 5.5),
(5.0, 5.0, 5.5, 5.5),
(5.0, 5.0, 5.5, 5.5),
(5.0, 5.0, 5.5, 5.5),
(5.0, 5.0, 5.5, 5.5),
(5.0, 5.0, 5.5, 5.5),
(5.0, 5.0, 5.5, 5.5),
]
expected_num_collisions = [
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 1.0, 1.0),
(1.0, 0.0, 1.0, 2.0),
(1.0, 0.0, 2.0, 3.0),
(1.0, 1.0, 3.0, 3.0),
(1.0, 1.0, 4.0, 4.0),
(2.0, 1.0, 4.0, 5.0),
]
delta = 1e-4
for time, observation in enumerate(observation_sequence):
self.assertEqual(4, observation["Blocks"].shape[0])
for block_i in range(4):
self.assertAlmostEqual(
expected_x_coordinates[time][block_i],
observation["Blocks"][block_i, constants.POSITION_X_FEATURE_INDEX],
delta=delta)
self.assertAlmostEqual(
expected_y_coordinates[time][block_i],
observation["Blocks"][block_i, constants.POSITION_Y_FEATURE_INDEX],
delta=delta)
self.assertAlmostEqual(
expected_num_collisions[time][block_i],
observation["Blocks"][
block_i, constants.COLLISION_COUNT_FEATURE_INDEX],
delta=delta)
@print_func
def _spawn_collision_test(self, unity_env):
"""Compares final position of the blocks after falling."""
# Remove the final action, since we do not want to run the physics.
actions = self._collision_masks_actions()[:-1]
# We know there are objects at y = 10 and x = -3, -1, 1, 3, with collision
# masks 0b0001, 0b0010, 0b0100 and 0b1000.
x_positions = [-3, -1, 1, 3]
shared_action = {
"SpawnBlock": 1.0, "Width": 0.5, "Height": 0.5, "SetPosY": 10.0,
"FreeBody": 1}
    # Try adding objects that should not cause a spawn collision, then ones
    # that should.
masks_no_collisions = [0b1110, 0b1101, 0b1011, 0b0111]
masks_collisions = [0b0001, 0b0010, 0b0100, 0b1000]
for expected_spawn_collision_count, masks in [(0, masks_no_collisions),
(1, masks_collisions)]:
for x_pos, mask in zip(x_positions, masks):
self._rollout_environment(
unity_env, actions, send_actions_as_list=True)
action = shared_action.copy()
action.update({"SetPosX": x_pos, "CollisionMask": mask})
new_observation = unity_env.step(action).observation
self.assertEqual(expected_spawn_collision_count,
new_observation["SpawnCollisionCount"])
@print_func
def _verify_restored_observation_test(self):
unity_env = self._unity_env_features
actions = self._collision_masks_actions()
unity_env.reset()
observation = unity_env.step(actions).observation
unity_environment._verify_restored_observation(
observation, observation, difference_threshold_abs=1e-9)
for observation_name in observation.keys():
if observation_name in ["RGB", "ObserverRGB"]:
continue
observation_item = observation[observation_name]
observation_item_flat = observation_item.flatten().copy()
if not observation_item_flat.size:
continue
      if observation_item.dtype == np.bool_:
observation_item_flat[0] = not observation_item_flat[0]
elif observation_item.dtype == np.int32:
observation_item_flat[0] += 1
elif observation_item.dtype == np.float32:
observation_item_flat[0] += 5e-4
else:
raise ValueError("Unknown observation type.")
bad_observation_item = np.reshape(
observation_item_flat, observation_item.shape)
bad_observation = observation.copy()
bad_observation[observation_name] = bad_observation_item
if observation_item.dtype == np.float32:
# This should not fail, since it is under the threshold.
unity_environment._verify_restored_observation(
observation, bad_observation, difference_threshold_abs=1e-3)
with self.assertRaisesRegex(
constants.RestoreVerificationError,
"`%s` observation with shape" % observation_name):
unity_environment._verify_restored_observation(
observation, bad_observation, difference_threshold_abs=1e-4)
# Check that verify_velocities == False does ignore block velocities.
bad_observation = observation.copy()
bad_observation["Blocks"] = observation["Blocks"].copy()
bad_observation["Blocks"][:, constants.VELOCITY_X_FEATURE_INDEX] = 1000
bad_observation["Blocks"][:, constants.VELOCITY_Y_FEATURE_INDEX] = 1000
bad_observation["Blocks"][
:, constants.ANGULAR_VELOCITY_FEATURE_INDEX] = 1000
unity_environment._verify_restored_observation(
observation, bad_observation, difference_threshold_abs=1e-9,
verify_velocities=False)
with self.assertRaisesRegex(constants.RestoreVerificationError,
"`Blocks` observation with shape"):
unity_environment._verify_restored_observation(
observation, bad_observation, difference_threshold_abs=100.,
verify_velocities=True)
def _collision_masks_actions(self):
"""Generates 4 floors and 4 blocks falling with custom collision masks."""
actions = [
{"SpawnBlock": 1.0, "SetPosY": 0.0, "Width": 50.0,
"Height": 0.2, "R": 0.2, "CollisionMask": 0b1111},
{"SpawnBlock": 1.0, "SetPosY": 2.5, "Width": 50.0,
"Height": 0.2, "R": 0.2, "CollisionMask": 0b0111},
{"SpawnBlock": 1.0, "SetPosY": 5.0, "Width": 50.0,
"Height": 0.2, "R": 0.2, "CollisionMask": 0b0011},
{"SpawnBlock": 1.0, "SetPosY": 7.5, "Width": 50.0,
"Height": 0.2, "R": 0.2, "CollisionMask": 0b0001},
{"SpawnBlock": 1.0, "Width": 1., "Height": 1., "SetPosX": -3.0,
"SetPosY": 10.0, "R": 0.4, "CollisionMask": 0b0001, "FreeBody": 1},
{"SpawnBlock": 1.0, "Width": 1., "Height": 1., "SetPosX": -1.0,
"SetPosY": 10.0, "R": 0.4, "CollisionMask": 0b0010, "FreeBody": 1},
{"SpawnBlock": 1.0, "Width": 1., "Height": 1., "SetPosX": 1.0,
"SetPosY": 10.0, "R": 0.4, "CollisionMask": 0b0100, "FreeBody": 1},
{"SpawnBlock": 1.0, "Width": 1., "Height": 1., "SetPosX": 3.0,
"SetPosY": 10.0, "R": 0.4, "CollisionMask": 0b1000, "FreeBody": 1},
{"SimulationSteps": 400}]
return actions
def _multiple_balls_actions(self):
"""Spawns a floor, some bumpers, and som balls."""
# Floor and bumpers.
setup_actions = [
{"SpawnBlock": 1.0, "SetPosX": 0.0, "SetPosY": 0.0, "Width": 50.0,
"Height": 0.2, "R": 0.2},
{"SpawnBlock": 1.0, "SetPosX": 3.0, "SetPosY": 4.0, "SetAngle": 3.14/4,
"Width": 2.0, "Height": 0.1, "R": 0.4, "Sticky": 0},
{"SpawnBlock": 1.0, "SetPosX": 1.0, "SetPosY": 2.0, "SetAngle": 3.14/7,
"Width": 2.0, "Height": 1.0, "R": 0.4},
{"SpawnBlock": 1.0, "Shape": 2.0, "SetPosX": -2.0, "SetPosY": 0.5,
"SetAngle": 0., "Width": 5.0, "Height": 1.0, "R": 0.4},
]
# Balls spawned at intervals.
periodic_actions = [
{"SpawnBlock": 1.0, "Shape": 1.0, "FreeBody": 1,
"SetPosX": 3.0, "SetPosY": 5, "Width": 0.5, "Height": 0.5, "G": 1.0,
"LinearDrag": 0.5},
{"SimulationSteps": 100, "StopOnCollision": 1.},
{"SimulationSteps": 200, "StopOnCollision": 0.}]
return setup_actions + periodic_actions * 2
def _stop_on_collision_actions(self):
"""Generates a set of actions of two balls bouncing."""
simulation_steps = 600
size_ball_1 = 0.5
size_ball_2 = 0.5
density_ball_1 = 1.0
density_ball_2 = 1.0
bounciness = 1.0
linear_drag = 0.0
angular_drag = 1000
actions_setup = [{ # Right wall.
"SpawnBlock": 1.0,
"SetPosX": 7.5,
"SetPosY": 5.0,
"Width": 0.2,
"Height": 10,
"Bounciness": bounciness,
"LinearDrag": linear_drag,
"G": 0.3
}, { # Left wall.
"SpawnBlock": 1.0,
"SetPosX": -7.5,
"SetPosY": 5.0,
"Width": 0.2,
"Height": 10,
"Bounciness": bounciness,
"LinearDrag": linear_drag,
"G": 0.3
}]
actions_setup.append({ # Left ball.
"SpawnBlock": 1.0,
"Shape": constants.BALL_SHAPE,
"FreeBody": 1,
"SetPosX": -4.0,
"SetPosY": 5.5,
"Width": size_ball_1,
"Height": size_ball_1,
"Sticky": 0.0,
"R": 1.0,
"SetVelX": 5.0,
"SetVelY": 0.0,
"Bounciness": bounciness,
"LinearDrag": linear_drag,
"AngularDrag": angular_drag,
"Density": density_ball_1
})
actions_setup.append({ # Right ball.
"SpawnBlock": 1.0,
"Shape": constants.BALL_SHAPE,
"FreeBody": 1,
"SetPosX": 0.0,
"SetPosY": 5.5,
"Width": size_ball_2,
"Height": size_ball_2,
"Sticky": 0.0,
"G": 1.0,
"SetVelX": 0.0,
"SetVelY": 0.0,
"Bounciness": bounciness,
"LinearDrag": linear_drag,
"AngularDrag": angular_drag,
"Density": density_ball_2
})
actions_dynamics = []
for _ in range(6):
actions_dynamics.append({
"SimulationSteps": simulation_steps,
"StopOnCollision": 1.0,
"GravityY": 0.0,
"GravityX": 0.0
})
return actions_setup, actions_dynamics
if __name__ == "__main__":
absltest.main()
| dm_construction-master | dm_construction/unity/environment_test.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A collection of observation wrappers for construction tasks."""
from dm_construction.wrappers import base
from dm_env import specs
import numpy as np
def _rescale_axis(array, target_size, axis=0, dtype=None):
"""Rescales the axis of an array to a target size using the mean.
Args:
array: input array.
target_size: the desired size for axis. The target size must divide exactly
the input size of the axis.
axis: Axis to rescale to target_size.
    dtype: dtype of the output array. If None, the dtype of the input array
      will be preserved, although the mean will be calculated as np.float32.
Returns:
array with axis rescaled to the size.
"""
if dtype is None:
dtype = array.dtype
if array.shape[axis] == target_size:
return array.astype(dtype)
if array.shape[axis] % target_size != 0:
raise ValueError("The target size {} should divide exactly "
"the input size {}."
.format(target_size, array.shape[axis]))
downsize = array.shape[axis] // target_size
leading_dims = array.shape[:axis]
trailing_dims = array.shape[axis+1:]
array_reshaped = np.reshape(
array, leading_dims + (target_size, downsize) + trailing_dims)
averaged_array = np.mean(
array_reshaped, axis=axis+1, dtype=np.float32).astype(dtype)
return averaged_array
def _rescale_frame(frame, target_size, axis_height=0, axis_width=1, dtype=None):
"""Rescales a frame to a target size using the mean..
Args:
    frame: Frame with at least rank 2, whose spatial axes are indicated by
      axis_height and axis_width.
target_size: 2-tuple with the desired size for axis_height and axis_width
respectively. The target size must divide exactly the input sizes of the
corresponding axes.
axis_height: Axis to rescale to target_size[0].
axis_width: Axis to rescale to target_size[1].
    dtype: dtype of the output frame. If None, the dtype of the input frame
      will be preserved, although the mean will be calculated as np.float32.
Returns:
Frame rescaled to the target size.
"""
return _rescale_axis(
_rescale_axis(frame, target_size[0], axis=axis_height, dtype=dtype),
target_size[1], axis=axis_width, dtype=dtype)
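# A small illustrative example (not part of the original module): a 64x64x3
# uint8 frame is averaged down to 32x32x3. The target sizes must divide the
# input sizes exactly, as enforced by `_rescale_axis`.
def _example_rescale_frame():
  frame = (np.arange(64 * 64 * 3) % 255).astype(np.uint8).reshape([64, 64, 3])
  small = _rescale_frame(frame, (32, 32))
  assert small.shape == (32, 32, 3) and small.dtype == np.uint8
  return small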
class ContinuousAbsoluteImageWrapper(base.ConstructionBaseWrapper):
"""Rescales and exposes RGB observations with continuous absolute actions."""
def __init__(self, env, output_resolution=(64, 64)):
super(ContinuousAbsoluteImageWrapper, self).__init__(env=env)
self._output_resolution = output_resolution
def observation_spec(self):
rgb_spec = self._env.observation_spec()["RGB"]
shape = list(self._output_resolution) + [rgb_spec.shape[-1]]
dtype = rgb_spec.dtype
obs_spec = specs.Array(shape, dtype=dtype, name=rgb_spec.name)
return obs_spec
def action_spec(self):
spec = self._env.action_spec().copy()
spec["Sticky"] = specs.BoundedArray(
[], dtype=np.float32, minimum=-1, maximum=1)
return spec
def _process_time_step(self, time_step):
rgb_observation = time_step.observation["RGB"]
# Remove extra time dimension returned by some environments (ie marble run)
if rgb_observation.ndim == 4:
rgb_observation = rgb_observation[0]
observation = _rescale_frame(rgb_observation, self._output_resolution)
return time_step._replace(observation=observation)
def step(self, action):
updated_action = action.copy()
# Convert continuous sticky action to discrete.
updated_action["Sticky"] = int(action["Sticky"] > 0)
return super(ContinuousAbsoluteImageWrapper, self).step(updated_action)
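# A hedged usage sketch (not part of the original module): `base_env` is
# assumed to be a construction environment exposing an "RGB" observation and
# continuous "Horizontal"/"Vertical"/"Sticky" actions.
def _example_wrap_and_step(base_env):
  """Illustrative only: wraps `base_env` and takes one arbitrary step."""
  wrapped = ContinuousAbsoluteImageWrapper(base_env, output_resolution=(64, 64))
  wrapped.reset()
  action = {name: spec.generate_value()
            for name, spec in wrapped.action_spec().items()}
  return wrapped.step(action)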
| dm_construction-master | dm_construction/wrappers/continuous_absolute.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_construction-master | dm_construction/wrappers/__init__.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Task utils."""
import warnings
import numpy as np
def pad_to_size(array, size, axis, repeat=False, error_on_trim=True,
warning_on_trim=True):
"""Pads an array to the desired size in the specified axis.
Args:
array: Input array to be padded.
    size: desired dimension for the indicated axis.
axis: axis to along which to pad the array.
    repeat: if False, the array will be padded with zeros; otherwise, it will
      be padded with the last element of the array along that axis.
error_on_trim: If True it will show an error if the size of the axis is
larger than the desired size.
warning_on_trim: If True and error_on_trim==False, it will show a warning
instead of an error.
Returns:
The padded array.
Raises:
ValueError: If the array is larger than size along the specified axis.
"""
if array.shape[axis] < size:
if repeat:
padding_slice = np.take(array, -1, axis=axis)
missing_length = size - array.shape[axis]
missing_block = np.stack([padding_slice]*missing_length, axis=axis)
else:
padding_shape = list(array.shape)
padding_shape[axis] = size - padding_shape[axis]
missing_block = np.zeros(padding_shape, array.dtype)
array = np.concatenate(
[array, missing_block],
axis=axis)
elif array.shape[axis] > size:
if error_on_trim:
raise ValueError("Trying to pad into a smaller size %d->%d"%
(array.shape[axis], size))
if warning_on_trim:
warnings.warn("Padding into a smaller size results on trimming %d->%d."%
(array.shape[axis], size))
array = np.take(array, list(range(size)), axis=axis)
return array
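# Illustrative example (not part of the original module): padding a (2, 3)
# array to 5 columns, either with zeros or by repeating the last column.
def _example_pad_to_size():
  array = np.array([[1., 2., 3.], [4., 5., 6.]], dtype=np.float32)
  zero_padded = pad_to_size(array, size=5, axis=1)            # zeros appended
  repeated = pad_to_size(array, size=5, axis=1, repeat=True)  # last col repeated
  assert zero_padded.shape == (2, 5) and repeated.shape == (2, 5)
  return zero_padded, repeated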
| dm_construction-master | dm_construction/wrappers/utils.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A collection of observation wrappers for construction tasks."""
from dm_construction.unity import constants as unity_constants
from dm_construction.utils import constants
from dm_construction.wrappers import base
import dm_env
from dm_env import specs
import numpy as np
# Used to avoid putting actions at exactly the limits of the scene.
_SMALL_EPSILON = 1e-6
# Added to y-coordinates of actions, to avoid triggering a collision between the
# object being placed and the object below it. We cannot use a value too low,
# because of Unity's collision behavior.
_Y_MARGIN = 4e-2
def _discrete_array_spec(shape, base_name):
return specs.Array(shape, dtype=np.int32, name=base_name + "_spec")
def _continuous_array_spec(shape, base_name):
return specs.Array(shape, dtype=np.float32, name=base_name + "_spec")
def _slices_and_indices_to_indices(slices_or_indices):
indices = []
for slice_or_index in slices_or_indices:
if isinstance(slice_or_index, slice):
if slice_or_index.step not in [None, 1]:
raise ValueError("slices should only use a step size of 1")
indices.extend(list(range(slice_or_index.start, slice_or_index.stop)))
else:
indices.append(slice_or_index)
return sorted(indices)
def _get_relative_discretization_grid(point_counts_inside):
if point_counts_inside > 0:
extra_side_width = max(1. / point_counts_inside, _Y_MARGIN)
else:
extra_side_width = _Y_MARGIN
bound = 1. + extra_side_width
# Create a linspace that allows to stack blocks on the sides of each other
# as well.
return np.linspace(-bound, bound, point_counts_inside + 3)
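# Illustrative only (not part of the original module): with
# `point_counts_inside=2` the grid has 5 points spanning [-1.5, 1.5], slightly
# beyond [-1, 1] so a block can also be placed flush against either side of
# the base block. The slice helper simply expands slices into sorted indices.
def _example_discretization_helpers():
  grid = _get_relative_discretization_grid(2)                 # 5 points
  indices = _slices_and_indices_to_indices([slice(0, 2), 4])  # [0, 1, 4]
  return grid, indices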
class DiscreteRelativeGraphWrapper(base.ConstructionBaseWrapper):
"""Creates graph-based observations with discrete relative actions."""
def __init__(self,
env,
allow_reverse_action=False,
max_x_width=2.0,
max_y_width=2.0,
discretization_steps=12,
invalid_edge_penalty=0.,
enable_y_action=False,
enable_glue_action=True,
enable_selection_action=True):
"""Wraps an environment with graph-structured observations and actions.
    The environment supports the following modes of discrete actions:
    - Agent only providing "x_action" (defining the x-coordinate of the placed
      block, starting from the leftmost position).
    - Agent additionally providing "y_action" (defining the y-coordinate of
      the placed block, starting from the bottommost position). To use this
      mode, set enable_y_action to True.
Args:
env: An instance of `ConstructionBaseWrapper`.
allow_reverse_action: Whether to allow the action to be attached to an
        edge in the reverse direction. If this option is set to True and the
        edge to which the action is attached goes from a valid moved block to
        a valid base block (instead of the reverse direction), then the
action corresponding to the reverse edge will be taken. Otherwise, the
episode will end with an `invalid_edge` termination.
max_x_width: The accessible width along the x axis, centered on the chosen
block center.
max_y_width: The accessible width along the y axis, centered on the chosen
block center.
discretization_steps: The number of discrete steps along the x and y axis.
invalid_edge_penalty: The penalty received when selecting an invalid edge
(a positive number; the reward will be minus that).
      enable_y_action: Whether the agent also selects the y-coordinate. If False,
the y coordinate is set to be a small margin on top of the block, at the
given y coordinate.
      enable_glue_action: Whether the agent selects whether to glue or not. If
False, glue is always applied.
enable_selection_action: Whether the agent selects the order of the
blocks.
"""
super(DiscreteRelativeGraphWrapper, self).__init__(env=env)
self._allow_reverse_action = allow_reverse_action
self._discretization_steps = discretization_steps
assert invalid_edge_penalty > -1e-6
self._invalid_edge_penalty = invalid_edge_penalty
self._enable_y_action = enable_y_action
self._enable_glue_action = enable_glue_action
self._enable_selection_action = enable_selection_action
self._init_observation_wrapping()
self._init_action_wrapping()
def _init_observation_wrapping(self):
"""Sets up attributes needed for wrapping observations."""
# Which keys from the underlying observation to include as nodes in the
# graph observation.
self._node_types = [
constants.BLOCK, constants.AVAILABLE_BLOCK, constants.OBSTACLE,
constants.TARGET]
if constants.BALL in self._env.observation_spec():
self._node_types.append(constants.BALL)
# We will first concatenate on one hots, then cherry pick the node features
# that we want. Before doing the cherry picking, these will be the indices
# of the one hot node types.
self._one_hot_feature_slice = slice(
unity_constants.BLOCK_SIZE,
unity_constants.BLOCK_SIZE + len(self._node_types))
# Which features from the underlying observation to include in the node
# attributes.
self._node_features = _slices_and_indices_to_indices([
unity_constants.POSITION_FEATURE_SLICE,
unity_constants.ORIENTATION_FEATURE_SLICE,
unity_constants.WIDTH_FEATURE_INDEX,
unity_constants.HEIGHT_FEATURE_INDEX,
unity_constants.LINEAR_VELOCITY_FEATURE_SLICE,
unity_constants.ANGULAR_VELOCITY_FEATURE_INDEX,
unity_constants.STICKY_FEATURE_INDEX,
unity_constants.FREE_OBJECT_FEATURE_INDEX,
unity_constants.SHAPE_FEATURE_SLICE,
self._one_hot_feature_slice
])
def _init_action_wrapping(self):
"""Sets up attributes needed for wrapping actions."""
valid_base_block_types = [
constants.BLOCK, constants.OBSTACLE, constants.TARGET]
if "Balls" in self._env.observation_spec():
valid_base_block_types.append(constants.BALL)
self._valid_base_block_one_hots = [
self._get_node_one_hot_index(x)
for x in valid_base_block_types
]
self._valid_moved_block_one_hots = [
self._get_node_one_hot_index(x)
for x in [constants.AVAILABLE_BLOCK]
]
self._non_physical_one_hots = [
self._get_node_one_hot_index(x)
for x in [constants.TARGET]
]
self._x_feature_index = self._get_feature_index(
unity_constants.POSITION_X_FEATURE_INDEX)
self._y_feature_index = self._get_feature_index(
unity_constants.POSITION_Y_FEATURE_INDEX)
self._height_feature_index = (
self._get_feature_index(
unity_constants.HEIGHT_FEATURE_INDEX))
standard_action_spec = self._env.action_spec()
if "Selector" not in standard_action_spec:
self._enable_selection_action = False
self._min_x = (
float(standard_action_spec["Horizontal"].minimum) + _SMALL_EPSILON)
self._min_y = (
float(standard_action_spec["Vertical"].minimum) + _SMALL_EPSILON)
self._max_x = (
float(standard_action_spec["Horizontal"].maximum) - _SMALL_EPSILON)
self._max_y = (
float(standard_action_spec["Vertical"].maximum) - _SMALL_EPSILON)
self._num_x_actions = self._discretization_steps + 3
self._num_y_actions = self._discretization_steps + 3
self._relative_x_positions = _get_relative_discretization_grid(
self._discretization_steps)
self._relative_y_positions = _get_relative_discretization_grid(
self._discretization_steps)
# Ignoring attributes with nested structure that are constant to avoid
# unnecessary deepcopies of those when restoring states. This is not
# technically necessary (e.g. we do not bother with scalar attributes).
self._state_ignore_fields.extend([
"_valid_base_block_one_hots", "_valid_moved_block_one_hots",
"_non_physical_one_hots", "_relative_x_positions",
"_relative_y_positions"
])
def _get_feature_index(self, core_index):
return self._node_features.index(core_index)
def _get_node_one_hot_index(self, object_type):
# Get the index just in the one-hots
base_index = self._node_types.index(object_type)
# Get the feature index into node_features
features = _slices_and_indices_to_indices([self._one_hot_feature_slice])
feature_index = features[base_index]
# Look up the actual index
one_hot_index = self._node_features.index(feature_index)
return one_hot_index
def action_spec(self):
edge_spec = {
"Index": specs.Array([], dtype=np.int32),
"x_action": specs.BoundedArray(
[], np.int32, 0, self._num_x_actions - 1)
}
if self._enable_y_action:
edge_spec.update({
"y_action": specs.BoundedArray(
[], np.int32, 0, self._num_y_actions - 1)
})
if self._enable_glue_action:
edge_spec.update({"sticky": specs.BoundedArray([], np.int32, 0, 1)})
return edge_spec
def observation_spec(self):
"""The observation spec as a graph.
Note that while this method returns a dictionary, it is compatible with the
GraphsTuple data structure from the graph_nets library. To convert the spec
from this method to a GraphsTuple:
from graph_nets import graphs
spec = graphs.GraphsTuple(**env.observation_spec())
Returns:
spec: the observation spec as a dictionary
"""
node_size = len(self._node_features)
nodes_spec = _continuous_array_spec([0, node_size], "nodes")
edges_spec = _continuous_array_spec([0, 1], "edges")
senders_spec = _discrete_array_spec([0], "senders")
receivers_spec = _discrete_array_spec([0], "receivers")
globals_spec = _continuous_array_spec([1, 1], "globals")
n_node_spec = _discrete_array_spec([1], "n_node")
n_edge_spec = _discrete_array_spec([1], "n_edge")
observation_spec = dict(
nodes=nodes_spec,
edges=edges_spec,
globals=globals_spec,
n_node=n_node_spec,
n_edge=n_edge_spec,
receivers=receivers_spec,
senders=senders_spec
)
return observation_spec
def _get_nodes(self, observation):
"""Returns node attributes."""
objects = []
for i, key in enumerate(self._node_types):
# Remove extra time dimension returned by some environments
# (like marble run)
features = observation[key]
if features.ndim == 3:
features = features[:, 0]
# Add a one-hot indicator of the node type.
one_hot = np.zeros(
(features.shape[0], len(self._node_types)), dtype=np.float32)
one_hot[:, i] = 1
features = np.concatenate([features, one_hot], axis=1)
objects.append(features)
return np.concatenate(objects, axis=0)
def _get_edges(self, nodes):
sender_node_inds = np.arange(len(nodes))
receiver_node_inds = np.arange(len(nodes))
senders, receivers = np.meshgrid(sender_node_inds, receiver_node_inds)
senders, receivers = senders.flatten(
).astype(np.int32), receivers.flatten().astype(np.int32)
# This removes self-edges.
same_index = senders == receivers
senders = senders[~same_index]
receivers = receivers[~same_index]
edge_content = np.zeros([senders.shape[0], 1], dtype=np.float32)
return edge_content, senders, receivers
def _get_globals(self):
return np.zeros([1], dtype=np.float32)
def _order_nodes(self, observation):
"""Order nodes based on object id."""
indices = observation["nodes"][
:, unity_constants.ID_FEATURE_INDEX].astype(int)
ordering = np.argsort(indices)
# update nodes
nodes = observation["nodes"][ordering]
# update senders/receivers
ordering = list(ordering)
inverse_ordering = np.array(
[ordering.index(i) for i in range(len(ordering))], dtype=np.int32)
if observation["senders"] is not None:
senders = inverse_ordering[observation["senders"]]
else:
senders = None
if observation["receivers"] is not None:
receivers = inverse_ordering[observation["receivers"]]
else:
receivers = None
new_observation = observation.copy()
new_observation.update(dict(
nodes=nodes,
senders=senders,
receivers=receivers))
return new_observation
def _select_node_features(self, observation):
"""Cherry-pick desired node features."""
nodes = observation["nodes"][:, self._node_features]
new_observation = observation.copy()
new_observation["nodes"] = nodes
return new_observation
def _process_time_step(self, time_step):
nodes = self._get_nodes(time_step.observation)
edges, senders, receivers = self._get_edges(nodes)
globals_ = self._get_globals()
observation = dict(
nodes=nodes,
edges=edges,
globals=globals_[np.newaxis],
n_node=np.array([nodes.shape[0]], dtype=int),
n_edge=np.array([edges.shape[0]], dtype=int),
receivers=receivers,
senders=senders)
observation = self._order_nodes(observation)
observation = self._select_node_features(observation)
time_step = time_step._replace(observation=observation)
return time_step
def _compute_continuous_action(self, base_pos, base_length, moved_length,
offset, min_pos, max_pos):
ratio = (base_length + moved_length) / 2.
return np.clip(base_pos + offset * ratio, min_pos, max_pos)
def reset(self, *args, **kwargs): # pylint: disable=useless-super-delegation
"""Reset the environment.
Note that while this method returns observations as a dictionary, they are
compatible with the GraphsTuple data structure from the graph_nets library.
To convert the observations returned by this method to a GraphsTuple:
from graph_nets import graphs
timestep = env.reset()
timestep = timestep._replace(
observation=graphs.GraphsTuple(**timestep.observation))
Args:
*args: args to pass to super
**kwargs: args to pass to super
Returns:
timestep: a dm_env.TimeStep
"""
return super(DiscreteRelativeGraphWrapper, self).reset(*args, **kwargs)
def step(self, action):
"""Step the environment.
Note that while this method returns observations as a dictionary, they are
compatible with the GraphsTuple data structure from the graph_nets library.
To convert the observations returned by this method to a GraphsTuple:
from graph_nets import graphs
timestep = env.step(action)
timestep = timestep._replace(
observation=graphs.GraphsTuple(**timestep.observation))
Args:
action: the action to take in the environment.
Returns:
timestep: a dm_env.TimeStep
"""
valid_action, base_block, moved_block = self._validate_edge_index(
int(action["Index"]))
if not valid_action:
self._termination_reason = constants.TERMINATION_INVALID_EDGE
self._last_time_step = dm_env.TimeStep(
step_type=dm_env.StepType.LAST,
observation=self._last_time_step.observation,
reward=-self._invalid_edge_penalty,
discount=0)
return self._last_time_step
block_x = base_block[self._x_feature_index]
block_y = base_block[self._y_feature_index]
selector = moved_block[self._x_feature_index]
width_index = self._get_feature_index(
unity_constants.WIDTH_FEATURE_INDEX)
base_width = np.abs(base_block[width_index])
moved_width = np.abs(moved_block[width_index])
base_height = np.abs(base_block[self._height_feature_index])
moved_height = np.abs(moved_block[self._height_feature_index])
x_continuous_action = self._compute_continuous_action(
base_pos=block_x,
base_length=base_width,
moved_length=moved_width,
offset=self._relative_x_positions[action["x_action"]],
min_pos=self._min_x,
max_pos=self._max_x)
if self._enable_y_action:
y_continuous_action = self._compute_continuous_action(
base_pos=block_y,
base_length=base_height,
moved_length=moved_height,
offset=self._relative_y_positions[action["y_action"]],
min_pos=self._min_y,
max_pos=self._max_y)
else:
y_continuous_action = block_y + _Y_MARGIN
if all(base_block[self._non_physical_one_hots] < 0.5):
y_continuous_action += (base_height + moved_height) / 2.
updated_action = {
"Horizontal": np.array(x_continuous_action, dtype=np.float32),
"Vertical": np.array(y_continuous_action, dtype=np.float32),
"Sticky": np.array(1., dtype=np.int32),
}
if self._enable_glue_action:
updated_action["Sticky"] = action["sticky"]
if self._enable_selection_action:
updated_action["Selector"] = selector
self._last_time_step = self._process_time_step(
self._env.step(updated_action))
return self._last_time_step
def _validate_edge_index(self, edge_index):
"""Checks that an action connecting first_node to second_node is valid.
    An action is valid if it connects a marker or block to an available block.
Args:
edge_index: Index of the edge to apply the action relatively with.
Returns:
is_valid: A boolean indicating whether the action was valid.
base_block: The features of the base block, or None.
moved_block: The features of the moved block, or None.
"""
previous_observation = self._last_time_step.observation
edges = list(
zip(previous_observation["senders"], previous_observation["receivers"]))
edge = edges[edge_index]
nodes = previous_observation["nodes"]
first_node_features = nodes[edge[0]]
second_node_features = nodes[edge[1]]
if not self._enable_selection_action:
first_movable_block = next((i for i, x in enumerate(nodes)
if x[self._valid_moved_block_one_hots] > 0.5),
None)
if edge[0] != first_movable_block and edge[1] != first_movable_block:
return False, None, None
if self._allow_reverse_action and any(
first_node_features[self._valid_base_block_one_hots] > 0.5):
base_block = first_node_features
moved_block = second_node_features
elif any(second_node_features[self._valid_base_block_one_hots] > 0.5):
base_block = second_node_features
moved_block = first_node_features
else:
return False, None, None # Not a valid base block.
if not any(moved_block[self._valid_moved_block_one_hots] > 0.5):
return False, None, None # Not a valid moved block.
return True, base_block, moved_block
@property
def termination_reason(self):
if self._termination_reason:
return self._termination_reason
return super(DiscreteRelativeGraphWrapper, self).termination_reason
@property
def all_termination_reasons(self):
return self.core_env.all_termination_reasons + [
constants.TERMINATION_INVALID_EDGE]
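# A hedged usage sketch reiterating the docstrings above (not part of the
# original module): observations are plain dictionaries compatible with the
# graph_nets GraphsTuple structure. `base_env` is assumed to be a wrapped
# construction environment providing "Horizontal"/"Vertical" actions.
def _example_graph_observation(base_env):
  from graph_nets import graphs  # Assumes graph_nets is installed.
  env = DiscreteRelativeGraphWrapper(base_env)
  timestep = env.reset()
  return graphs.GraphsTuple(**timestep.observation)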
| dm_construction-master | dm_construction/wrappers/discrete_relative.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A collection of observation wrappers for construction tasks."""
from dm_construction.utils import serialization
import dm_env
class ConstructionBaseWrapper(dm_env.Environment):
"""A base class for wrappers around construction tasks."""
def __init__(self, env):
self._env = env
super(ConstructionBaseWrapper, self).__init__()
self._state_ignore_fields = ["_env"]
self._last_time_step = None
def close(self):
self._env.close()
def _process_time_step(self, time_step):
return time_step
def reset(self, *args, **kwargs):
self._termination_reason = None
self._last_time_step = self._process_time_step(
self._env.reset(*args, **kwargs))
return self._last_time_step
def step(self, action):
self._last_time_step = self._process_time_step(self._env.step(action))
return self._last_time_step
def action_spec(self):
return self._env.action_spec()
def observation_spec(self):
return self._env.observation_spec()
@property
def core_env(self):
return self._env.core_env
def get_state(self):
state = serialization.get_object_state(self, self._state_ignore_fields)
state["_env"] = self._env.get_state()
return state
def set_state(self, state):
serialization.set_object_state(self, state, self._state_ignore_fields)
self._env.set_state(state["_env"])
@property
def last_time_step(self):
return self._last_time_step
@property
def termination_reason(self):
"""A string indicating the reason why an episode was terminated."""
return self.core_env.termination_reason
@property
def all_termination_reasons(self):
"""All possible termination reasons for this environment."""
return self.core_env.all_termination_reasons
@property
def episode_logs(self):
if hasattr(self.core_env, "episode_logs"):
return self.core_env.episode_logs
return {}
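# Illustrative sketch (not part of the original module) of the
# get_state/set_state protocol above: saving the wrapper state, stepping, and
# rolling back. Restoring the underlying scene relies on the wrapped
# environment implementing set_state as well.
def _example_checkpoint_roundtrip(wrapper, action):
  state = wrapper.get_state()
  timestep_after_step = wrapper.step(action)
  wrapper.set_state(state)
  return timestep_after_step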
| dm_construction-master | dm_construction/wrappers/base.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""An environment for stacking blocks from the floor to targets.
See: Bapst, V., Sanchez-Gonzalez, A., Doersch, C., Stachenfeld, K., Kohli, P.,
Battaglia, P., & Hamrick, J. (2019, May). Structured agents for physical
construction. In International Conference on Machine Learning (pp. 464-474).
"""
from dm_construction.environments import stacking
from dm_construction.unity import constants as unity_constants
from dm_construction.utils import block as block_utils
from dm_construction.utils import constants
from dm_construction.utils import geometry
import numpy as np
def _is_target_reached(block, target):
"""Returns `True` if block overlaps with target, `False` otherwise."""
target_center_x = target[unity_constants.POSITION_X_FEATURE_INDEX]
target_center_y = target[unity_constants.POSITION_Y_FEATURE_INDEX]
center_x = block[unity_constants.POSITION_X_FEATURE_INDEX]
center_y = block[unity_constants.POSITION_Y_FEATURE_INDEX]
cos_theta = block[unity_constants.COSINE_ANGLE_FEATURE_INDEX]
sin_theta = block[unity_constants.SINE_ANGLE_FEATURE_INDEX]
width = block[unity_constants.WIDTH_FEATURE_INDEX]
height = block[unity_constants.HEIGHT_FEATURE_INDEX]
x1, y1 = geometry.rotate_rectangle_corner(
center_x + width / 2, center_y - height / 2,
center_x, center_y, cos_theta, sin_theta)
x2, y2 = geometry.rotate_rectangle_corner(
center_x + width / 2, center_y + height / 2,
center_x, center_y, cos_theta, sin_theta)
x3, y3 = geometry.rotate_rectangle_corner(
center_x - width / 2, center_y + height / 2,
center_x, center_y, cos_theta, sin_theta)
x4, y4 = geometry.rotate_rectangle_corner(
center_x - width / 2, center_y - height / 2,
center_x, center_y, cos_theta, sin_theta)
return geometry.is_point_in_rectangle(
x1, y1, x2, y2, x3, y3, x4, y4, target_center_x, target_center_y,)
def _count_targets_reached(targets, blocks):
"""Returns the reward for the connecting task."""
targets_reached = 0
for target in targets:
for block in blocks:
is_reached = _is_target_reached(block, target)
if is_reached:
targets_reached += 1
break
return targets_reached
class ConstructionConnecting(stacking.ConstructionStacking):
"""Environment for the Connecting task.
In the Connecting task, the agent must stack blocks to connect the floor to
three different target locations, avoiding randomly positioned obstacles
arranged in layers. The reward function is: +1 for each target whose center is
touched by at least one block, and 0 (no penalty) for each block set to
sticky. The task-specific termination criterion is achieved when all targets
are connected to the floor.
Generalization levels:
* `"mixed_height_targets"`: Scenes where different targets may be at
different vertical positions interleaved with the obstacle layers (in
other levels all three targets are always on top of the highest layer of
obstacles).
* `"additional_layer"`: Scenes with 4 layers of obstacles, with targets
above the new highest obstacle layer (maximum number of obstacle
layers for other levels is 3).
"""
def __init__(self,
unity_environment,
sticky_penalty=0.0,
**stacking_kwargs):
"""Inits the environment.
Args:
unity_environment: See base class.
sticky_penalty: See base class.
**stacking_kwargs: keyword arguments passed to
covering.ConstructionStacking.
"""
self._max_steps = None
super(ConstructionConnecting, self).__init__(
unity_environment=unity_environment,
block_replacement=True,
sticky_penalty=sticky_penalty,
max_difficulty=9,
progress_threshold=0.98,
**stacking_kwargs)
def _compute_max_episode_reward(self, obs):
return len(obs.targets)
def _maybe_update_max_steps(self):
pass
def _get_task_reward(self, obstacles, targets, blocks):
"""Computes the current score based on the targets and placed blocks."""
del obstacles
targets_reached = _count_targets_reached(targets, blocks)
return targets_reached
def _get_generator(self, difficulty):
offset = 0
if isinstance(difficulty, int):
min_num_targets = 3
max_num_targets = 3
# Targets on floor level, no obstacles
if difficulty >= 0:
min_num_obstacles = 0
max_num_obstacles = 0
obstacles_ys_range = [(0,)]
targets_ys_range = [(0,)]
self._max_steps = 7
# Targets one level higher
if difficulty >= 1:
targets_ys_range = [(1,)]
self._max_steps = 7
# Obstacles on the floor, targets one level higher
if difficulty >= 2:
min_num_obstacles = 1
max_num_obstacles = 1
obstacles_ys_range = [(0,)]
targets_ys_range = [(1 + offset,)]
self._max_steps = 7
# More obstacles
if difficulty >= 3:
max_num_obstacles = 2
# Even more obstacles (3 per layer), and more targets
if difficulty >= 4:
min_num_obstacles = 2
max_num_obstacles = 3
# Make the targets higher
if difficulty >= 5:
targets_ys_range = [(2 + offset,)]
self._max_steps = 14
# Even higher, and more obstacles
if difficulty >= 6:
min_num_obstacles = 3
targets_ys_range = [(3 + offset,)]
self._max_steps = 21
# Second layer of obstacles, and more targets
if difficulty >= 7:
obstacles_ys_range = [(0, 2)]
# Move targets higher
if difficulty >= 8:
targets_ys_range = [(4 + offset,)]
# More obstacles, higher targets
if difficulty >= 9:
obstacles_ys_range = [(0, 2, 4)]
targets_ys_range = [(5 + offset,)]
elif difficulty == "mixed_height_targets":
# Targets at different heights, instead of a single height.
min_num_targets = 1
max_num_targets = 1
min_num_obstacles = 3
max_num_obstacles = 3
obstacles_ys_range = [(0, 2, 4)]
targets_ys_range = [(1 + offset, 3 + offset, 5 + offset,)]
self._max_steps = 26
elif difficulty == "additional_layer":
# Targets at a new height
min_num_targets = 3
max_num_targets = 3
min_num_obstacles = 3
max_num_obstacles = 3
obstacles_ys_range = [(0, 2, 4, 6)]
targets_ys_range = [(7 + offset,)]
self._max_steps = 26
else:
raise ValueError("Unrecognized difficulty: %s" % difficulty)
return ConnectingGenerator(
num_obstacles_range=(min_num_obstacles, max_num_obstacles + 1),
num_targets_range=(min_num_targets, max_num_targets+1),
scene_width=self._generator_width,
random_state=self._random_state,
obstacles_ys_range=obstacles_ys_range,
targets_ys_range=targets_ys_range,
min_obstacles_interdistance=constants.SMALL_WIDTH * 2,
min_targets_interdistance=0.)
class ConnectingGenerator(stacking.StackingGenerator):
"""Generates a set of horizontal obstacles and targets."""
def __init__(self,
num_obstacles_range,
num_targets_range,
obstacles_ys_range,
targets_ys_range,
scene_width,
random_state,
obstacles_width_range=(10, 40),
use_legacy_obstacles_heights=False,
targets_side=5,
obstacles_height=5,
min_obstacles_interdistance=0.,
min_targets_interdistance=0.,
**kwargs):
"""Initialize the generator.
Args:
num_obstacles_range: a tuple indicating the range of obstacles
that will be in the generated scene, from low (inclusive) to high
(exclusive). This counts the number of obstacles per height.
num_targets_range: a tuple indicating the range of targets
that will be in the generated scene, from low (inclusive) to high
(exclusive). This counts the total number of targets.
      obstacles_ys_range: y-positions to draw the obstacles from. A tuple of
        y-positions will be sampled from this range. This is scaled
appropriately, so that -1 corresponds to an object below the floor, 0
to an object on the floor, etc.
      targets_ys_range: y-positions to draw the targets from. A tuple of
        y-positions will be sampled from this range. This is scaled
appropriately, so that -1 corresponds to an object below the floor, 0
to an object on the floor, etc.
scene_width: the width of the scene.
random_state: a np.random.RandomState object
obstacles_width_range: the range of widths for obstacles, from low
(inclusive) to high (exclusive).
use_legacy_obstacles_heights: In the first versions, obstacles would be
placed with less margin compared to a corresponding stack of blocks.
        With the new versions, obstacles are thinner and there is therefore more
margin around them. This makes the task easier, and using glue more
helpful.
targets_side: The width and height of targets. Only used when
`use_legacy_obstacles_heights` is set to False.
obstacles_height: The height of the obstacles. Only used when
`use_legacy_obstacles_heights` is set to False.
min_obstacles_interdistance: The minimal horizontal distance between
obstacles at the same height. Default=0.
min_targets_interdistance: The minimal horizontal distance between
targets at the same height. Default=0.
**kwargs: additional keyword arguments passed to super
"""
super(ConnectingGenerator, self).__init__(
num_blocks_range=None,
scene_width=scene_width,
random_state=random_state,
**kwargs)
self._num_obstacles_range = num_obstacles_range
self._num_targets_range = num_targets_range
self._targets_side = targets_side
self._min_obstacles_interdistance = min_obstacles_interdistance
self._min_targets_interdistance = min_targets_interdistance
self._obstacles_width_range = obstacles_width_range
self._use_legacy_obstacles_heights = use_legacy_obstacles_heights
if use_legacy_obstacles_heights:
self.obstacles_height = self.height
self._targets_height = self.height
self._obstacles_ys_range = [
self._scale_y_range(obstacles_ys, margin=self.margin)
for obstacles_ys in obstacles_ys_range]
self._targets_ys_range = [
self._scale_y_range(targets_ys, offset=self.height/2.)
for targets_ys in targets_ys_range]
else:
self.obstacles_height = obstacles_height
self._targets_height = targets_side
scale_y_fn = lambda y: (y + 0.5) * self.height
scale_ys_fn = lambda ys: tuple([scale_y_fn(y) for y in ys])
self._obstacles_ys_range = [
scale_ys_fn(obstacles_ys) for obstacles_ys in obstacles_ys_range]
self._targets_ys_range = [
scale_ys_fn(targets_ys) for targets_ys in targets_ys_range]
def _generate_line_of_blocks(self,
available_widths,
num_blocks,
min_available_width=0,
min_interdistance=0.):
if num_blocks == 0:
return [], []
# Pick a set of block widths, and check that the sum of the widths is
    # not greater than the scene width plus some buffer room. Keep regenerating
# blocks until this is the case.
available_width = -1
while available_width < min_available_width:
blocks_lengths = self.random_state.choice(
available_widths, size=[num_blocks])
available_width = self.scene_width - np.sum(blocks_lengths)
# Compute the left and right edges of each block, assuming the blocks are
# all placed right next to each other beginning from the left side of the
# scene.
blocks_begins = np.concatenate(
[np.array([0], dtype=np.int32), np.cumsum(blocks_lengths)[:-1]])
blocks_ends = np.cumsum(blocks_lengths)
    # `available_width` now is the amount of space left on the floor, not taken
    # up by blocks. We split this into a few chunks of random size to space
    # the blocks out along the floor.
while True:
relative_shifts = self.random_state.uniform(0., 1., size=[num_blocks + 1])
relative_shifts /= np.sum(relative_shifts)
if len(relative_shifts) < 3 or (
np.min(relative_shifts[1:-1]) > min_interdistance / available_width):
break
relative_shifts = np.floor(relative_shifts * available_width)
shifts = np.cumsum(relative_shifts.astype(np.int32))[:-1]
blocks_begins += shifts
blocks_ends += shifts
return blocks_begins, blocks_ends
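  # Worked example of the spacing logic above, with hypothetical values
  # scene_width=100 and sampled widths [20, 10, 30]: the blocks are first
  # packed against the left wall (begins=[0, 20, 30], ends=[20, 30, 60]),
  # the remaining 40 units of floor are split into num_blocks + 1 random
  # chunks, and the cumulative sums of the first 3 chunks shift each block
  # to the right, spreading the line of blocks across the scene.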
def _scale_y_range(self, y_range, offset=0., margin=0.):
return tuple([offset + y * (margin + self.height) for y in y_range])
def generate_one(self):
"""Generate a single scene.
Returns:
observation: a block_utils.BlocksObservation object
"""
# Pick the set of y-positions we want for our obstacles
idx = np.arange(len(self._obstacles_ys_range))
obstacles_ys_range = self._obstacles_ys_range[self.random_state.choice(idx)]
# Place the obstacles at each level, going from bottom to top
obstacles = []
for y in obstacles_ys_range:
available_widths = np.arange(*self._obstacles_width_range)
obstacles_begins, obstacles_ends = self._generate_line_of_blocks(
available_widths=available_widths,
num_blocks=self.random_state.randint(*self._num_obstacles_range),
min_available_width=self.small_width + 1,
min_interdistance=self._min_obstacles_interdistance)
# Now actually create the obstacles
for obstacle_begin, obstacle_end in zip(obstacles_begins, obstacles_ends):
center = (obstacle_begin + obstacle_end) // 2
width = obstacle_end - obstacle_begin
obstacle = block_utils.Block(
x=center, y=y, width=width, height=self.obstacles_height)
obstacles.append(obstacle)
# Pick y positions for the targets
idx = np.arange(len(self._targets_ys_range))
targets_ys_range = self._targets_ys_range[self.random_state.choice(idx)]
targets = []
for y in targets_ys_range:
available_widths = [self.small_width, self.medium_width]
if not self._use_legacy_obstacles_heights:
available_widths = [self._targets_side]
num_targets = self.random_state.randint(*self._num_targets_range)
targets_begins, targets_ends = self._generate_line_of_blocks(
available_widths=available_widths,
num_blocks=num_targets,
min_interdistance=self._min_targets_interdistance)
for target_begin, target_end in zip(targets_begins, targets_ends):
center = (target_begin + target_end) // 2
width = target_end - target_begin
target = block_utils.Block(
x=center, y=y, width=width, height=self._targets_height)
targets.append(target)
observation_blocks = self._place_available_objects()
floor = self._place_floor()
observation = block_utils.BlocksObservation(
blocks=[floor] + observation_blocks,
obstacles=obstacles,
targets=targets,
balls=[])
return observation
| dm_construction-master | dm_construction/environments/connecting.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A construction environment where the task is to fill in silhouettes.
See: Bapst, V., Sanchez-Gonzalez, A., Doersch, C., Stachenfeld, K., Kohli, P.,
Battaglia, P., & Hamrick, J. (2019, May). Structured agents for physical
construction. In International Conference on Machine Learning (pp. 464-474).
"""
from dm_construction.environments import stacking
from dm_construction.unity import constants as unity_constants
from dm_construction.utils import block as block_utils
import numpy as np
# Approximate vertical correction to leave between blocks, due to contact
# precision.
_VERTICAL_CORRECTION = 0.2
def _get_horizontal_blocks(
blocks, index_getter=lambda x: x, threshold_sine=0.05):
sine_index = index_getter(unity_constants.SINE_ANGLE_FEATURE_INDEX)
return blocks[np.abs(blocks[:, sine_index]) < threshold_sine]
def _map_slice(slice_, mapper):
"""Applies `mapper` to a contiguous slice `slice_`."""
assert slice_.step is None or slice_.step == 1
  new_slice = slice(mapper(slice_.start), 1 + mapper(slice_.stop - 1))
assert slice_.stop - slice_.start == new_slice.stop - new_slice.start
return new_slice
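# Minimal usage sketch of `_map_slice` with a hypothetical offset mapper: the
# mapper must preserve the slice length, so a constant index shift is the
# typical use case.
def _example_map_slice():
  shifted = _map_slice(slice(2, 5), lambda i: i + 10)
  return shifted  # == slice(12, 15)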
def _target_fraction(
target, horizontal_blocks, index_getter=lambda x: x, threshold_size=1e-3):
"""Returns the fraction of a target covered by a block of the same size."""
if horizontal_blocks.shape[0] == 0:
return 0.
target_width = target[index_getter(unity_constants.WIDTH_FEATURE_INDEX)]
target_height = target[index_getter(unity_constants.HEIGHT_FEATURE_INDEX)]
position_slice = _map_slice(
unity_constants.POSITION_FEATURE_SLICE, index_getter)
distances = np.linalg.norm(
horizontal_blocks[:, position_slice] - target[position_slice], axis=1)
closest_index = np.argmin(distances)
closest_block = horizontal_blocks[closest_index]
closest_block_width = closest_block[
index_getter(unity_constants.WIDTH_FEATURE_INDEX)]
closest_block_height = closest_block[
index_getter(unity_constants.HEIGHT_FEATURE_INDEX)]
if (np.abs(closest_block_width-target_width) > threshold_size or
np.abs(closest_block_height-target_height) > threshold_size):
return 0.
# Calculate the fraction of the target_area that is covered by the closest
# block, assuming they are both horizontal, and of the same size.
vector_distance = (closest_block[position_slice] - target[position_slice])
covered_area = np.prod(
np.maximum([target_width, target_height] - np.abs(vector_distance), 0.))
target_area = target_width * target_height
return covered_area / target_area
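# Worked example of the fraction computed by `_target_fraction`, using
# hypothetical sizes: a 10 x 5 target and a same-sized horizontal block whose
# center is offset by (1, 0) overlap over an area of (10 - 1) * (5 - 0) = 45,
# i.e. a fraction of 45 / 50 = 0.9.
def _example_target_fraction_arithmetic():
  target_width, target_height = 10., 5.
  offset = np.abs(np.array([1., 0.]))
  sides = np.array([target_width, target_height])
  covered_area = np.prod(np.maximum(sides - offset, 0.))
  return covered_area / (target_width * target_height)  # == 0.9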
class ConstructionSilhouette(stacking.ConstructionStacking):
"""Task consisting of filling in silhouettes, avoiding obstacles.
In the Silhouette task, the agent must place blocks to overlap with target
blocks in the scene, while avoiding randomly positioned obstacles. The reward
function is: +1 for each placed block which overlaps at least 90% with a
target block of the same size; and -0.5 for each block set as sticky. The
task-specific termination criterion is achieved when there is at least 90%
overlap with all targets.
Generalization levels:
* `"double_the_targets"`: Scenes with 16 target blocks (8 is maximum number
of target blocks in other levels).
"""
def __init__(self,
unity_environment,
sticky_penalty=0.5,
num_allowed_extra_steps=0,
**stacking_kwargs):
"""Inits the environment.
Args:
unity_environment: See base class.
sticky_penalty: See base class.
      num_allowed_extra_steps: Number of extra steps to allow the agent to take
        in an episode, in addition to a number of steps equal to the number of
        targets. Defaults to zero, meaning that (assuming cheap enough glue) an
        optimal agent should place blocks exactly on the target locations only.
      **stacking_kwargs: keyword arguments passed to
        stacking.ConstructionStacking.
Raises:
ValueError: If curriculum_type is not in ["ys"].
"""
self._num_allowed_extra_steps = num_allowed_extra_steps
self._max_steps = None
super(ConstructionSilhouette, self).__init__(
unity_environment=unity_environment,
sticky_penalty=sticky_penalty,
block_replacement=True,
max_difficulty=7,
target_color=(0., 1., 0., 0.3),
progress_threshold=0.98,
**stacking_kwargs)
def _compute_max_episode_reward(self, obs):
return len(self._initial_scene.targets)
def _maybe_update_max_steps(self):
self._max_steps = (
len(self._initial_scene.targets) + self._num_allowed_extra_steps)
def _get_task_reward(self, obstacles, targets, blocks):
"""Computes the current score based on the targets and placed blocks."""
del obstacles
targets_score = 0.
targets_fraction = 0.
# Filter to keep only horizontal blocks.
horizontal_blocks = _get_horizontal_blocks(blocks)
targets_to_reward = targets
for _, target in enumerate(targets_to_reward):
target_fraction = _target_fraction(
target, horizontal_blocks)
targets_fraction += target_fraction
targets_score += 1. if target_fraction > 0.9 else 0.
return targets_score
def _get_generator(self, difficulty):
min_num_obstacles = 1
if isinstance(difficulty, int):
# One additional target per difficulty up to 8 (difficulty=7)
max_num_targets = difficulty + 1
# Max number of levels added one by one, capped at 6.
max_num_levels = min(difficulty + 1, 6)
# Max number of obstacles added one by one starting at difficulty=2,
# and capped at 4.
max_num_obstacles = max(0, min(difficulty - 1, 4))
min_num_targets = max_num_targets
min_num_obstacles = min(min_num_obstacles, max_num_obstacles)
elif difficulty == "double_the_targets":
# Twice as many targets
max_num_levels = 6
min_num_targets = 16
max_num_targets = 16
max_num_obstacles = 4
else:
raise ValueError("Unrecognized difficulty: %s" % difficulty)
num_levels_range = (max_num_levels, max_num_levels+1)
num_targets_range = (min_num_targets, max_num_targets+1)
num_obstacles_range = (min_num_obstacles, max_num_obstacles+1)
return SilhouetteGenerator(
num_obstacles_range=num_obstacles_range,
num_targets_range=num_targets_range,
scene_width=self._generator_width,
random_state=self._random_state,
num_levels_range=num_levels_range)
def _almost_equal(a, b, threshold=1e-3):
return np.abs(a - b) < threshold
def _bounds_to_center_dims_1d(bds):
"""Get midpoints of bounds.
Args:
bds: list of numbers denoting intervals (bds[i] = start of interval i,
end of interval i-1)
  Returns:
    midpoints: midpoints of intervals. Note len(midpoints) = len(bds)-1
      because bds contains both start + end bounds, meaning there will be one
      more bds entry than there are intervals.
    widths: widths of the intervals, i.e. np.diff(bds).
"""
return .5 * (bds[:-1] + bds[1:]), np.diff(bds)
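# Minimal sketch of `_bounds_to_center_dims_1d` with hypothetical bounds: the
# bounds [0, 5, 15] describe two intervals with midpoints [2.5, 10.] and
# widths [5., 10.].
def _example_bounds_to_center_dims_1d():
  midpoints, widths = _bounds_to_center_dims_1d(np.array([0., 5., 15.]))
  return midpoints, widths  # ([2.5, 10.], [5., 10.])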
def _center_dims_to_bounding_boxes(cxy_dxy):
"""Convert from center-dimensions coordinates to bounding box coordinates.
Args:
cxy_dxy: n x 4 matrix describing rectangles in terms of center coordinates
and dimensions in x, y directions
(cxy_dxy[i] = [center_x_i, center_y_i, dimension_x_i, dimension_y_i])
Returns:
lxy_uxy: n x 4 matrix describing rectangles in terms of lower/upper bounds
in x, y directions
(lxy_uxy[i] = [lower_bd_x_i, lower_bd_y_i, upper_bd_x_i, upper_bd_y_i])
"""
if not list(cxy_dxy): return []
dim = len(cxy_dxy.shape)
if dim == 1: cxy_dxy = cxy_dxy.reshape(1, -1)
c_xy = cxy_dxy[:, :2]
d_xy = cxy_dxy[:, 2:]
if dim == 1:
return np.concatenate([c_xy - d_xy * .5, c_xy + d_xy * .5],
axis=1).reshape(-1)
else:
return np.concatenate([c_xy - d_xy * .5, c_xy + d_xy * .5], axis=1)
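# Minimal sketch of `_center_dims_to_bounding_boxes` with a hypothetical
# rectangle: a 2 x 4 rectangle centered at the origin spans from (-1, -2) to
# (1, 2).
def _example_center_dims_to_bounding_boxes():
  lxy_uxy = _center_dims_to_bounding_boxes(np.array([[0., 0., 2., 4.]]))
  return lxy_uxy  # == [[-1., -2., 1., 2.]]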
def _get_bounds_1d_discrete(stack_max_length, discrete_values,
random_state, offset=0.):
"""Get bounds of blocks in 1 dimension.
Args:
stack_max_length: max length of blocks
discrete_values: discrete values used to partition stack_max_length.
random_state: np.random.RandomState(seed)
offset: how much to offset first point by (default=0)
Returns:
pts: array of starts, ends of blocks
[s_block_0, s_block_1/e_block_1, ..., s_block_n/e_block_n-1, e_block_n]
Raises:
    ValueError: if random_state is not supplied or is None.
"""
# check inputs
if random_state is None:
raise ValueError("random_state must be supplied and not None.")
n_max = int(np.ceil(stack_max_length * 1. / np.min(discrete_values)))
dim = random_state.choice(discrete_values, size=n_max)
y_bds = np.insert(np.cumsum(dim), 0, 0) + offset
# sanity checks
assert y_bds[-1] >= stack_max_length
assert np.all(np.diff(y_bds) > 0.)
y_bds_lt_max_len = y_bds[y_bds < stack_max_length]
return y_bds_lt_max_len
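# Minimal usage sketch of `_get_bounds_1d_discrete`, with hypothetical lengths
# and seed: the returned bounds start at the offset (0 here), grow by steps
# drawn from `discrete_values`, and stop strictly below `stack_max_length`.
def _example_get_bounds_1d_discrete():
  bounds = _get_bounds_1d_discrete(
      stack_max_length=30, discrete_values=(5, 10),
      random_state=np.random.RandomState(0))
  return bounds  # strictly increasing bounds in [0, 30)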
def _tessellate_discrete_rectangles_by_row_from_options(
scene_width, scene_height=None,
random_state=None,
discrete_widths=(5, 10, 20, 40),
discrete_heights=(5, 10),
do_x_offset=True, return_as="cxy_dxy"):
"""Method 1 for tessellating plane with rectangles.
  Tessellate with rule:
  1) sample row heights from discrete_heights until the scene is filled
     vertically.
  2) for each row, sample block widths uniformly from discrete_widths until
     the scene is filled horizontally.
  3) for each row: offset blocks from the left wall by a random amount if
     do_x_offset.
  4) return as cxy_dxy (center/dimensions) or lxy_uxy (lower/upper bounds)
Args:
scene_width: width of scene
scene_height: height of scene (default=scene_width)
random_state: np.random.RandomState(seed)
discrete_widths: Iterable for values of the block widths.
discrete_heights: Iterable for values of the block heights.
do_x_offset: jitter x offset of blocks in each row (default=True)
return_as: "cxy_dxy" for center (x,y), dimensions (x,y) format
(cxy_dxy[i] = [cx, cy, dx, dy])
"lxy_uxy" for lower (x,y) bounds, upper (x,y) bounds format
(lxy_uxy[i] = [lx, ly, ux, uy])
(default="cxy_dxy")
  Returns:
    rectangle_coords: np.array of rectangles in format "lxy_uxy" or "cxy_dxy"
      as specified by `return_as`.
  Raises:
    ValueError: if random_state is not supplied or is None, or if `return_as`
      is not one of "cxy_dxy" or "lxy_uxy".
"""
y_bds = _get_bounds_1d_discrete(scene_height, discrete_heights, random_state)
y_c, y_dim = _bounds_to_center_dims_1d(y_bds)
ny = len(y_c)
coords = []
for iy in range(ny):
# get widths of blocks in row
if do_x_offset:
x_offset = random_state.rand() * np.max(discrete_widths)
else: x_offset = 0.
x_bds = _get_bounds_1d_discrete(
scene_width, discrete_widths, random_state, offset=x_offset)
x_c, x_dim = _bounds_to_center_dims_1d(x_bds)
# add x + y features of rectangles to box
coords_i = np.concatenate([ii.reshape(-1, 1) for ii in
[x_c, np.repeat(y_c[iy], len(x_c)),
x_dim, np.repeat(y_dim[iy], len(x_c))]],
axis=1)
coords.append(coords_i)
if return_as == "cxy_dxy":
return np.concatenate(coords, axis=0)
elif return_as == "lxy_uxy":
return _center_dims_to_bounding_boxes(np.concatenate(coords, axis=0))
else:
raise ValueError("return_as type '{}' not recognized.".format(return_as) +
" Should be 'cxy_dxy' or 'lxy_uxy'")
class SilhouetteGenerator(stacking.StackingGenerator):
"""Generates a set of horizontal obstacles and targets."""
def __init__(self,
num_obstacles_range,
num_targets_range,
num_levels_range,
scene_width,
random_state,
height_distribution_exponent=1.75,
obstacles_height=5,
**kwargs):
"""Initialize the generator.
Args:
num_obstacles_range: a tuple indicating the range of obstacles
that will be in the generated scene, from low (inclusive) to high
(exclusive). This counts the number of obstacles per height.
num_targets_range: a tuple indicating the range of targets
that will be in the generated scene, from low (inclusive) to high
(exclusive). This counts the total number of targets.
num_levels_range: a tuple indicating the range of levels that will be in
the generated scene, from low (inclusive) to high (exclusive).
scene_width: the width of the scene.
random_state: a np.random.RandomState object
      height_distribution_exponent: probability of choosing objects at
        different levels will be proportional to
        level**height_distribution_exponent for each object.
      obstacles_height: The height of the obstacles.
**kwargs: additional keyword arguments passed to super
"""
super(SilhouetteGenerator, self).__init__(
num_blocks_range=None,
scene_width=scene_width,
random_state=random_state,
**kwargs)
self._num_obstacles_range = num_obstacles_range
self._num_targets_range = num_targets_range
self._height_distribution_exponent = height_distribution_exponent
self.obstacles_height = obstacles_height
self._num_levels_range = num_levels_range
self._corrected_height = self.height + _VERTICAL_CORRECTION
def _get_supported_blocks(
self, tessellation_blocks, existing_blocks, min_overlap):
"""Returns tessellation blocks on top of previous blocks or the floor.
Args:
tessellation_blocks: Array of blocks left in the tessellation
with shape [num_blocks, 4], where the last axis indicates: center_x,
center_y, width, height.
existing_blocks: List of existing blocks, formatted as the rows in
tessellation_blocks
min_overlap: minimum overlap between an existing block and a tessellation
        block, for the second one to be considered supported.
    Returns:
      List of tuples (index, block), corresponding to rows of
      `tessellation_blocks` satisfying the support condition.
"""
# Blocks on the floor.
possible_blocks = [
(i, candidate_block)
for i, candidate_block in enumerate(tessellation_blocks)
if _almost_equal(candidate_block[1], self._corrected_height/2)]
# Blocks overlapping with other blocks underneath.
for existing_block in existing_blocks:
for i, candidate_block in enumerate(tessellation_blocks):
# If it is one level above an existing block.
if _almost_equal(candidate_block[1],
existing_block[1] + self._corrected_height):
distance_between_centers = np.abs(
candidate_block[0] - existing_block[0])
combined_half_length = (candidate_block[2]/2 + existing_block[2]/2 -
self.margin)
# If it overlaps enough with the block at the level below.
if distance_between_centers < combined_half_length - min_overlap:
possible_blocks.append((i, candidate_block))
return possible_blocks
def _sample_candidates(self, candidates, num_samples=1):
candidates_heights = np.array([candidate[1][1] for candidate in candidates])
# Levels as 1, 2, 3
candidates_levels_num = (candidates_heights+self.height/2)/self.height
if not candidates:
return []
    # We want the probability to increase with height, to build more
    # tower-like structures and fewer flat structures.
probs = candidates_levels_num**self._height_distribution_exponent
probs = probs/probs.sum()
chosen_indices = self.random_state.choice(
np.arange(len(candidates)), p=probs, size=num_samples, replace=False)
return [candidates[i] for i in chosen_indices]
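  # Worked example of the height weighting above, with the default exponent of
  # 1.75: a candidate at level 2 is sampled roughly 2**1.75 ~= 3.4 times more
  # often than a candidate at level 1, biasing scenes towards taller,
  # tower-like silhouettes.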
def generate_one(self):
"""Generate a single scene.
Returns:
observation: a block_utils.BlocksObservation object
"""
    # Tessellate space using fractions of the block lengths, with a small
    # margin, and with the standard block height.
discrete_heights = (self._corrected_height,)
discrete_widths = (self.small_width+self.margin,
self.medium_width+self.margin,
self.large_width+self.margin)
# Sample a maximum height for the scene.
num_levels = self.random_state.randint(*self._num_levels_range)
this_scene_height = self.height * (num_levels+0.5)
# Set the targets.
num_targets = self.random_state.randint(*self._num_targets_range)
max_attempts = 10
min_overlap = 0.9 * self.small_width
for _ in range(max_attempts):
      # Tessellate space. This returns an array of blocks in the tessellation
      # with shape [num_blocks, 4], where the last axis indicates: center_x,
      # center_y, width, height.
tessellation_blocks = (
_tessellate_discrete_rectangles_by_row_from_options(
scene_width=self.scene_width, scene_height=this_scene_height,
discrete_widths=discrete_widths,
discrete_heights=discrete_heights,
random_state=self.random_state,
do_x_offset=True, return_as="cxy_dxy"))
# Pick num_targets blocks from possible options.
existing_blocks = []
for _ in range(num_targets):
candidates = self._get_supported_blocks(
tessellation_blocks, existing_blocks, min_overlap=min_overlap)
if not candidates:
break
block_i, block = self._sample_candidates(candidates, num_samples=1)[0]
tessellation_blocks = np.delete(tessellation_blocks, block_i, axis=0)
existing_blocks.append(block)
else:
        # If we successfully added as many targets as we needed, we do not need
        # to keep attempting, so we break out of the attempts loop.
break
else:
      # If we got here, it is because we did not break out of the loop, and
      # we have exhausted all attempts.
raise ValueError(
"Maximum number of attempts reached to generate silhouette.")
targets = [block_utils.Block(
x=b[0], y=b[1]+_VERTICAL_CORRECTION/2, width=b[2]-self.margin,
height=b[3]-_VERTICAL_CORRECTION) for b in existing_blocks]
# Set the obstacles.
num_obstacles = self.random_state.randint(*self._num_obstacles_range)
# We only require negative overlap for obstacles.
candidates = self._get_supported_blocks(
tessellation_blocks, existing_blocks, min_overlap=-2.)
sampled_candidates = self._sample_candidates(
candidates, num_samples=min(num_obstacles, len(candidates)))
obstacles = [block_utils.Block(
x=block[0], y=block[1], width=block[2]-self.margin,
height=self.obstacles_height) for _, block in sampled_candidates]
tessellation_blocks = np.delete(
tessellation_blocks,
[block_i for block_i, _ in sampled_candidates], axis=0)
observation_blocks = self._place_available_objects()
floor = self._place_floor()
observation = block_utils.BlocksObservation(
blocks=[floor] + observation_blocks,
obstacles=obstacles,
targets=targets,
balls=[])
return observation
| dm_construction-master | dm_construction/environments/silhouette.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_construction-master | dm_construction/environments/__init__.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A construction environment where the task is to get a ball from A to B.
See: Hamrick, J. B., Bapst, V., Sanchez-Gonzalez, A., Pfaff, T., Weber, T.,
Buesing, L., & Battaglia, P. W. (2020). Combining Q-Learning and Search with
Amortized Value Estimates. ICLR 2020.
"""
import collections
import itertools
from dm_construction.environments import stacking
from dm_construction.unity import constants as unity_constants
from dm_construction.utils import block as block_utils
from dm_construction.utils import constants
from dm_construction.utils import geometry
import dm_env
from dm_env import specs
import numpy as np
_OBJECT_TYPE_NAMES = [
constants.BLOCK,
constants.OBSTACLE,
constants.TARGET,
constants.AVAILABLE_BLOCK,
constants.BALL
]
def _rect_or_ramp_blocks_collision(block0, block1, epsilon=1e-4):
"""Returns true if there is a collision between ramps/boxes."""
x0 = block0[unity_constants.POSITION_X_FEATURE_INDEX]
y0 = block0[unity_constants.POSITION_Y_FEATURE_INDEX]
w0 = block0[unity_constants.WIDTH_FEATURE_INDEX]
h0 = block0[unity_constants.HEIGHT_FEATURE_INDEX]
angle0 = _get_angle(block0)
x1 = block1[unity_constants.POSITION_X_FEATURE_INDEX]
y1 = block1[unity_constants.POSITION_Y_FEATURE_INDEX]
w1 = block1[unity_constants.WIDTH_FEATURE_INDEX]
h1 = block1[unity_constants.HEIGHT_FEATURE_INDEX]
angle1 = _get_angle(block1)
if (block0[unity_constants.IS_BOX_FEATURE_INDEX] and
block1[unity_constants.IS_BOX_FEATURE_INDEX]):
return geometry.rect_overlap(x0, y0, w0, h0, angle0,
x1, y1, w1, h1, angle1) > epsilon
elif (block0[unity_constants.IS_RAMP_FEATURE_INDEX] and
block1[unity_constants.IS_RAMP_FEATURE_INDEX]):
return geometry.ramp_overlap(x0, y0, w0, h0, angle0,
x1, y1, w1, h1, angle1) > epsilon
elif (block0[unity_constants.IS_BOX_FEATURE_INDEX] and
block1[unity_constants.IS_RAMP_FEATURE_INDEX]):
return geometry.rect_ramp_overlap(x0, y0, w0, h0, angle0,
x1, y1, w1, h1, angle1) > epsilon
elif (block0[unity_constants.IS_RAMP_FEATURE_INDEX] and
block1[unity_constants.IS_BOX_FEATURE_INDEX]):
return geometry.rect_ramp_overlap(x1, y1, w1, h1, angle0,
x0, y0, w0, h0, angle1) > epsilon
else:
raise ValueError()
def _rect_or_ramp_ball_blocks_collision(block_box, block_ball, epsilon=1e-4):
"""Returns true if there is a collision between a box/ramp and a ball."""
x0 = block_box[unity_constants.POSITION_X_FEATURE_INDEX]
y0 = block_box[unity_constants.POSITION_Y_FEATURE_INDEX]
w0 = block_box[unity_constants.WIDTH_FEATURE_INDEX]
h0 = block_box[unity_constants.HEIGHT_FEATURE_INDEX]
angle0 = _get_angle(block_box)
x1 = block_ball[unity_constants.POSITION_X_FEATURE_INDEX]
y1 = block_ball[unity_constants.POSITION_Y_FEATURE_INDEX]
w1 = block_ball[unity_constants.WIDTH_FEATURE_INDEX]
if block_box[unity_constants.IS_BOX_FEATURE_INDEX]:
return geometry.rect_ball_overlap(
x0, y0, w0, h0, angle0, x1, y1, w1) > epsilon
elif block_box[unity_constants.IS_RAMP_FEATURE_INDEX]:
return geometry.ramp_ball_overlap(
x0, y0, w0, h0, angle0, x1, y1, w1) > epsilon
else:
raise ValueError()
def _blocks_collision(block1, block2):
if (not block1[unity_constants.IS_BALL_FEATURE_INDEX] and
not block2[unity_constants.IS_BALL_FEATURE_INDEX]):
return _rect_or_ramp_blocks_collision(block1, block2)
elif block2[unity_constants.IS_BALL_FEATURE_INDEX]:
return _rect_or_ramp_ball_blocks_collision(block1, block2)
elif block1[unity_constants.IS_BALL_FEATURE_INDEX]:
return _rect_or_ramp_ball_blocks_collision(block2, block1)
else:
raise ValueError(
"Only collisions between boxes, and box and ball are supported")
def _get_angle(block):
return np.arctan2(block[unity_constants.SINE_ANGLE_FEATURE_INDEX],
block[unity_constants.COSINE_ANGLE_FEATURE_INDEX])
MarbleRunEpisodeParams = collections.namedtuple(
"MarbleRunEpisodeParams",
["num_obstacles", # Number of obstacles in the episode.
"num_targets", # Number of targets in the episode.
"num_balls", # Number of balls in the episode.
"min_height_obstacle", # Vertical position of the lowest obstacle.
"max_height_obstacle", # Vertical position of the highest obstacle.
"max_height_target", # Vertical position of the highest target.
# Maximum horizontal distance between a ball and its closest target in the
# episode. This distance is discretized as follows:
# 0: less than 1/3 the width of the scene.
# 1: between 1/3 and 2/3 the width of the scene.
# 2: more than 2/3 the width of the scene.
"discretized_ball_target_distance",
]
)
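# Worked example of the `discretized_ball_target_distance` binning, assuming
# a hypothetical scene width of 300: a horizontal ball-to-target distance of
# 120 lies between one third (100) and two thirds (200) of the scene width,
# so it is digitized to bin 1.
def _example_discretized_distance():
  scene_width = 300.
  return int(np.digitize(120., [scene_width / 3, scene_width * 2 / 3]))  # == 1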
class ConstructionMarbleRun(stacking.ConstructionStacking):
"""Environment for Marble Run task.
The goal in Marble Run is to stack blocks to enable a marble to get from its
original starting position to a goal location, while avoiding obstacles. At
each step, the agent may choose from a number of differently shaped
rectangular blocks as well as ramp shapes, and may choose to make these blocks
"sticky" (for a price) so that they stick to other objects in the scene. The
episode ends once the agent has created a structure that would get the marble
to the goal. The agent receives a reward of one if it solves the scene, and
zero otherwise.
"""
def __init__(self,
unity_environment,
length_dynamics=1,
reward_per_target=1.,
max_num_bounces=8,
curriculum_sample_geom_p=0.5,
sticky_penalty=0.0,
**stacking_kwargs):
"""Inits the environment.
Args:
unity_environment: See base class.
length_dynamics: Length of the augmented observation of the ball. The
observation will be a sequence of the state of the system between
bounces of the ball.
reward_per_target: Reward associated with colliding with each target.
max_num_bounces: Maximum number of objects the ball can bounce off/
interact with to evaluate the reward. Must be >= length_dynamics - 1 .
curriculum_sample_geom_p: See base class.
sticky_penalty: See base class.
**stacking_kwargs: keyword arguments passed to
stacking.ConstructionStacking.
"""
# Bounciness of the balls.
self._bounciness = 0.
# Linear drag of the balls.
self._linear_drag = 0.
# Angular drag of the balls.
self._angular_drag = 0.
# Density of the ball.
self._ball_density = 1.
# If True the ball also gets glued to sticky objects.
self._glueable_ball = False
    # Maximum number of timesteps a ball can spend in a long bounce, before
# the bounce is split into two.
self._max_simulation_steps_per_bounce = 1000
# If True, the dynamics are only evaluated until one of the termination
# conditions is reached, otherwise, the dynamics are always evaluated up to
# max_num_bounces. If True, the observation sequence is padded with the last
# observation up to length_dynamics, whenever the simulation is too short.
self._end_dynamics_on_termination = True
self._length_dynamics = length_dynamics
self._reward_per_target = reward_per_target
self._max_num_bounces = max_num_bounces
if self._max_num_bounces < self._length_dynamics - 1:
raise ValueError(
"`max_num_bounces` cannot be smaller than `length_dynamics - 1`.")
super(ConstructionMarbleRun, self).__init__(
unity_environment=unity_environment,
sticky_penalty=sticky_penalty,
block_replacement=True,
max_difficulty=8,
generator_width=250,
curriculum_sample_geom_p=curriculum_sample_geom_p,
progress_threshold=0.99,
**stacking_kwargs)
def _split_available_obstacles_placed_balls(self, blocks):
"""Splits observations for available blocks, obstacles and placed blocks."""
(available, targets, obstacles, placed
) = self._split_available_obstacles_placed(blocks)
    # We know that the balls are always last, since they are instantiated in
# the scene last.
num_balls = len(self._initial_scene.balls)
balls = placed[-num_balls:]
placed = placed[:-num_balls]
return available, targets, obstacles, placed, balls
def _check_ball_collision(self, blocks):
"""Verifies if any of the balls is colliding with the placed blocks."""
# We check spawn collisions manually because currently the Unity Env only
    # detects spawn collisions after a simulation has been run. However, if
    # there is a spawn collision with the bumper, we don't even want to run
    # a simulation.
(unused_available, unused_targets, unused_obstacles, placed, balls
) = self._split_available_obstacles_placed_balls(blocks)
for ball in balls:
for other_block in placed:
if _blocks_collision(ball, other_block):
return True
return False
def _add_balls_to_scene(self):
reset_ball_actions = []
for ball_i, ball in enumerate(self._initial_scene.balls):
reset_ball_actions.append({
# Remove the display ball.
"Delete": 1.,
"SelectId": self._ball_ids[ball_i],
# Add the actual ball.
"SpawnBlock": 1.,
"Shape": ball.shape,
"FreeBody": 1,
"SetId": self._ball_ids[ball_i],
"SetPosX": ball.x,
"SetPosY": ball.y,
"Width": ball.width,
"Height": ball.height,
"Density": self._ball_density,
"SetAngle": ball.angle,
"RGBA": self._ball_color,
"Bounciness": self._bounciness,
"LinearDrag": self._linear_drag,
"AngularDrag": self._angular_drag,
"Glueable": 1. if self._glueable_ball else 0.,
"CollisionMask": 0b11, # Balls can collide with targets too.
"Friction": 0
})
return self._unity_environment.step(reset_ball_actions)
def _check_collisions_and_run_ball_dynamics(self, initial_observation):
"""Runs the jumpy simulation between collisions from the current state.
Simulation terminates when `max_num_bounces` is reached, or may terminate
early if the ball was spawned on top of another object, reaches an obstacle,
or reaches the target.
Args:
initial_observation: previous observation from the core environment. Used
to initialize the observation list returned by the function.
Returns:
      core_observation_list: List of the observations observed during the
        jumpy simulation.
In case of early termination the last observation is repeated to
indicate stationary dynamics after termination.
      simulation_on_list: Boolean mask of the same length as
        `core_observation_list`. A True in the i-th position indicates that
        physics were enabled when transitioning from core_observation_list[i]
        to core_observation_list[i+1]. Otherwise, the dynamics between
core_observation_list[i] and core_observation_list[i+1] are purely
stationary due to early simulation termination.
      spawn_collision: Indicates an object was placed on top of the ball, or
        an existing object.
hit_obstacle: Indicates an obstacle was hit even before the ball
simulation.
hit_obstacle_dynamics: Indicates an obstacle was hit during the ball
simulation.
hit_goals: Indicates all goals were hit.
"""
# We will construct a trajectory of the dynamics for up to
# self._max_num_bounces and trim it later if required.
core_observation_list = [initial_observation]
spawn_collision = (
(initial_observation["SpawnCollisionCount"] >
self._initial_spawn_collision_count) or
self._check_ball_collision(initial_observation["Blocks"]))
# Check if any of the obstacles have been hit already
blocks = initial_observation["Blocks"]
(unused_available, targets, obstacles, unused_placed, balls
) = self._split_available_obstacles_placed_balls(blocks)
initial_obstacle_hits = obstacles[
:, unity_constants.COLLISION_COUNT_FEATURE_INDEX]
hit_obstacle = True if np.any(initial_obstacle_hits) else False
    # We will flip these flags to true when the conditions are satisfied.
hit_goals = False
hit_obstacle_dynamics = False
if spawn_collision or hit_obstacle:
# If there is a spawn collision, we do not even simulate.
simulation_on_list = [False]
return (core_observation_list, simulation_on_list,
spawn_collision, hit_obstacle, hit_obstacle_dynamics, hit_goals)
simulation_on_list = [True]
previous_ball_hits = balls[:, unity_constants.COLLISION_COUNT_FEATURE_INDEX]
simulation_timestep = 0.01
max_time_one_bounce = (
self._max_simulation_steps_per_bounce * simulation_timestep)
current_time = initial_observation["ElapsedTime"]
for _ in range(self._max_num_bounces):
      # Run steps until we either get a collision of the ball, reach the
      # timeout, get an obstacle collision, or no balls are moving.
finishing_time = current_time + max_time_one_bounce
num_remaining_steps = np.round((finishing_time - current_time) /
simulation_timestep)
      # Even though we always run with "StopOnCollision", it may be that this
# collision is between two blocks and does not involve the balls.
# So we keep simulating until we get a ball collision or we timeout.
while num_remaining_steps:
core_time_step = self._unity_environment.step(
{"StopOnCollision": 1.0,
"SimulationSteps": num_remaining_steps,
"Timestep": simulation_timestep})
blocks = core_time_step.observation["Blocks"]
(unused_available, targets, obstacles, unused_placed, balls
) = self._split_available_obstacles_placed_balls(blocks)
current_time = core_time_step.observation["ElapsedTime"]
ball_hits = balls[:, unity_constants.COLLISION_COUNT_FEATURE_INDEX]
obstacle_hits = obstacles[
:, unity_constants.COLLISION_COUNT_FEATURE_INDEX]
# No need to continue if an obstacle was hit, or if the collision
# was with one of the balls.
if obstacle_hits.any():
hit_obstacle_dynamics = True
break
if (ball_hits - previous_ball_hits).any():
break
num_remaining_steps = np.round((finishing_time - current_time) /
simulation_timestep)
goal_hits = targets[:, unity_constants.COLLISION_COUNT_FEATURE_INDEX]
if goal_hits.all():
hit_goals = True
core_observation_list.append(core_time_step.observation)
if (self._end_dynamics_on_termination and
(hit_obstacle_dynamics or hit_goals)):
# Terminate the dynamics when ball collides with obstacle or targets.
        # (Later the sequence will be padded by repeating the last observation.)
# Models will be able to learn the transition from simulation_on=True
        # to simulation_on=False when one of these conditions happens.
simulation_on_list.append(False)
# By repeating the last observation, they will also be able to learn
# that if simulation_on=False in the input, the output should just be
# the same as the input and simulation_on will also be False after that.
# Allowing them to chain constant predictions after termination
# conditions.
core_observation_list.append(core_observation_list[-1])
simulation_on_list.append(False)
break
else:
simulation_on_list.append(True)
previous_ball_hits = ball_hits
epsilon_velocity = 1e-4
ball_velocities = balls[
:, (unity_constants.VELOCITY_X_FEATURE_INDEX,
unity_constants.VELOCITY_Y_FEATURE_INDEX,
unity_constants.ANGULAR_VELOCITY_FEATURE_INDEX)]
if np.all(np.abs(ball_velocities.flatten()) < epsilon_velocity):
break
return (core_observation_list, simulation_on_list,
spawn_collision, hit_obstacle, hit_obstacle_dynamics, hit_goals)
def _set_observation_and_termination(
self, time_step, default_step_type=dm_env.StepType.MID):
# Save the current state to restore it later, this corresponds to the scene
# with the last placed block already relaxed and at its final location,
# which will be the starting point for the next step (undoing all damage
# that the ball simulation may cause to the existing structures).
time_step_before_balls = time_step
# Adding the balls and simulating the dynamics.
time_step = self._add_balls_to_scene()
(core_observation_list, simulation_on_list, spawn_collision,
hit_obstacle, unused_hit_obstacle_dynamics, hit_goals
) = self._check_collisions_and_run_ball_dynamics(time_step.observation)
new_observation = self._build_observation(
core_observation_list, simulation_on_list)
time_step = time_step._replace(observation=core_observation_list[-1])
# We split the different types of blocks.
blocks = time_step.observation["Blocks"]
(available, targets, obstacles, placed, unused_balls
) = self._split_available_obstacles_placed_balls(blocks)
# Evaluate termination conditions.
# If we have placed as many objects as there are in display, or have reached
# the maximum number of steps
if not available.shape[0] or self._num_steps >= self._max_steps:
self._end_episode(constants.TERMINATION_MAX_STEPS)
# If there was a Spawn collision. A Spawn collision means the agent placed
# an object overlapping with another object. We also override the reward.
penalty_reward = 0.
block_reward = 0.
if spawn_collision > 0:
self._end_episode(constants.TERMINATION_SPAWN_COLLISION)
penalty_reward = -self._spawn_collision_penalty
# If we hit an obstacle, we also end the episode and override the reward.
elif hit_obstacle:
self._end_episode(constants.TERMINATION_OBSTACLE_HIT)
penalty_reward = -self._hit_obstacle_penalty
else:
# We remove the floor before evaluating the score.
placed_blocks = placed[1:]
self._num_sticky_blocks = np.sum(
placed_blocks[:, unity_constants.STICKY_FEATURE_INDEX])
self._progress = self._get_task_reward(
obstacles, targets, placed_blocks)
total_cost = self._get_cost(blocks)
total_score = self._progress
cost = total_cost - self._previous_cost
self._previous_cost = total_cost
block_reward = total_score - self._previous_score
self._previous_score = total_score
block_reward -= cost
if hit_goals:
self._end_episode(constants.TERMINATION_COMPLETE)
if self._is_end_of_episode:
step_type = dm_env.StepType.LAST
discount = time_step.discount * 0.
else:
step_type = default_step_type
discount = time_step.discount
reward = penalty_reward + block_reward
    # Restore the state to what it was before the ball, to be able to repeat the
# simulation, without any potential changes that the ball made to the
# blocks.
if not self._is_end_of_episode:
self._unity_environment.restore_state(
time_step_before_balls.observation,
verify_restored_state=False)
self._last_time_step = time_step._replace(
observation=new_observation,
step_type=step_type,
discount=discount,
reward=reward)
return self._last_time_step
def _build_observation(self, core_observation_list, simulation_on_list):
new_observation = core_observation_list[-1].copy()
# Trim or repeat to match the desired dynamics length.
num_missing_steps = self._length_dynamics - len(core_observation_list)
if num_missing_steps > 0:
time_mask = [True]*len(core_observation_list) + [False]*num_missing_steps
core_observation_list = core_observation_list[:]
core_observation_list += [core_observation_list[-1]] * num_missing_steps
simulation_on_list = simulation_on_list[:]
simulation_on_list += [False] * num_missing_steps
else:
core_observation_list = core_observation_list[:self._length_dynamics]
simulation_on_list = simulation_on_list[:self._length_dynamics]
time_mask = [True] * self._length_dynamics
# Get observations for each step and stack them.
step_observation_dict_list = [
self._build_observation_each_simulation_step(obs)
for obs in core_observation_list]
for key in step_observation_dict_list[0].keys():
if key in ["RGB", "ObserverRGB"]:
# For observations without an entity axis, the time axis will be
# the first axis.
axis = 0
else:
# The rest of the observations from each simulation timestep have
# a leading entity axis, so the time axis should be the second one.
axis = 1
new_observation[key] = np.stack(
[obs[key] for obs in step_observation_dict_list], axis=axis)
new_observation["SimulationOn"] = np.array(simulation_on_list, np.bool)
new_observation["TimeMask"] = np.array(time_mask, np.bool)
del new_observation["Contacts"]
del new_observation["SpawnCollisionCount"]
del new_observation["CollisionStop"]
del new_observation["ElapsedTime"]
if "Segmentation" in new_observation:
del new_observation["Segmentation"]
return new_observation
def _build_observation_each_simulation_step(self, core_observation):
(available, targets, obstacles, placed, balls
) = self._split_available_obstacles_placed_balls(core_observation["Blocks"])
new_observation = {}
new_observation[constants.AVAILABLE_BLOCK] = available
new_observation[constants.BLOCK] = placed
new_observation[constants.OBSTACLE] = obstacles
new_observation[constants.TARGET] = targets
new_observation[constants.BALL] = balls
for key in ["RGB", "ObserverRGB"]:
if key in core_observation:
new_observation[key] = core_observation[key]
if "Segmentation" in core_observation:
self._add_segmentation_masks(new_observation,
core_observation["Segmentation"])
return new_observation
def _add_segmentation_masks(self, observation, segmentation):
for name in _OBJECT_TYPE_NAMES:
obs_name = "SegmentationMasks" + name
ids = list(np.round(observation[name][:, 0]))
observation[obs_name] = (
stacking.build_segmentation_masks_for_ids(
segmentation, ids))
def observation_spec(self, *args, **kwargs):
new_spec = self._unity_environment.observation_spec().copy()
# The block observation is exactly as we get it
block_obs_shape = [0, self._length_dynamics, new_spec["Blocks"].shape[1]]
block_obs_dtype = new_spec["Blocks"].dtype
# We know the observation is the same for all block types.
for name in _OBJECT_TYPE_NAMES:
new_spec[name] = specs.Array(
block_obs_shape, dtype=block_obs_dtype, name=name)
for key in ["RGB", "ObserverRGB"]:
if key in new_spec:
prev_spec = new_spec[key]
new_spec[key] = specs.Array(
(self._length_dynamics,) + prev_spec.shape,
dtype=prev_spec.dtype, name=prev_spec.name)
if "Segmentation" in list(new_spec.keys()):
segmentation_resolution = new_spec["Segmentation"].shape[:2]
segmentation_obs_shape = (
0, self._length_dynamics) + segmentation_resolution
for name in _OBJECT_TYPE_NAMES:
obs_name = "SegmentationMasks" + name
new_spec[obs_name] = specs.Array(
segmentation_obs_shape, dtype=np.bool, name=obs_name)
del new_spec["Segmentation"]
new_spec.update({
"SimulationOn": specs.Array(
[self._length_dynamics], dtype=np.bool, name="SimulationOn"),
"TimeMask": specs.Array(
[self._length_dynamics], dtype=np.bool, name="TimeMask"),
})
del new_spec["Contacts"]
del new_spec["SpawnCollisionCount"]
del new_spec["CollisionStop"]
del new_spec["ElapsedTime"]
return new_spec
def _compute_max_episode_reward(self, obs):
return len(obs.targets) * self._reward_per_target
def _maybe_update_max_steps(self):
block_height = self._initial_available_objects[0].height
target_level_height = int(
self._initial_scene.targets[0].y // block_height)
self._max_steps = max(15, 5 * (target_level_height + 1))
def _get_task_reward(self, obstacles, targets, blocks):
del obstacles, blocks
num_targets_hit = np.sum(
(targets[:, unity_constants.COLLISION_COUNT_FEATURE_INDEX] > 0.))
target_score = num_targets_hit * self._reward_per_target
return target_score
def _get_generator(self, difficulty):
if isinstance(difficulty, str):
raise ValueError("Unrecognized difficulty: %s" % difficulty)
# By having a single ball level at 0, the ball will always be placed
# a fixed number of levels above the target.
ball_levels = [0]
difficulty_distance = min(difficulty, 4)
max_height = max(0, difficulty - 4)
min_num_obstacles = 1
max_num_obstacles = difficulty // 2 + min_num_obstacles
distance_ranges = [(0.03, 0.3),
(0.36, 0.49),
(0.50, 0.63),
(0.69, 0.82),
(0.83, 1.)]
max_rel_distance = distance_ranges[difficulty_distance][1]
min_rel_distance = distance_ranges[difficulty_distance][0]
horizontal_distance_range = [min_rel_distance, max_rel_distance]
target_levels = [max_height]
num_obstacles_range = [max_num_obstacles, max_num_obstacles + 1]
return MarbleRunGenerator(
target_levels=target_levels,
ball_levels=ball_levels,
scene_width=self._generator_width,
random_state=self._random_state,
num_obstacles_range=num_obstacles_range,
rel_horizontal_distance_range=horizontal_distance_range)
@property
def episode_params(self):
"""Returns discrete parameters of the current episode.
This can be used to implement a dynamic curriculum by clustering episode
outcomes according to these, and by asking the agent to do well on all
clusters/parameter setting combinations, before it can progress to the next
difficulty level.
"""
# First two obstacles are the fixed walls.
obstacles = self._initial_scene.obstacles[2:]
balls = self._initial_scene.balls
targets = self._initial_scene.targets
num_obstacles = len(obstacles)
num_targets = len(targets)
num_balls = len(balls)
max_height_target = max([target.y for target in targets])
obstacle_heights = [obstacle.y for obstacle in obstacles]
min_height_obstacle = min(obstacle_heights)
max_height_obstacle = max(obstacle_heights)
# Calculate the maximum horizontal distance between all targets and
# their closest ball and digitize it.
scene_width = self._display_limit * 2.
max_min_target_distance_per_target = max([
min([np.abs(ball.x-target.x) for ball in balls]) # pylint: disable=g-complex-comprehension
for target in targets])
horizontal_distance_bin = int(np.digitize(
max_min_target_distance_per_target, [scene_width/3, scene_width*2/3]))
return MarbleRunEpisodeParams(
num_obstacles=num_obstacles,
num_targets=num_targets,
num_balls=num_balls,
min_height_obstacle=min_height_obstacle,
max_height_obstacle=max_height_obstacle,
max_height_target=max_height_target,
discretized_ball_target_distance=horizontal_distance_bin)
class MarbleRunGenerator(stacking.StackingGenerator):
"""Generates a set of horizontal obstacles and targets."""
def __init__(self,
target_levels,
ball_levels,
num_obstacles_range,
rel_horizontal_distance_range=(0., 1.),
min_ball_target_level_diff=4,
targets_side=5,
obstacles_height=5,
**kwargs):
"""Initialize the generator.
Args:
target_levels: List of discrete height levels (starting from 0) at which
the targets can be located.
ball_levels: List of discrete height levels (starting from 0) at which
the balls can be located. See min_ball_target_level_diff.
      num_obstacles_range: a tuple indicating the range of obstacles
that will be in the generated scene, from low (inclusive) to high
(exclusive). This counts the total number of obstacles.
rel_horizontal_distance_range: Range of horizontal distances between
the target and the ball, relative to the scene width.
min_ball_target_level_diff: Minimum number of levels that the ball will
        be above the target. This defines a lower limit for the ball level of
        `(target_level + min_ball_target_level_diff)`,
which may cause the ball to be higher than any of the levels specified
in ball_levels.
      targets_side: The width and height of targets.
      obstacles_height: The height of the obstacles.
**kwargs: additional keyword arguments passed to super
"""
super(MarbleRunGenerator, self).__init__(
num_blocks_range=None,
**kwargs)
self._targets_side = targets_side
self.obstacles_height = obstacles_height
self._targets_height = targets_side
self._target_levels = target_levels
self._num_obstacles_range = num_obstacles_range
self._min_ball_target_level_diff = min_ball_target_level_diff
self._rel_horizontal_distance_range = rel_horizontal_distance_range
self._ball_levels = ball_levels
def _place_available_objects(self):
"""Returns the available blocks to the agent."""
def create_block(width, height, shape):
"""Returns a block with the specified properties."""
block = block_utils.Block(
width=width, height=height, angle=0., shape=shape,
x=0, y=0) # x and y will be set later.
return block
observation_blocks = [
create_block(
self.small_width, self.height, unity_constants.BOX_SHAPE),
create_block(
2*self.small_width, self.height, unity_constants.BOX_SHAPE),
create_block(
self.small_width, 2*self.height, unity_constants.BOX_SHAPE),
create_block(
self.medium_width, self.height*2/3, unity_constants.BOX_SHAPE),
create_block(
self.large_width, self.height/10*3, unity_constants.BOX_SHAPE),
create_block(
-self.medium_width, self.height, unity_constants.RAMP_SHAPE),
create_block(
self.medium_width, self.height, unity_constants.RAMP_SHAPE),
]
# Calculate margin of blocks.
block_abs_widths = [np.abs(block.width) for block in observation_blocks]
empty_width = self.scene_width - sum(block_abs_widths)
if empty_width <= 0:
raise ValueError("Not enough space between available objects.")
horizontal_margin = empty_width / (len(observation_blocks) - 1)
# Update the position of the blocks using the margin.
observation_block_with_positions = []
current_x = 0
display_y_pos = -2 * (self.margin + self.height)
for block in observation_blocks:
abs_width = np.abs(block.width)
display_x_pos = current_x + abs_width / 2
observation_block_with_positions.append(
block._replace(x=display_x_pos, y=display_y_pos))
current_x += abs_width + horizontal_margin
assert current_x - horizontal_margin <= self.scene_width
return observation_block_with_positions
def _build_tessellation(self, num_levels):
""""Tessellates space blocks.
The width of the blocks in the tessellation will be either
`self.small_width` or `2*self.small_width`.
Args:
      num_levels: Number of layers of blocks in the tessellation.
Returns:
      2-D array containing the blocks in the tessellation with shape
      [num_blocks, 3], where the trailing dimension contains horizontal
position (floating point bounded between 0 and self.scene_width),
vertical discrete position (integer in range(num_levels)) and width (
one of `self.small_width` or `2*self.small_width`).
"""
valid_widths = [self.small_width, 2*self.small_width]
blocks = []
for level in range(num_levels):
accumulated_width = self.random_state.uniform(0, self.small_width)
while True:
block_width = self.random_state.choice(valid_widths)
block_x = accumulated_width + block_width / 2
block_y = level
accumulated_width += block_width
if accumulated_width > self.scene_width:
break
blocks.append([block_x, block_y, block_width])
blocks_xyw = np.stack(blocks, axis=0)
return blocks_xyw
def _place_walls(self, relative_wall_margin=0.05):
"""Returns blocks with walls at the edges of the scene, with some margin."""
wall_width = self.height / 2
margin = self.scene_width * relative_wall_margin
scene_height = self.scene_width * 2/3
right_wall = block_utils.Block(
x=self.scene_width + margin + wall_width/2., y=scene_height/2,
height=scene_height, width=wall_width)
left_wall = block_utils.Block(
x=-margin-wall_width/2., y=scene_height/2,
height=scene_height, width=wall_width)
return [right_wall, left_wall]
def _scale_vertical_positions(self, blocks):
"""Transformcs integer level vertical positions into continuous."""
return [block._replace(y=(block.y+0.5)*self.height)
for block in blocks]
def _remove_nearby_blocks(self, reference_block, blocks_xyw):
"""Removes blocks that are nearby to a reference block from a tessellation.
    A block is considered nearby if it is within the same layer or up to two
    layers above or below the reference block, and the horizontal distance
    to the reference block is less than self.small_width * 1.5.
Args:
reference_block: reference block_utils.Block object.
blocks_xyw: Block in the tessellation.
Returns:
Updated tessellation where the nearby blocks have been removed.
"""
# We will for sure keep all blocks that are more than two levels away
# from the reference block.
mask_keep_rows = np.abs(blocks_xyw[:, 1] - reference_block.y) > 2.5
margin = self.small_width * 2
# We will also keep blocks whose left side is far enough to the right of the
# right side of the reference block, and blocks whose right side is far
# enough to the left from the left side of the reference block.
right_side_reference = reference_block.x + reference_block.width/2
right_side_blocks = blocks_xyw[:, 0] + blocks_xyw[:, 2]/2
left_side_reference = reference_block.x - reference_block.width/2
left_side_blocks = blocks_xyw[:, 0] - blocks_xyw[:, 2]/2
mask_keep_margin = np.logical_or(
left_side_blocks > right_side_reference + margin,
right_side_blocks < left_side_reference - margin)
mask_keep = np.logical_or(mask_keep_margin, mask_keep_rows)
return blocks_xyw[mask_keep]
def _sample_block_from_tessellation(
self, tessellation_blocks_xyw, balls, targets):
"""Samples a block from the tessellation according to different criteria."""
probabilities = []
# Start by assigning uniform probability to each block.
weight_uniform = 1.
probabilities_uniform = np.ones([tessellation_blocks_xyw.shape[0]])
probabilities.append((weight_uniform, probabilities_uniform))
# Probabilities near ball and target
weight_near = 1.
temperature_length = self.scene_width / 10
for ball in balls:
distances = np.abs(tessellation_blocks_xyw[:, 0] - ball.x)
probabilities_near_ball = 1 / ((distances/temperature_length)**2 + 0.1)
probabilities.append((weight_near, probabilities_near_ball))
for target in targets:
distances = np.abs(tessellation_blocks_xyw[:, 0] - target.x)
probabilities_near_target = 1 / ((distances/temperature_length)**2 + 0.1)
probabilities.append((weight_near, probabilities_near_target))
    # Higher probabilities for objects lying exactly on top of the floor,
# in the region between the ball and targets.
weight_floor = 4.
blocks_on_floor = tessellation_blocks_xyw[:, 1] == 0
balls_and_targets_x = [ball_or_target.x for ball_or_target in balls+targets]
min_x, max_x = min(balls_and_targets_x), max(balls_and_targets_x)
blocks_between = ((tessellation_blocks_xyw[:, 0] > min_x) *
(tessellation_blocks_xyw[:, 0] < max_x))
blocks_floor_between = blocks_on_floor * blocks_between
if np.any(blocks_floor_between):
probabilities_on_floor = np.where(blocks_floor_between, 1., 0.)
probabilities.append((weight_floor, probabilities_on_floor))
# Probabilities near the middle point between ball and target:
weight_middle = 1.
temperature_length = self.scene_width / 5
for ball, target in itertools.product(balls, targets):
middle_x = (ball.x + target.x) / 2
distances = np.abs(tessellation_blocks_xyw[:, 0] - middle_x)
probabilities_near_middle = 1 / ((distances/temperature_length)**2 + 0.1)
probabilities.append((weight_middle, probabilities_near_middle))
weights, probabilities = list(zip(*probabilities))
# Stack multipliers and probabilities.
weights = np.stack(weights, axis=0)
probabilities = np.stack(probabilities, axis=0)
    # Normalize each row of probabilities to 1 and apply each category's weight.
probabilities /= np.sum(probabilities, axis=1, keepdims=True)
probabilities = weights[:, np.newaxis] * probabilities
# Sum the probabilities and normalize again.
probabilities_merged = probabilities.sum(axis=0)
probabilities_merged /= np.sum(probabilities_merged)
block_indices = np.arange(tessellation_blocks_xyw.shape[0])
block_ind = self.random_state.choice(block_indices, p=probabilities_merged)
return tessellation_blocks_xyw[block_ind]
def _try_to_generate_one(self):
"""Generates a single marble_run scene.
Returns:
observation: a block_utils.BlocksObservation object
"""
# Generate ball and target.
# Make sure that there is enough horizontal separation.
min_horiz_dist = self._rel_horizontal_distance_range[0]*self.scene_width
max_horiz_dist = self._rel_horizontal_distance_range[1]*self.scene_width
# Start by assuming that the ball will go left to right.
    # Sample the position of the ball to make sure that it is at least
# min_horiz_dist away from the right.
ball_x = self.random_state.uniform(0, self.scene_width-min_horiz_dist)
min_target_x = ball_x + min_horiz_dist
# The minimum target separation is the size of the target and the ball,
# so the ball cannot drop directly on the target.
min_target_x = max(self._targets_height + self.height, min_target_x)
max_target_x = ball_x + max_horiz_dist
max_target_x = min(self.scene_width, max_target_x)
target_x = self.random_state.uniform(min_target_x, max_target_x)
assert target_x - ball_x < max_horiz_dist
assert target_x - ball_x > min_horiz_dist
    # We flip a coin to exchange the positions of the target and the ball.
if self.random_state.randint(2):
target_x, ball_x = ball_x, target_x
    # Make sure that the ball is at least `_min_ball_target_level_diff`
    # levels above the target.
target_level = np.random.choice(self._target_levels)
ball_level = np.random.choice(self._ball_levels)
ball_level = max(ball_level,
target_level + self._min_ball_target_level_diff)
targets = [block_utils.Block(
x=target_x, y=target_level, shape=unity_constants.BALL_SHAPE,
width=self._targets_height, height=self._targets_height)]
balls = [block_utils.Block(
x=ball_x, y=ball_level, shape=unity_constants.BALL_SHAPE,
width=self.height, height=self.height)]
# Generating obstacles.
obstacles = []
    # Tessellate space and remove blocks too close to balls and targets.
tessellation_blocks_xyw = self._build_tessellation(
num_levels=ball_level+1)
for ball in balls:
tessellation_blocks_xyw = self._remove_nearby_blocks(
ball, tessellation_blocks_xyw)
for target in targets:
tessellation_blocks_xyw = self._remove_nearby_blocks(
target, tessellation_blocks_xyw)
# Add obstacles sequentially.
num_obstacles = np.random.randint(*self._num_obstacles_range)
for _ in range(num_obstacles):
if tessellation_blocks_xyw.shape[0] == 0:
raise stacking.GenerationError(
"Not possible to generate all obstacles.")
block = self._sample_block_from_tessellation(
tessellation_blocks_xyw, balls, targets)
obstacles.append(
block_utils.Block(
x=block[0], y=block[1],
width=block[2], height=self.obstacles_height))
tessellation_blocks_xyw = self._remove_nearby_blocks(
obstacles[-1], tessellation_blocks_xyw)
floor = self._place_floor()
observation_blocks = self._place_available_objects()
fixed_blocks = [floor] + observation_blocks
# Convert discrete vertical positions into continuous values.
obstacles = self._scale_vertical_positions(obstacles)
targets = self._scale_vertical_positions(targets)
balls = self._scale_vertical_positions(balls)
walls = self._place_walls()
observation = block_utils.BlocksObservation(
balls=balls,
blocks=fixed_blocks,
obstacles=walls + obstacles,
targets=targets)
return observation
def generate_one(self):
"""Generates a single marble_run scene.
    Returns:
      observation: a block_utils.BlocksObservation object describing the
        generated scene.
    Raises:
      stacking.GenerationError: if the maximum number of generation attempts
        is reached.
"""
max_attempts_generate = 100
for _ in range(max_attempts_generate):
try:
return self._try_to_generate_one()
except stacking.GenerationError:
continue
raise stacking.GenerationError("Max number of generation attempts reached.")
| dm_construction-master | dm_construction/environments/marble_run.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The base environment for Construction tasks.
See: Bapst, V., Sanchez-Gonzalez, A., Doersch, C., Stachenfeld, K., Kohli, P.,
Battaglia, P., & Hamrick, J. (2019, May). Structured agents for physical
construction. In International Conference on Machine Learning (pp. 464-474).
See: Hamrick, J. B., Bapst, V., Sanchez-Gonzalez, A., Pfaff, T., Weber, T.,
Buesing, L., & Battaglia, P. W. (2020). Combining Q-Learning and Search with
Amortized Value Estimates. ICLR 2020.
"""
import abc
from absl import logging
from dm_construction.unity import constants as unity_constants
from dm_construction.utils import block as block_utils
from dm_construction.utils import constants
from dm_construction.utils import serialization
import dm_env
from dm_env import specs
import numpy as np
from scipy import stats
_OBJECT_TYPE_NAMES = [
constants.BLOCK,
constants.OBSTACLE,
constants.TARGET,
constants.AVAILABLE_BLOCK
]
def _find_value_in_array(value, array):
index = np.where(value == array)[0]
if index.shape[0] == 0:
return None
if index.shape[0] > 1:
raise ValueError("Found more than one {} in {}".format(value, array))
return index[0]
def _build_segmentation_mask_for_id(segmentation_array, target_id):
"""Builds a binary mask for target_id."""
return np.any(segmentation_array == target_id, axis=2)
def build_segmentation_masks_for_ids(
segmentation_array, target_ids):
if target_ids:
return np.stack([_build_segmentation_mask_for_id(segmentation_array, id_)
for id_ in target_ids])
else:
return np.zeros((0,) + segmentation_array.shape[:2], dtype=np.bool)
def _obstacle_has_been_hit(obstacles):
return np.sum(
obstacles[:, unity_constants.COLLISION_COUNT_FEATURE_INDEX]) > 0.
def _calculate_contact_pairs_and_features(placed_blocks_ids, contacts):
"""Returns pairs of blocks in contact and the corresponding features."""
placed_blocks_ids = np.round(placed_blocks_ids).astype(np.int32)
senders = np.round(contacts[:, 0]).astype(np.int32)
receivers = np.round(contacts[:, 1]).astype(np.int32)
  # We only provide the feature that tells whether there is glue, but not the
  # features indicating the position of the glue/contact.
features = contacts[:, 2:3]
contact_pairs = []
contact_features = []
for sender, receiver, feature in zip(senders, receivers, features):
sender_ind = _find_value_in_array(sender, placed_blocks_ids)
receiver_ind = _find_value_in_array(receiver, placed_blocks_ids)
if sender_ind is None or receiver_ind is None:
continue
contact_pairs.append(np.array([sender_ind, receiver_ind], np.int32))
contact_features.append(feature)
if not contact_pairs:
contact_pairs = np.zeros([0, 2], dtype=np.int32)
contact_features = np.zeros_like(features[:0])
else:
contact_pairs = np.stack(contact_pairs, axis=0)
contact_features = np.stack(contact_features, axis=0)
return contact_pairs, contact_features
class ConstructionStacking(dm_env.Environment):
"""A base class for the construction tasks."""
def __init__(self,
unity_environment,
block_replacement,
sticky_penalty,
max_difficulty,
progress_threshold,
bad_choice_penalty=0.0,
spawn_collision_penalty=0.0,
hit_obstacle_penalty=0.0,
difficulty=None,
curriculum_sample=True,
curriculum_sample_geom_p=0.,
bad_choice_termination=True,
num_simulation_steps=1000,
target_color=(0., 0., 1., 0.5),
block_color=(0., 0., 1., 1.),
sticky_block_color=(0., 0.8, 1., 1.),
obstacle_color=(1., 0., 0., 1.),
ball_color=(0., 1., 0., 1.),
generator_width=200,
random_state=None):
"""Inits the environment.
Args:
unity_environment: To be used to run the environment. Should be created
with unity/environment.py.
block_replacement: if True, the same block can be used multiple times.
sticky_penalty: value to be subtracted from the score for each sticky
block used.
max_difficulty: the maximum curriculum difficulty level.
progress_threshold: the fraction of maximum reward that needs to be
obtained for the task to be considered "solved".
bad_choice_penalty: value to be subtracted from the score each time the
agent does not select an object correctly.
spawn_collision_penalty: Reward to be passed to the agent when it
terminates the episode early by placing an object overlapping with
another object.
hit_obstacle_penalty: Reward to be passed to the agent when it
terminates the episode early by hitting an obstacle.
difficulty: Difficulty of the environment. If None, it will be required
to be passed in the reset method instead. It will usually be an integer
between 0 and `max_difficulty`. Some base classes may accept a string
as documented in their docstring to indicate a generalization level.
curriculum_sample: If `True`, then when doing the curriculum, difficulties
up to the current difficulty are sampled. If None, it will
be required to be passed in the reset method instead. It cannot be set
to true when the difficulty is passed as a string.
curriculum_sample_geom_p: Parameter of the geometric distribution used
to sample difficulty levels when curriculum_sample = True. A value of
        0.6 indicates that approximately 0.6 of the episodes run at the
`current` difficulty, 0.6 of the remaining episodes run at `current-1`,
0.6 of the remaining at `current-2`, etc. Since the probabilities
are normalized, a small value can be used here for uniform distribution.
        A value of 1 is equivalent to curriculum_sample=False, and a value of 0
is equivalent to uniform sampling.
bad_choice_termination: If True, episodes terminate when an agent tries to
select an available object that is no longer available.
num_simulation_steps: number of simulation steps to run every time an
object is placed.
target_color: Color of the targets.
block_color: Color of the blocks.
sticky_block_color: Color of the sticky blocks.
obstacle_color: Color of the obstacles.
ball_color: Color of the balls.
generator_width: Width discretization unit for generator.
random_state: a np.random.RandomState object.
"""
self._unity_environment = unity_environment
self._random_state = random_state or np.random
# This number is the width discretization unit.
self._generator_width = generator_width
# Maximum displacement from the center of the image to display available
# objects. Units are the same as object horizontal positions. Camera view
    # covers roughly between -7 and +7.
self._display_limit = 7.
generator_scale = self._display_limit * 2. / self._generator_width
self._generator_scale = np.array((generator_scale, generator_scale))
self._generator_offset = np.array((
-self._generator_width*self._generator_scale[0]/2, 0.))
# Force boolean parameters to not be passed as None.
assert block_replacement is not None
self._block_replacement = block_replacement
self._sticky_penalty = sticky_penalty
self._bad_choice_penalty = bad_choice_penalty
self._spawn_collision_penalty = spawn_collision_penalty
self._hit_obstacle_penalty = hit_obstacle_penalty
self._bad_choice_termination = bad_choice_termination
self._progress_threshold = progress_threshold
self._target_color = target_color
self._block_color = block_color
self._sticky_block_color = sticky_block_color
self._obstacle_color = obstacle_color
self._ball_color = ball_color
assert sticky_penalty > -1e-6
assert bad_choice_penalty > -1e-6
    assert spawn_collision_penalty > -1e-6
assert hit_obstacle_penalty > -1e-6
self._num_simulation_steps = num_simulation_steps
self._init_curriculum_sample = curriculum_sample
self._init_difficulty = difficulty
if curriculum_sample_geom_p < 0.:
raise ValueError("`curriculum_sample_geom_p (%g) should be >= 0.`"
% curriculum_sample_geom_p)
self._curriculum_sample_geom_p = curriculum_sample_geom_p
self._max_difficulty = max_difficulty
self._termination_reason = None
self._state_ignore_fields = [
"_unity_environment", "_random_state", "_generator"]
# Contains the overall level of difficulty.
self._overall_difficulty = None
# Contains the overall level of difficulty of the current episode instance.
# Equal to `self._overall_difficulty` when curriculum sample is False.
self._episode_difficulty = None
# For the frame observer.
self._frames_list = None
self._frame_observer = None
self._initialize()
def close(self):
self._unity_environment.close()
@property
def max_difficulty(self):
return self._max_difficulty
def get_state(self, ignore_unity_state=False):
state = serialization.get_object_state(self, self._state_ignore_fields)
if not ignore_unity_state:
state["_unity_environment"] = self._unity_environment.last_observation
state["_generator"] = self._generator.get_state()
return state
def get_reset_state(self):
"""Reset state to pass to reset method to restart an identical episode."""
return self._reset_state
def set_state(self, state, ignore_unity_state=False):
serialization.set_object_state(self, state, self._state_ignore_fields)
    # In scenes with many constraints (glue) it is not always possible to
    # restore the state fully accurately, leading to slightly different
    # velocities. This should not be much of a problem, since the restored
    # state should only contain objects with velocities close to 0 (settled
    # blocks, without the ball).
if not ignore_unity_state:
self._unity_environment.restore_state(
state["_unity_environment"], verify_velocities=False)
self._generator.set_state(state["_generator"])
def _split_available_obstacles_placed(self, blocks):
"""Splits observations for available blocks, obstacles and placed blocks."""
num_remaining_display_blocks = len(self._remaining_indices)
num_obstacles = self._num_obstacles
num_targets = self._num_targets
# Because of the order in which the objects were added, we know the
# obstacles come first, available objects next, and all remaining
# objects are blocks placed by the agent.
object_offset = 0
obstacles = blocks[:num_obstacles]
object_offset += num_obstacles
targets = blocks[object_offset:object_offset+num_targets]
object_offset += num_targets
available = blocks[object_offset:object_offset+num_remaining_display_blocks]
object_offset += num_remaining_display_blocks
placed = blocks[object_offset:]
return available, targets, obstacles, placed
def _maybe_add_segmentation_masks(self, observation):
if "Segmentation" not in list(observation.keys()):
return
segmentation = observation["Segmentation"]
for name in _OBJECT_TYPE_NAMES:
obs_name = "SegmentationMasks" + name
ids = list(np.round(observation[name][:, 0]))
observation[obs_name] = build_segmentation_masks_for_ids(
segmentation, ids)
del observation["Segmentation"]
def _set_observation_and_termination(
self, time_step, default_step_type=dm_env.StepType.MID):
new_observation = time_step.observation.copy()
# We split the different types of blocks.
(available, targets, obstacles,
placed) = self._split_available_obstacles_placed(
time_step.observation["Blocks"])
new_observation[constants.AVAILABLE_BLOCK] = available
new_observation[constants.BLOCK] = placed
new_observation[constants.OBSTACLE] = obstacles
new_observation[constants.TARGET] = targets
contact_pairs, contact_features = _calculate_contact_pairs_and_features(
placed[:, unity_constants.ID_FEATURE_INDEX],
new_observation["Contacts"])
del new_observation["Contacts"]
new_observation["ContactPairs"] = contact_pairs
new_observation["ContactFeatures"] = contact_features
self._maybe_add_segmentation_masks(new_observation)
# Evaluate termination conditions.
# If we have placed as many objects as there are in display, or have reached
    # the maximum number of steps, the episode ends.
if not available.shape[0] or self._num_steps >= self._max_steps:
self._end_episode(constants.TERMINATION_MAX_STEPS)
# If there was a Spawn collision. A Spawn collision means the agent placed
# an object overlapping with another object. We also override the reward.
penalty_reward = 0.
block_reward = 0.
if (time_step.observation["SpawnCollisionCount"] >
self._initial_spawn_collision_count):
self._end_episode(constants.TERMINATION_SPAWN_COLLISION)
penalty_reward = -self._spawn_collision_penalty
# If we hit an obstacle, we also end the episode and override the reward.
elif _obstacle_has_been_hit(obstacles):
self._end_episode(constants.TERMINATION_OBSTACLE_HIT)
penalty_reward = -self._hit_obstacle_penalty
else:
# We remove the floor before evaluating the score.
blocks = new_observation[constants.BLOCK][1:]
self._num_sticky_blocks = np.sum(
blocks[:, unity_constants.STICKY_FEATURE_INDEX])
self._progress = self._get_task_reward(
new_observation[constants.OBSTACLE],
new_observation[constants.TARGET],
blocks)
total_cost = self._get_cost(blocks)
total_score = self._progress
cost = total_cost - self._previous_cost
self._previous_cost = total_cost
block_reward = total_score - self._previous_score
self._previous_score = total_score
block_reward -= cost
if self._enough_progress(self._progress):
self._end_episode(constants.TERMINATION_COMPLETE)
if self._is_end_of_episode:
step_type = dm_env.StepType.LAST
discount = time_step.discount * 0.
else:
step_type = default_step_type
discount = time_step.discount
reward = penalty_reward + block_reward
self._last_time_step = time_step._replace(
observation=new_observation,
step_type=step_type,
discount=discount,
reward=reward)
return self._last_time_step
def _get_cost(self, blocks):
# The number of bad choices can be inferred from the total number of blocks.
num_bad_choices = self._num_steps - len(blocks)
total_cost = self._bad_choice_penalty * num_bad_choices
total_cost += self._sticky_penalty * self._num_sticky_blocks
return total_cost
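  # Illustrative sketch (added comment, not original code): if the agent has
  # taken 5 steps but only 4 blocks were actually placed (1 bad choice) and 2
  # of the placed blocks are sticky, then
  # total_cost = bad_choice_penalty * 1 + sticky_penalty * 2.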
def observation_spec(self, *args, **kwargs):
new_spec = self._unity_environment.observation_spec().copy()
# The block observation is exactly as we get it
block_obs_shape = [0, new_spec[constants.BLOCK].shape[1]]
block_obs_dtype = new_spec[constants.BLOCK].dtype
# We know the observation is the same for all block types.
for name in _OBJECT_TYPE_NAMES:
new_spec[name] = specs.Array(
block_obs_shape, dtype=block_obs_dtype, name=name)
if "Segmentation" in list(new_spec.keys()):
segmentation_resolution = new_spec["Segmentation"].shape[:2]
segmentation_obs_shape = (0,) + segmentation_resolution
for name in _OBJECT_TYPE_NAMES:
obs_name = "SegmentationMasks" + name
new_spec[obs_name] = specs.Array(
segmentation_obs_shape, dtype=np.bool, name=obs_name)
del new_spec["Segmentation"]
new_spec.update({"ContactPairs": specs.Array(
[0, 2], dtype=np.int32, name="ContactPairs")})
new_spec.update({"ContactFeatures": specs.Array(
[0, 1], dtype=new_spec["Contacts"].dtype, name="ContactFeatures")})
del new_spec["Contacts"]
return new_spec
def action_spec(self, *args, **kwargs):
action_spec = {}
# The action spec of the unity_environment is documented in
# unity/environment.py.
unity_action_spec = self._unity_environment.action_spec()
action_spec["Horizontal"] = unity_action_spec["SetPosX"]
action_spec["Vertical"] = unity_action_spec["SetPosY"]
action_spec["Sticky"] = specs.DiscreteArray(num_values=2)
action_spec["Selector"] = specs.BoundedArray(
[], dtype=np.float32,
minimum=-self._display_limit,
maximum=self._display_limit)
return action_spec
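  # Hypothetical usage sketch (added comment, not original code): an action
  # passed to step() under this spec could look like
  #   {"Selector": -3.5,   # display position of the block to pick up
  #    "Horizontal": 0.0,  # x coordinate at which to place it
  #    "Vertical": 5.0,    # y coordinate at which to place it
  #    "Sticky": 1}        # 1 -> make the placed block sticky, 0 -> normal
  # The valid ranges for "Horizontal" and "Vertical" come from the underlying
  # unity_environment action spec ("SetPosX"/"SetPosY"); the numeric values
  # above are only assumptions for illustration.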
def step(self, actions):
if self._is_end_of_episode:
raise ValueError("Calling step on a closed episode")
self._num_steps += 1
slot_index = self._selector_value_to_slot_index(actions["Selector"])
horizontal = actions["Horizontal"]
vertical = actions["Vertical"]
# Dictionary for the actions that are going to be applied to the core env.
actions_apply = {}
# To move the cursor to the object picked by the agent and the location
# picked by the agent.
display_coordinates = self._display_coordinates[slot_index]
actions_apply.update({"SelectPosX": display_coordinates[0],
"SelectPosY": display_coordinates[1],
"SetPosX": horizontal,
"SetPosY": vertical})
# If the selected block is not available, nothing else happens.
if slot_index not in self._remaining_indices:
time_step = self._unity_environment.step(actions_apply)
if self._bad_choice_termination:
self._end_episode(constants.TERMINATION_BAD_CHOICE)
return self._set_observation_and_termination(time_step)
# If there is no replacement, remove the objects from remaining objects
# and append the delete action.
if not self._block_replacement:
self._remaining_indices.remove(slot_index)
display_object_id = self._display_ids[slot_index]
actions_apply.update({
"Delete": 1.,
"SelectId": display_object_id,
"SetId": display_object_id
})
else:
actions_apply["SetId"] = self._next_object_id
self._next_object_id += 1
# Setting the actions necessary to add the new block.
new_block = self._initial_available_objects[slot_index]
size_x = new_block.width
size_y = new_block.height
if actions["Sticky"]:
actions_apply["Sticky"] = 1.
actions_apply.update({"RGBA": self._sticky_block_color})
else:
actions_apply.update({"RGBA": self._block_color})
actions_apply.update(
{"Width": size_x,
"Height": size_y,
"Shape": new_block.shape,
"SimulationSteps": float(self._num_simulation_steps),
"FreeBody": 1.,
"SpawnBlock": 1.})
try:
time_step = self._unity_environment.step(actions_apply)
except unity_constants.MetaEnvironmentError as e:
logging.info(e)
self._end_episode(constants.TERMINATION_BAD_SIMULATION)
self._last_time_step = self._last_time_step._replace(
discount=self._last_time_step.discount * 0.,
reward=self._last_time_step.reward * 0.,
step_type=dm_env.StepType.LAST)
return self._last_time_step
else:
out = self._set_observation_and_termination(time_step)
return out
def _initialize(self):
# Initializes the env by forcing a reset. This is important to be
# able to get and set states, so all attributes are instantiated
# and a generator is put in place.
self.reset(
difficulty=None if self._init_difficulty is not None else 0,
curriculum_sample=(
None if self._init_curriculum_sample is not None else False)
)
def reset(self, reset_state=None, difficulty=None, curriculum_sample=None):
"""Resets the generator.
Args:
reset_state: A full state that guarantees that an environment will be
reset to the same initial conditions as a past episode.
difficulty: Difficulty of the environment.
curriculum_sample: If `True`, then when doing the curriculum, difficulties
up to the current difficulty are sampled.
Returns:
time_step: The initial time_step.
"""
while True:
found, time_step = self._try_to_reset(
reset_state=reset_state,
difficulty=difficulty,
curriculum_sample=curriculum_sample)
if reset_state is not None:
# We should always be able to reset from a reset state
# in a single attempt.
assert found
if found:
return time_step
def _clip_slot_index(self, slot_index):
if slot_index < 0:
slot_index = 0
elif slot_index >= len(self._initial_available_objects):
slot_index = len(self._initial_available_objects) - 1
return slot_index
def _selector_value_to_slot_index(self, selector_value):
slot_index = int(np.digitize(selector_value, self._display_edges)-1)
return self._clip_slot_index(slot_index)
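  # Illustrative sketch (added comment, not original code): with
  # self._display_edges == [-7., 0.5, 7.] (two available blocks),
  # np.digitize(-3.0, edges) - 1 == 0 selects the first slot and
  # np.digitize(2.0, edges) - 1 == 1 selects the second; selector values
  # outside [-7, 7] are clipped to the nearest valid slot by _clip_slot_index.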
def _end_episode(self, reason):
if reason not in self.all_termination_reasons:
raise ValueError("invalid termination reason: {}".format(reason))
self._termination_reason = reason
self._is_end_of_episode = True
@property
def termination_reason(self):
return self._termination_reason
@property
def all_termination_reasons(self):
return [
constants.TERMINATION_MAX_STEPS,
constants.TERMINATION_SPAWN_COLLISION,
constants.TERMINATION_OBSTACLE_HIT,
constants.TERMINATION_COMPLETE,
constants.TERMINATION_BAD_SIMULATION,
constants.TERMINATION_BAD_CHOICE,
]
@property
def core_env(self):
return self
@property
def difficulty(self):
"""Returns the overall current difficulty passed to init or reset method.
If `curriculum_sample` is True, the difficulty of the current episode will
be sampled from 0 up to this value, and can be obtained via
`episode_difficulty`.
"""
return self._overall_difficulty
@property
def episode_difficulty(self):
"""Returns the actual difficulty of the present episode.
If `curriculum_sample` is False, this will always be equal to `difficulty`.
Otherwise, it will be `0 <= episode_difficulty <= difficulty`.
"""
return self._episode_difficulty
@property
def episode_logs(self):
"""A dictionnary of logs for a completed episode."""
normalized_glue_points = 0.
if self._num_steps > 0:
normalized_glue_points = self._num_sticky_blocks/float(self._num_steps)
return dict(
score=self._previous_score,
num_steps=self._num_steps,
glue_points=self._num_sticky_blocks,
normalized_score=self._previous_score/self._max_episode_reward,
normalized_glue_points=normalized_glue_points)
@property
def last_time_step(self):
return self._last_time_step
# Abstract methods below.
def _enough_progress(self, progress):
"""Whether enough reward has been obtained."""
return progress > self._max_episode_reward * self._progress_threshold
@abc.abstractmethod
def _get_generator(self, difficulty):
"""Will return a generator for the required difficulty."""
@abc.abstractmethod
def _get_task_reward(self, obstacles, targets, blocks):
"""Returns the score for this set of obstacles, targets and blocks."""
@abc.abstractmethod
def _maybe_update_max_steps(self):
"""Update max_num_steps based on the current instance properties."""
def _get_sampled_episode_difficulty(
self, difficulty, curriculum_sample):
"""Returns a value of the difficulty to be used for the next episode."""
if not curriculum_sample:
# If we don't do curriculum sample, we just return the passed difficulty.
return difficulty
    # We will sample a difficulty value from 0 up to `difficulty`.
candidate_difficulties = list(range(difficulty + 1))
num_candidate_difficulties = len(candidate_difficulties)
# And define the probabilities that we will sampling from each level.
if self._curriculum_sample_geom_p > 0.:
distribution = stats.distributions.geom(
p=self._curriculum_sample_geom_p)
      # The geometric distribution pmf starts at k=1.
probs = distribution.pmf(np.arange(1, num_candidate_difficulties+1))
      # The geometric pmf goes from high to low over ranks, but we want the
      # opposite (higher probability for the highest difficulty level).
probs = probs[::-1]
else:
# A value of 0. corresponds to uniform distribution among all
# candidate difficulties.
probs = np.ones([num_candidate_difficulties], dtype=np.float32)
# Normalize probabilities.
candidate_difficulties_probs = probs / probs.sum()
# Sample a difficulty according to their probabilities.
sampled_difficulty = int(np.random.choice(
candidate_difficulties, p=candidate_difficulties_probs))
return sampled_difficulty
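  # Illustrative sketch (added comment, not original code): with difficulty=2
  # and curriculum_sample_geom_p=0.6, the geometric pmf over ranks 1..3 is
  # [0.6, 0.24, 0.096]; reversing and normalizing gives approximately
  # [0.10, 0.26, 0.64] for difficulties [0, 1, 2], so the current (highest)
  # difficulty is sampled most often.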
def _get_new_starting_configuration(
self, difficulty, curriculum_sample):
sampled_difficulty = self._get_sampled_episode_difficulty(
difficulty, curriculum_sample)
self._generator = self._get_generator(sampled_difficulty)
self._episode_difficulty = sampled_difficulty
blocks_observation = self._generator.generate_one()
# Rescale the blocks observation.
blocks_observation = block_utils.transform_blocks_observation(
blocks_observation, self._generator_scale, self._generator_offset)
return blocks_observation
def _get_difficulty_and_curriculum_sample(
self, reset_difficulty, reset_curriculum_sample):
if not ((reset_difficulty is None) ^
(self._init_difficulty is None)):
raise ValueError(
"A difficulty value must be passed to the constructor (%s) or "
"to the reset method (%s) and never to both." % (
self._init_difficulty, reset_difficulty))
if not ((reset_curriculum_sample is None) ^
(self._init_curriculum_sample is None)):
raise ValueError(
"A curriculum_sample value must be passed to the constructor (%s) or "
"to the reset method (%s) and never to both." % (
self._init_curriculum_sample, reset_curriculum_sample))
if reset_difficulty is not None:
difficulty = reset_difficulty
else:
difficulty = self._init_difficulty
if reset_curriculum_sample is not None:
curriculum_sample = reset_curriculum_sample
else:
curriculum_sample = self._init_curriculum_sample
if isinstance(difficulty, int):
if difficulty > self._max_difficulty or difficulty < 0:
raise ValueError("Trying to set a value of the difficulty (%d) larger "
"than the maximum difficulty (%d) or smaller than 0" %(
difficulty, self._max_difficulty))
elif isinstance(difficulty, str):
if curriculum_sample:
raise ValueError(
"`difficulty` can only be a passed as a string when using "
"`curriculum_sample==False`, got `difficulty==%s`" % difficulty)
else:
raise ValueError(
"Difficulty must be `int` or `str`, got (%s) with type (%s)" %
(str(difficulty), type(difficulty)))
return difficulty, curriculum_sample
def _try_to_reset(self, reset_state, difficulty, curriculum_sample):
"""Tries to generate a new episode.
Args:
reset_state: A full state that guarantees that an environment will be
reset to the same initial conditions as a past episode.
difficulty: Difficulty of the environment.
curriculum_sample: If `True`, then when doing the curriculum, difficulties
up to the current difficulty are sampled.
Returns:
1. A boolean indicating whether the scene generation was successful.
2. A time_step corresponding to the beginning of an episode, if the
generation was successful, or None.
"""
if reset_state is None:
(difficulty,
curriculum_sample) = self._get_difficulty_and_curriculum_sample(
difficulty, curriculum_sample)
self._overall_difficulty = difficulty
self._initial_scene = self._get_new_starting_configuration(
difficulty, curriculum_sample)
self._initial_available_objects = self._initial_scene.blocks[1:]
self._maybe_update_max_steps()
# It is assumed that from here on, everything is deterministic, so it is
# a safe point to obtain the reset_state.
self._reset_state = None # So we don't get this as part of the state.
self._reset_state = self.get_state(ignore_unity_state=True)
else:
if difficulty is not None:
raise ValueError(
"`difficulty` should be None when `reset_state` is passed.")
if curriculum_sample is not None:
raise ValueError(
"`curriculum_sample` should be None when `reset_state` is passed.")
self.set_state(reset_state, ignore_unity_state=True)
# This is the only thing that would not have been restored.
self._reset_state = reset_state
return self._deterministic_reset()
def _deterministic_reset(self):
"""Set-up work for the episode that is fully deterministic on the state."""
# Start setting up the scene in Unity.
setup_actions = []
self._unity_environment.reset()
# Indices corresponding to the _initial_available_objects still available.
# (All of them are available at the beginning of the episode).
self._remaining_indices = {
i for i in range(len(self._initial_available_objects))}
# Place the obstacles.
self._num_obstacles = len(self._initial_scene.obstacles)
self._num_targets = len(self._initial_scene.targets)
self._max_episode_reward = self._compute_max_episode_reward(
self._initial_scene)
self._progress = None
object_index = len(self._initial_available_objects) + 1
obstacle_color = self._obstacle_color
for obstacle in self._initial_scene.obstacles:
setup_actions.append(
{"SetId": object_index,
"SetPosX": obstacle.x, "SetPosY": obstacle.y,
"Width": obstacle.width,
"Height": obstacle.height,
"SetAngle": obstacle.angle,
"Shape": obstacle.shape,
"SpawnBlock": 1.,
"RGBA": obstacle_color})
object_index += 1
target_color = self._target_color
for target in self._initial_scene.targets:
setup_actions.append(
{"SetId": object_index,
"SetPosX": target.x, "SetPosY": target.y,
"Width": target.width,
"Height": target.height,
# By default, collision masks are 0b0001, so by using 0b0010 target
# will not collide with any block, unless their mask matches 0b??1?.
"CollisionMask": 0b10,
"SetAngle": target.angle,
"Shape": target.shape,
"SpawnBlock": 1.,
"RGBA": target_color})
object_index += 1
# Add the balls only for display purposes.
self._ball_ids = []
for ball in self._initial_scene.balls:
self._ball_ids.append(object_index)
setup_actions.append({
"SpawnBlock": 1.,
"PhysicalBody": 0.,
"Shape": ball.shape,
"SetId": object_index,
"SetPosX": ball.x,
"SetPosY": ball.y,
"Width": ball.width,
"Height": ball.height,
"RGBA": np.array(list(self._ball_color[:3]) + [0.5]),
})
object_index += 1
self._display_ids = []
self._display_coordinates = []
blocks_starts = []
blocks_ends = []
for display_index, block in enumerate(self._initial_available_objects):
# We give explicit positive ids to the display objects,
# so we can remove them later using their ids.
display_id = (display_index+1)
y_display = -1.
x_display = block.x
setup_actions.append(
{"SetId": display_id,
"SetPosX": block.x, "SetPosY": block.y,
"Width": block.width,
"Height": block.height,
"SetAngle": block.angle,
"Shape": block.shape,
"SpawnBlock": 1., "RGBA": self._block_color})
self._display_ids.append(display_id)
self._display_coordinates.append((x_display, y_display))
blocks_starts.append(block.x-np.abs(block.width)/2.)
blocks_ends.append(block.x+np.abs(block.width)/2.)
    # Compute the edge between two consecutive blocks as the midpoint between
    # the end of the previous block and the start of the next block.
edges = [(x + y) / 2. for x, y in zip(blocks_ends[:-1], blocks_starts[1:])]
self._display_edges = [-self._display_limit] + edges + [self._display_limit]
# Place the floor.
floor = self._initial_scene.blocks[0]
setup_actions.append(
{"SetId": object_index,
"SetPosX": floor.x, "SetPosY": floor.y,
"SetAngle": floor.angle,
"Shape": floor.shape,
"Width": floor.width, "Height": floor.height,
"SpawnBlock": 1., "R": 0., "G": 0., "B": 0., "A": 1.})
self._next_object_id = object_index + 1
self._previous_cost = 0
self._previous_score = 0.
self._num_steps = 0
self._is_end_of_episode = False
time_step = self._unity_environment.step(setup_actions)
self._initial_spawn_collision_count = time_step.observation[
"SpawnCollisionCount"]
first_time_step = self._set_observation_and_termination(
time_step, default_step_type=dm_env.StepType.FIRST)
time_step = first_time_step
self._termination_reason = None
return True, time_step._replace(step_type=first_time_step.step_type)
def enable_frame_observer(self):
"""Enables a frame observer on the Unity environment.
This observer will gather frames from the Unity observer camera, which
typically produces higher-res images than agent observations.
"""
if self._frame_observer is not None:
raise ValueError("the frame observer is already enabled")
obs_spec = self._unity_environment.observation_spec()
if "ObserverRGB" not in obs_spec:
raise ValueError(
"the observer camera in the Unity environment is not enabled")
self._frames_list = []
self._frame_observer = (
lambda obs: self._frames_list.append(obs["ObserverRGB"]))
self._unity_environment.add_observer(self._frame_observer)
def disable_frame_observer(self):
"""Disables the frame observer on the Unity environment.
    After this call, frames from the Unity observer camera are no longer
    collected.
"""
if self._frame_observer is None:
return
self._unity_environment.remove_observer(self._frame_observer)
self._frames_list = None
self._frame_observer = None
def pop_observer_frames(self):
"""Queries frames from the frame observer, and empties the frame list.
Returns:
observations: list of RGB frames
"""
if self._frame_observer is None:
raise ValueError("the frame observer is not enabled")
observations = self._frames_list.copy()
self._frames_list[:] = []
return observations
class GenerationError(Exception):
pass
class StackingGenerator(metaclass=abc.ABCMeta):
"""Abstract base class for construction generators."""
def __init__(self,
num_blocks_range,
scene_width,
random_state,
height=10,
margin=5,
num_small=3,
num_medium=3,
num_large=1):
"""Initialize the generator.
Args:
      num_blocks_range: a tuple indicating the range of the number of
        obstacles that will be in the generated scenes, from low (inclusive)
        to high (exclusive).
scene_width: the width of the scene.
random_state: a np.random.RandomState object
height: the height of a block
margin: the space between blocks
num_small: the number of small available blocks
num_medium: the number of medium available blocks
num_large: the number of large available blocks
"""
self.num_blocks_range = num_blocks_range
self.scene_width = scene_width
self.random_state = random_state
self._state_ignore_fields = ["random_state"]
self.scene_height = self.scene_width
# Width of small, medium, and large blocks.
self.small_width = constants.SMALL_WIDTH
self.medium_width = constants.MEDIUM_WIDTH
self.large_width = constants.LARGE_WIDTH
self.height = height
self.margin = margin
self._num_small = num_small
self._num_medium = num_medium
self._num_large = num_large
def get_state(self):
return serialization.get_object_state(self, self._state_ignore_fields)
def set_state(self, state):
serialization.set_object_state(self, state, self._state_ignore_fields)
def _place_available_objects(self):
"""Create the set of objects that can be picked up."""
# compute the margins between available blocks
available_width = self.scene_width
available_width -= self._num_small * self.small_width
available_width -= self._num_medium * self.medium_width
available_width -= self._num_large * self.large_width
num_available = self._num_small + self._num_medium + self._num_large
if num_available > 1:
margin = available_width / (num_available - 1)
else:
margin = available_width
assert margin >= 1
margin = np.floor(margin)
current_position = dict(x=0, y=-2 * (self.margin + self.height))
def add_block(width):
block = block_utils.Block(
x=current_position["x"] + width / 2,
y=current_position["y"],
width=width,
height=self.height)
current_position["x"] += width + margin
return block
observation_blocks = [
add_block(self.small_width) for _ in range(self._num_small)]
observation_blocks += [
add_block(self.medium_width) for _ in range(self._num_medium)]
observation_blocks += [
add_block(self.large_width) for _ in range(self._num_large)]
assert current_position["x"] - margin <= self.scene_width
return observation_blocks
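  # Illustrative sketch (added comment, not original code): with the default
  # 3 small (width 10), 3 medium (width 30) and 1 large (width 50) blocks and
  # scene_width = 200, available_width = 200 - 30 - 90 - 50 = 30, which is
  # split into floor(30 / 6) = 5 units of margin between the 7 displayed
  # blocks.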
def _place_floor(self):
floor_height = self.height / 2
floor = block_utils.Block(
x=self.scene_width / 2., y=-floor_height / 2.,
height=floor_height, width=self.scene_width * 2)
return floor
@abc.abstractmethod
def generate_one(self):
"""Generate a single scene.
Returns:
A BlocksObservation object
"""
pass
| dm_construction-master | dm_construction/environments/stacking.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A construction environment where the task is to cover obstacles.
See: Bapst, V., Sanchez-Gonzalez, A., Doersch, C., Stachenfeld, K., Kohli, P.,
Battaglia, P., & Hamrick, J. (2019, May). Structured agents for physical
construction. In International Conference on Machine Learning (pp. 464-474).
"""
from dm_construction.environments import stacking
from dm_construction.unity import constants as unity_constants
from dm_construction.utils import block as block_utils
from dm_construction.utils import geometry
import numpy as np
class _CoveringTracker(object):
"""Keeps track of how much of an obstacle has been covered."""
def __init__(self, obstacle):
self._obstacle = obstacle
self._segment_list = []
def _add(self, xmin, xmax):
"""Track coverage in the segment (xmin, xmax)."""
for segment_xmin, segment_xmax in self._segment_list:
# We already have a segment spanning the entire range we are trying to
# add, so it is redundant.
if segment_xmin <= xmin and segment_xmax >= xmax:
return
# The segment is disjoint from the one we are trying to add
if segment_xmin > xmax or segment_xmax < xmin:
continue
# The segments overlap, so we merge them together.
else:
self._segment_list.remove((segment_xmin, segment_xmax))
self._add(min((segment_xmin, xmin)), max((segment_xmax, xmax)))
return
# At this point, it means the segment we're adding is totally disjoint from
# all the others, so we add it to the list.
self._segment_list.append((xmin, xmax))
def add_block(self, block):
"""Computes the xmin and xmax of the block boundaries and tracks it."""
cos_index = unity_constants.COSINE_ANGLE_FEATURE_INDEX
sin_index = unity_constants.SINE_ANGLE_FEATURE_INDEX
x_index = unity_constants.POSITION_X_FEATURE_INDEX
y_index = unity_constants.POSITION_Y_FEATURE_INDEX
width_index = unity_constants.WIDTH_FEATURE_INDEX
height_index = unity_constants.HEIGHT_FEATURE_INDEX
# The block is below the obstacle.
if block[y_index] < self._obstacle[y_index]:
return
x = block[x_index]
angle = np.arctan2(block[sin_index], block[cos_index])
width = block[width_index]
height = block[height_index]
projected_width = geometry.rect_projected_width(width, height, angle)
self._add(x - projected_width / 2., x + projected_width / 2.)
def compute_amount_covered(self):
"""Computes the amount of the obstacle that is covered."""
width_index = unity_constants.WIDTH_FEATURE_INDEX
x_index = unity_constants.POSITION_X_FEATURE_INDEX
obstacle_width = self._obstacle[width_index]
xmin = self._obstacle[x_index] - obstacle_width / 2.
xmax = self._obstacle[x_index] + obstacle_width / 2.
value = 0
for segment_xmin, segment_xmax in self._segment_list:
if segment_xmin <= xmax and segment_xmax >= xmin:
xmax_ = min(xmax, segment_xmax)
xmin_ = max(xmin, segment_xmin)
value += xmax_ - xmin_
return value
def _compute_covered_length(obstacles, blocks):
"""Compute the length of `o` in `obstacles` covered by any `b` in `blocks`."""
total_covered_length = 0
for obstacle in obstacles:
tracker = _CoveringTracker(obstacle)
for block in blocks:
tracker.add_block(block)
total_covered_length += tracker.compute_amount_covered()
return total_covered_length
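# Illustrative sketch (added comment, not original code): for one obstacle of
# width 4 centered at x=0 (so its top surface spans [-2, 2]), a block above it
# spanning x in [-3, 1] and another spanning [0, 2] are merged by the tracker
# into the single segment [-3, 2]; its intersection with [-2, 2] has length 4,
# so the covered length is 4. Blocks whose y position is below the obstacle's
# are ignored.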
class ConstructionCovering(stacking.ConstructionStacking):
"""Construction task consisting of covering obstacles laying on the ground.
In the Covering task, the agent must build a shelter that covers all obstacles
from above, without touching them. The reward function is: +L, where L is the
sum of the lengths of the top surfaces of the obstacles which are sheltered by
blocks placed by the agent; and -2 for each block set as sticky. The task-
specific termination criterion is achieved when at least 99% of the summed
obstacle surfaces are covered. The layers of obstacles are well-separated
vertically so that the agent can build structures between them.
"""
def __init__(self,
unity_environment,
sticky_penalty=2.0,
**stacking_kwargs):
"""Inits the environment.
Args:
unity_environment: See base class.
sticky_penalty: See base class.
**stacking_kwargs: keyword arguments passed to
covering.ConstructionStacking.
"""
default_stacking_kwargs = dict(
block_replacement=True,
max_difficulty=2,
progress_threshold=0.99)
default_stacking_kwargs.update(stacking_kwargs)
super(ConstructionCovering, self).__init__(
unity_environment=unity_environment,
sticky_penalty=sticky_penalty,
**default_stacking_kwargs)
def _compute_max_episode_reward(self, obs):
# Assuming you don't need glue.
max_episode_reward = 0
for obstacle in obs.obstacles:
max_episode_reward += obstacle.width
return max_episode_reward
def _maybe_update_max_steps(self):
self._max_steps = len(self._initial_available_objects) * 4
def _get_task_reward(self, obstacles, targets, blocks):
del targets
covered_length = _compute_covered_length(obstacles, blocks)
return covered_length
def _get_generator(self, difficulty):
if isinstance(difficulty, str):
raise ValueError("Unrecognized difficulty: %s" % difficulty)
# Up to `difficulty+1` layers of obstacles, interleaved with layers
# with no obstacles.
obstacles_ys_range = [tuple(np.arange(difficulty+1) * 2)]
return CoveringGenerator(
num_blocks_range=(1, 3),
scene_width=self._generator_width,
random_state=self._random_state,
obstacles_ys_range=obstacles_ys_range,
obstacles_width_range=(10, 40))
class ConstructionCoveringHard(ConstructionCovering):
"""Hard version of the covering task.
In the Covering Hard task, the agent must build a shelter, but the task is
modified to encourage longer term planning: there is a finite supply of
movable blocks, the distribution of obstacles is denser, and the cost of
stickiness is lower (-0.5 per sticky block). The reward function and
termination criterion are the same as in Covering.
"""
def __init__(self,
unity_environment,
sticky_penalty=0.5,
**covering_kwargs):
"""Inits the environment.
Args:
unity_environment: See base class.
sticky_penalty: See base class.
**covering_kwargs: keyword arguments passed to
covering.ConstructionCovering.
"""
super(ConstructionCoveringHard, self).__init__(
unity_environment=unity_environment,
sticky_penalty=sticky_penalty,
block_replacement=False,
max_difficulty=1,
**covering_kwargs)
def _get_generator(self, difficulty):
if isinstance(difficulty, str):
raise ValueError("Unrecognized difficulty: %s" % difficulty)
# Up to `difficulty+1` layers of obstacles.
obstacles_ys_range = [tuple(range(difficulty+1))]
return CoveringGenerator(
num_blocks_range=(1, 3),
scene_width=self._generator_width,
random_state=self._random_state,
obstacles_ys_range=obstacles_ys_range,
obstacles_width_range=(10, 50))
def _maybe_update_max_steps(self):
self._max_steps = len(self._initial_available_objects) * 2
class CoveringGenerator(stacking.StackingGenerator):
"""Generates a set of obstacles for the covering task."""
def __init__(self,
num_blocks_range,
scene_width,
random_state,
obstacles_width_range=(10, 50),
obstacles_ys_range=None,
obstacles_height=5,
**kwargs):
"""Initialize the generator.
Args:
      num_blocks_range: a tuple indicating the range of the number of
        obstacles that will be in the generated scenes, from low (inclusive)
        to high (exclusive).
scene_width: the width of the scene.
random_state: a np.random.RandomState object
obstacles_width_range: the range of widths for obstacles, from low
(inclusive) to high (exclusive).
      obstacles_ys_range: y-positions to draw the obstacles from. A tuple of
        y-positions will be sampled from this range. These are scaled
        appropriately, so that -1 corresponds to an object below the floor and
        0 to an object on the floor.
obstacles_height: The height of the obstacles.
**kwargs: additional keyword arguments passed to super
"""
super(CoveringGenerator, self).__init__(
num_blocks_range=num_blocks_range,
scene_width=scene_width,
random_state=random_state,
**kwargs)
obstacles_ys_range = obstacles_ys_range or [(0,)]
self.obstacle_height = obstacles_height
scale_y_fn = lambda y: (y + 0.5) * self.height
scale_ys_fn = lambda ys: tuple([scale_y_fn(y) for y in ys])
self._obstacles_ys_range = [
scale_ys_fn(obstacles_ys) for obstacles_ys in obstacles_ys_range]
self._obstacles_width_range = obstacles_width_range
def generate_one(self):
"""Generate a single scene.
Returns:
      observation: a BlocksObservation object
"""
# pick the set of y-positions we want for our obstacles
idx = np.arange(len(self._obstacles_ys_range))
obstacles_ys = self._obstacles_ys_range[self.random_state.choice(idx)]
# place the obstacles at each level, going from bottom to top
obstacles = []
for y in obstacles_ys:
# get the number of obstacles at this layer
num_obstacles = self.random_state.randint(*self.num_blocks_range)
      # pick a set of obstacle widths, and check that the sum of the widths
      # leaves some buffer room within the scene width. keep regenerating
      # obstacles until this is the case.
available_width = 0
while available_width < self.small_width + 1:
available_widths = np.arange(*self._obstacles_width_range)
obstacles_lengths = self.random_state.choice(
available_widths, size=[num_obstacles])
available_width = self.scene_width - np.sum(obstacles_lengths)
# compute the left and right edges of each obstacle, assuming the
# obstacles are all placed right next to each other beginning from the
# left side of the scene.
obstacles_begins = np.concatenate(
[np.array([0], dtype=np.int32), np.cumsum(obstacles_lengths)[:-1]])
obstacles_ends = np.cumsum(obstacles_lengths)
# available_width now is the amount of space left on the floor, not taken
# up by obstacles. we split this into a few chunks of random size to space
# the obstacles out along the floor
relative_shifts = self.random_state.uniform(
0., 1., size=[num_obstacles + 1])
relative_shifts /= np.sum(relative_shifts)
relative_shifts = np.floor(relative_shifts * available_width)
shifts = np.cumsum(relative_shifts.astype(np.int32))[:-1]
obstacles_begins += shifts
obstacles_ends += shifts
# now actually create the obstacles
for obstacle_begin, obstacle_end in zip(obstacles_begins, obstacles_ends):
center = (obstacle_begin + obstacle_end) // 2
width = obstacle_end - obstacle_begin
obstacle = block_utils.Block(
x=center, y=y, width=width, height=self.obstacle_height)
obstacles.append(obstacle)
observation_blocks = self._place_available_objects()
floor = self._place_floor()
observation = block_utils.BlocksObservation(
blocks=[floor] + observation_blocks,
obstacles=obstacles,
targets=[],
balls=[])
return observation
| dm_construction-master | dm_construction/environments/covering.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Constant values."""
# Available block sizes.
SMALL_WIDTH = 10
MEDIUM_WIDTH = 30
LARGE_WIDTH = 50
# Types of objects in the ConstructionStacking base environment.
BLOCK = "Blocks"
AVAILABLE_BLOCK = "AvailableBlocks"
OBSTACLE = "Obstacles"
TARGET = "Targets"
BALL = "Balls"
# ConstructionStacking termination types.
TERMINATION_MAX_STEPS = "max_steps"
TERMINATION_SPAWN_COLLISION = "spawn_collision"
TERMINATION_OBSTACLE_HIT = "obstacle_hit"
TERMINATION_COMPLETE = "complete"
TERMINATION_BAD_SIMULATION = "bad_simulation"
TERMINATION_BAD_CHOICE = "bad_choice"
# Termination types of the DiscreteRelativeGraphWrapper.
TERMINATION_INVALID_EDGE = "invalid_edge"
| dm_construction-master | dm_construction/utils/constants.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dm_construction-master | dm_construction/utils/__init__.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from absl import flags
from absl.testing import absltest
from dm_construction.utils import geometry
FLAGS = flags.FLAGS
class GeometryTest(absltest.TestCase):
def test_area(self):
# Rectangular triangle
self.assertAlmostEqual(geometry._area(0, 0, 0, 1, 1, 0), 0.5)
    # Translate it along the x-axis
self.assertAlmostEqual(geometry._area(10, 0, 10, 1, 11, 0), 0.5)
    # Translate it along the y-axis
self.assertAlmostEqual(geometry._area(0, 10, 0, 11, 1, 10), 0.5)
# Rotate it
self.assertAlmostEqual(geometry._area(0, 0, 1, 0, 0.5, 1), 0.5)
# Non-rectangular anymore
self.assertAlmostEqual(geometry._area(0, 0, 2, 0, 0.5, 1), 1.)
def test_rotation(self):
# No rotation
x, y = geometry.rotate_rectangle_corner(2, -1, 0, 0, 1., 0.)
self.assertAlmostEqual(x, 2)
self.assertAlmostEqual(y, -1)
# 90 degrees
x, y = geometry.rotate_rectangle_corner(2, -1, 0, 0, 0., 1.)
self.assertAlmostEqual(x, 1)
self.assertAlmostEqual(y, 2)
def test_is_point_in_rectangle(self):
self.assertTrue(
geometry.is_point_in_rectangle(-1, -1, -1, 1, 1, 1, 1, -1, 0, 0))
# Just on the boundary
self.assertTrue(
geometry.is_point_in_rectangle(-1, -1, -1, 1, 1, 1, 1, -1, 1, 1))
# Outside
self.assertFalse(
geometry.is_point_in_rectangle(-1, -1, -1, 1, 1, 1, 1, -1, 1, 1.1))
if __name__ == "__main__":
absltest.main()
| dm_construction-master | dm_construction/utils/geometry_test.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Geometry utility functions."""
import numpy as np
import shapely.affinity
import shapely.geometry
def _area(x1, y1, x2, y2, x3, y3):
"""Heron's formula."""
a = np.sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2))
b = np.sqrt(pow(x3 - x2, 2) + pow(y3 - y2, 2))
c = np.sqrt(pow(x1 - x3, 2) + pow(y3 - y1, 2))
s = (a + b + c) / 2
return np.sqrt(s * (s - a) * (s - b) * (s - c))
def is_point_in_rectangle(x1, y1, x2, y2, x3, y3, x4, y4, x, y):
"""Whether (x, y) is within the rectange defined by (x1, y1), .. (x4, y4)."""
# Calculate area of rectangle ABCD
rect_area = (_area(x1, y1, x2, y2, x3, y3) + _area(x1, y1, x4, y4, x3, y3))
# Areas of each 4 triangles.
area_1 = _area(x, y, x1, y1, x2, y2) # Calculate area of triangle PAB
area_2 = _area(x, y, x2, y2, x3, y3) # Calculate area of triangle PBC
area_3 = _area(x, y, x3, y3, x4, y4) # Calculate area of triangle PCD
area_4 = _area(x, y, x1, y1, x4, y4) # Calculate area of triangle PAD
  # Check if the sum of triangle areas is the same as the rectangle area.
epsilon = rect_area * 0.001
return rect_area >= area_1 + area_2 + area_3 + area_4 - epsilon
def rotate_rectangle_corner(x, y, center_x, center_y, cos_theta, sin_theta):
temp_x = x - center_x
temp_y = y - center_y
rotated_x = temp_x * cos_theta - temp_y * sin_theta
rotated_y = temp_x * sin_theta + temp_y * cos_theta
x = rotated_x + center_x
y = rotated_y + center_y
return (x, y)
def rect_projected_width(width, height, angle):
"""Get the x projection of an angle-rotated rect. with sides width, height."""
# Angle between floor and first diag.
first_angle = np.arctan2(height, width)
# Angle between floor and second diagonal
second_angle = np.pi - first_angle
diagonal_length = np.sqrt(height * height + width * width)
first_angle += angle # Rotate the cube.
second_angle += angle
projected_width = np.max(
[np.abs(np.cos(first_angle)), np.abs(np.cos(second_angle))])
return diagonal_length * projected_width
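# Illustrative sketch (added comment, not original code): for a 4 x 2
# rectangle (diagonal sqrt(20)), rect_projected_width(4, 2, 0.0) == 4.0 (the
# width itself), rect_projected_width(4, 2, np.pi / 2) == 2.0 (the height),
# and rect_projected_width(4, 2, np.pi / 4) ~= 4.24, i.e. (4 + 2) * cos(pi/4),
# the horizontal extent of the 45-degree rotated box.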
def rect_projected_height(width, height, angle):
"""Get the y projection of an angle-rotated rect. with sides width, height."""
return rect_projected_width(width, height, angle + np.pi/2.)
def rect_bounding_frame(x, y, w, h, angle):
"""Returns the bounding frame with x_end > x_begin and y_end > y_begin."""
projected_width = rect_projected_width(w, h, angle)
projected_height = rect_projected_height(w, h, angle)
block_x_begin, block_x_end = x - projected_width/2., x + projected_width/2.
block_y_begin, block_y_end = y - projected_height/2., y + projected_height/2.
return block_x_begin, block_x_end, block_y_begin, block_y_end
def circle_bounding_frame(x, y, w):
"""Returns the bounding frame with x_end > x_begin and y_end > y_begin."""
r = w/2
return x-r, x+r, y-r, y+r
def bounding_circles_overlap(x0, y0, w0, x1, y1, w1):
center_distance = np.linalg.norm([x1-x0, y1-y0])
return center_distance < w0/2 + w1/2
def bounding_box_overlap(x0_begin, x0_end, y0_begin, y0_end,
x1_begin, x1_end, y1_begin, y1_end):
if (x1_begin > x0_end or x0_begin > x1_end or
y1_begin > y0_end or y0_begin > y1_end):
return False
return True
def rect_overlap(x0, y0, w0, h0, angle0, x1, y1, w1, h1, angle1):
"""Calculates the overlap area between two rectangles."""
# Check if bounding spheres do not intersect.
dw0 = _rect_diagonal(w0, h0)
dw1 = _rect_diagonal(w1, h1)
if not bounding_circles_overlap(x0, y0, dw0, x1, y1, dw1):
return 0.
# Check if bounding boxes do not intersect.
x0_begin, x0_end, y0_begin, y0_end = rect_bounding_frame(
x0, y0, w0, h0, angle0)
x1_begin, x1_end, y1_begin, y1_end = rect_bounding_frame(
x1, y1, w1, h1, angle1)
if not bounding_box_overlap(x0_begin, x0_end, y0_begin, y0_end,
x1_begin, x1_end, y1_begin, y1_end):
return 0.
# Otherwise, calculate proper intersection.
rect_1 = _build_shapely_rectangle(x0, y0, w0, h0, angle0)
rect_2 = _build_shapely_rectangle(x1, y1, w1, h1, angle1)
return rect_1.intersection(rect_2).area
def ramp_overlap(x0, y0, w0, h0, angle0, x1, y1, w1, h1, angle1):
"""Calculates the overlap area between two ramps."""
# Check if bounding spheres do not intersect.
dw0 = _rect_diagonal(w0, h0)
dw1 = _rect_diagonal(w1, h1)
if not bounding_circles_overlap(x0, y0, dw0, x1, y1, dw1):
return 0.
  # Check if bounding boxes do not intersect.
x0_begin, x0_end, y0_begin, y0_end = rect_bounding_frame(
x0, y0, w0, h0, angle0)
x1_begin, x1_end, y1_begin, y1_end = rect_bounding_frame(
x1, y1, w1, h1, angle1)
if not bounding_box_overlap(x0_begin, x0_end, y0_begin, y0_end,
x1_begin, x1_end, y1_begin, y1_end):
return 0.
# Otherwise, calculate proper intersection.
rect_1 = _build_shapely_ramp(x0, y0, w0, h0, angle0)
rect_2 = _build_shapely_ramp(x1, y1, w1, h1, angle1)
return rect_1.intersection(rect_2).area
def rect_ramp_overlap(x0, y0, w0, h0, angle0, x1, y1, w1, h1, angle1):
"""Calculates the overlap area between two rectangles."""
# Check if bounding spheres do not intersect.
dw0 = _rect_diagonal(w0, h0)
dw1 = _rect_diagonal(w1, h1)
if not bounding_circles_overlap(x0, y0, dw0, x1, y1, dw1):
return 0.
  # Check if bounding boxes do not intersect.
x0_begin, x0_end, y0_begin, y0_end = rect_bounding_frame(
x0, y0, w0, h0, angle0)
x1_begin, x1_end, y1_begin, y1_end = rect_bounding_frame(
x1, y1, w1, h1, angle1)
if not bounding_box_overlap(x0_begin, x0_end, y0_begin, y0_end,
x1_begin, x1_end, y1_begin, y1_end):
return 0.
# Otherwise, calculate proper intersection.
rect_1 = _build_shapely_rectangle(x0, y0, w0, h0, angle0)
rect_2 = _build_shapely_ramp(x1, y1, w1, h1, angle1)
return rect_1.intersection(rect_2).area
def rect_ball_overlap(x0, y0, w0, h0, angle0, x1, y1, w1):
"""Calculates the overlap area between a rectangles and a ball."""
# Check if bounding spheres do not intersect.
dw0 = _rect_diagonal(w0, h0)
if not bounding_circles_overlap(x0, y0, dw0, x1, y1, w1):
return 0.
  # Check if bounding boxes do not intersect.
x0_begin, x0_end, y0_begin, y0_end = rect_bounding_frame(
x0, y0, w0, h0, angle0)
x1_begin, x1_end, y1_begin, y1_end = circle_bounding_frame(
x1, y1, w1)
if not bounding_box_overlap(x0_begin, x0_end, y0_begin, y0_end,
x1_begin, x1_end, y1_begin, y1_end):
return 0.
# Otherwise, calculate proper intersection.
rect = _build_shapely_rectangle(x0, y0, w0, h0, angle0)
circle = _build_shapely_circle(x1, y1, w1)
return rect.intersection(circle).area
def ramp_ball_overlap(x0, y0, w0, h0, angle0, x1, y1, w1):
"""Calculates the overlap area between a ramp and a ball."""
# Check if bounding spheres do not intersect.
dw0 = _rect_diagonal(w0, h0)
if not bounding_circles_overlap(x0, y0, dw0, x1, y1, w1):
return 0.
# Check if bounding boxes do not intersect.
x0_begin, x0_end, y0_begin, y0_end = rect_bounding_frame(
x0, y0, w0, h0, angle0)
x1_begin, x1_end, y1_begin, y1_end = circle_bounding_frame(
x1, y1, w1)
if not bounding_box_overlap(x0_begin, x0_end, y0_begin, y0_end,
x1_begin, x1_end, y1_begin, y1_end):
return 0.
# Otherwise, calculate proper intersection.
rect = _build_shapely_ramp(x0, y0, w0, h0, angle0)
circle = _build_shapely_circle(x1, y1, w1)
return rect.intersection(circle).area
def _rect_diagonal(w, h):
"""Calculates the radius of a rectangle."""
return np.sqrt(w**2 + h**2)
def _build_shapely_rectangle(x, y, w, h, angle):
"""Creates a shapely object representing a rectangle."""
centered = shapely.geometry.box(-w/2, -h/2, w/2, h/2)
rotated = shapely.affinity.rotate(centered, angle/np.pi*180)
return shapely.affinity.translate(rotated, x, y)
def _build_shapely_ramp(x, y, w, h, angle):
"""Creates a shapely object representing a ramp."""
centered = shapely.geometry.Polygon([(-w/2, -h/2), (-w/2, h/2), (w/2, -h/2)])
rotated = shapely.affinity.rotate(centered, angle/np.pi*180)
return shapely.affinity.translate(rotated, x, y)
def _build_shapely_circle(x, y, w):
"""Creates a shapely object representing a rectangle."""
return shapely.geometry.Point(x, y).buffer(w/2)
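# Example usage (not part of the original module): a quick sanity check of the
# overlap helpers above, assuming numpy and shapely are imported as elsewhere
# in this file.
def _example_overlap_check():
  # Two axis-aligned unit squares whose centers are half a unit apart overlap
  # in an area of 0.5.
  area = rect_overlap(0., 0., 1., 1., 0., 0.5, 0., 1., 1., 0.)
  assert abs(area - 0.5) < 1e-6
  # The origin lies inside the unit square centered at the origin.
  assert is_point_in_rectangle(
      -0.5, -0.5, 0.5, -0.5, 0.5, 0.5, -0.5, 0.5, 0., 0.)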
| dm_construction-master | dm_construction/utils/geometry.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for handling manipulating building blocks."""
import collections
from dm_construction.unity import constants as unity_constants
# Block is a namedtuple which contains the relevant attributes for a block:
# its x and y coordinates of the center of the block, and the width and height
# of the block (should all be floats).
Block = collections.namedtuple("Block", ["x", "y", "width", "height",
"angle", "shape"])
# Default values for backwards compatibility: angle (0.) and shape (box).
Block.__new__.__defaults__ = (0., unity_constants.BOX_SHAPE)
BlocksObservation = collections.namedtuple(
"BlocksObservation",
["blocks", "obstacles", "targets", "balls"])
def transform_block(block, scale, translation):
"""Transform a block by a specified scale and translation.
This scales BOTH the width/height as well as the x and y positions, and THEN
performs the translation.
Args:
block: Block object
scale: a tuple/list of length two, corresponding to the scale of the x and
y dimensions
translation: a tuple/list of length two, corresponding to the translation
in the x and y dimensions
Returns:
block: a scaled Block object
"""
if block is None:
return None
if block.x is None:
x = None
else:
x = (block.x * scale[0]) + translation[0]
if block.y is None:
y = None
else:
y = (block.y * scale[1]) + translation[1]
width = block.width * scale[0]
height = block.height * scale[1]
return block._replace(x=x, y=y, width=width, height=height)
def transform_blocks_observation(observation, scale, translation):
"""Scale and translate a blocks observation by a specified amount.
This scales BOTH the width/height as well as the x and y positions, and THEN
performs the translation.
Args:
observation: a BlocksObservation object
scale: a tuple/list of length two, corresponding to the scale of the x and
y dimensions
translation: a tuple/list of length two, corresponding to the translation
in the x and y dimensions
Returns:
observation: a scaled BlocksObservation object
"""
transform = lambda b: transform_block(b, scale, translation)
return BlocksObservation(
[transform(b) for b in observation.blocks],
[transform(b) for b in observation.obstacles],
[transform(b) for b in observation.targets],
[transform(b) for b in observation.balls])
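# Example usage (not part of the original module): scaling a block by 2x in
# both dimensions and then shifting it down by one unit; `angle` and `shape`
# fall back to the defaults defined above.
def _example_transform_block():
  block = Block(x=1., y=2., width=1., height=0.5)
  scaled = transform_block(block, scale=(2., 2.), translation=(0., -1.))
  # x: 1 * 2 + 0 = 2, y: 2 * 2 - 1 = 3, width: 2, height: 1.
  assert (scaled.x, scaled.y, scaled.width, scaled.height) == (2., 3., 2., 1.)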
| dm_construction-master | dm_construction/utils/block.py |
#!/usr/bin/python
#
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utils for serializing objects."""
import copy
def get_object_state(obj, ignore_attributes=()):
"""Returns a dictionary with the state of the attributes of an object.
Note that this is not general. For example, it will throw an error for classes
that define a __slots__ field (like namedtuples).
Args:
obj: a Python object
ignore_attributes: list of attributes to ignore when getting the object
state
Returns:
state: a dictionary representation of the object state.
"""
state = {}
for k, v in obj.__dict__.items():
if k not in ignore_attributes:
state[k] = copy.deepcopy(v)
for k in ignore_attributes:
if not hasattr(obj, k):
raise ValueError("Ignored attribute `%s` does not exist in object." % k)
return state
def set_object_state(obj, state, ignore_attributes=()):
"""Sets the state of an object obtained through `get_object_state`.
Note that this is not general. For example, it will not work for classes
that define a __slots__ field (like namedtuples).
Args:
obj: a Python object
state: the state to set on the object (obtained with `get_object_state`).
    ignore_attributes: list of attributes to ignore when setting the object
state.
"""
for k, v in state.items():
if k not in ignore_attributes:
setattr(obj, k, copy.deepcopy(v))
for k in ignore_attributes:
if not hasattr(obj, k):
raise ValueError("Ignored attribute `%s` does not exist in object." % k)
| dm_construction-master | dm_construction/utils/serialization.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training and evaluation loops for an experiment.
The code in this file is adapted from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
import time
from typing import Any, Mapping, Text, Type
from absl import app
from absl import flags
from absl import logging
import jax
import numpy as np
from relicv2 import eval_experiment
from relicv2.configs import eval as eval_config
_WORKER_MODE = flags.DEFINE_string('worker_mode', 'train',
'The mode, train or eval')
_WORKER_TPU_DRIVER = flags.DEFINE_string('worker_tpu_driver', '',
'The tpu driver to use')
_BATCH_SIZE = flags.DEFINE_integer('batch_size', 1024, 'Total batch size')
_NUM_EPOCHS = flags.DEFINE_integer('num_epochs', 100,
'Number of training epochs for evaluation.')
_CHECKPOINT_ROOT = flags.DEFINE_string('checkpoint_root', '',
'The directory to save checkpoints to.')
_LOG_TENSORS_INTERVAL = flags.DEFINE_integer('log_tensors_interval', 60,
'Log tensors every n seconds.')
FLAGS = flags.FLAGS
ExperimentType = Type[eval_experiment.EvalExperiment]
def train_loop(experiment_class: ExperimentType, config: Mapping[Text, Any]):
"""The main training loop.
This loop periodically saves a checkpoint to be evaluated in the eval_loop.
Args:
experiment_class: the constructor for the experiment.
config: the experiment config.
"""
experiment = experiment_class(**config)
rng = jax.random.PRNGKey(0)
step = 0
host_id = jax.host_id()
last_logging = time.time()
if config['checkpointing_config']['use_checkpointing']:
checkpoint_data = experiment.load_checkpoint()
if checkpoint_data is None:
step = 0
else:
step, rng = checkpoint_data
local_device_count = jax.local_device_count()
while step < config['max_steps']:
step_rng, rng = tuple(jax.random.split(rng))
# Broadcast the random seeds across the devices
step_rng_device = jax.random.split(step_rng, num=jax.device_count())
first_local_device_id = host_id * local_device_count
step_rng_device = step_rng_device[first_local_device_id:(
first_local_device_id + local_device_count)]
step_device = np.broadcast_to(step, [local_device_count])
# Perform a training step and get scalars to log.
scalars = experiment.step(global_step=step_device, rng=step_rng_device)
# Checkpointing and logging.
if config['checkpointing_config']['use_checkpointing']:
experiment.save_checkpoint(step, rng)
current_time = time.time()
if current_time - last_logging > _LOG_TENSORS_INTERVAL.value:
logging.info('Step %d: %s', step, scalars)
last_logging = current_time
step += 1
logging.info('Saving final checkpoint')
logging.info('Step %d: %s', step, scalars)
experiment.save_checkpoint(step, rng)
def eval_loop(experiment_class: ExperimentType, config: Mapping[Text, Any]):
"""The main evaluation loop.
This loop periodically loads a checkpoint and evaluates its performance on the
test set, by calling experiment.evaluate.
Args:
experiment_class: the constructor for the experiment.
config: the experiment config.
"""
experiment = experiment_class(**config)
last_evaluated_step = -1
while True:
checkpoint_data = experiment.load_checkpoint()
if checkpoint_data is None:
logging.info('No checkpoint found. Waiting for 10s.')
time.sleep(10)
continue
step, _ = checkpoint_data
if step <= last_evaluated_step:
logging.info('Checkpoint at step %d already evaluated, waiting.', step)
time.sleep(10)
continue
host_id = jax.host_id()
local_device_count = jax.local_device_count()
step_device = np.broadcast_to(step, [local_device_count])
scalars = experiment.evaluate(global_step=step_device)
if host_id == 0: # Only perform logging in one host.
logging.info('Evaluation at step %d: %s', step, scalars)
last_evaluated_step = step
if last_evaluated_step >= config['max_steps']:
return
def main(_):
if _WORKER_TPU_DRIVER.value:
jax.config.update('jax_xla_backend', 'tpu_driver')
jax.config.update('jax_backend_target', _WORKER_TPU_DRIVER.value)
logging.info('Backend: %s %r', _WORKER_TPU_DRIVER.value, jax.devices())
experiment_class = eval_experiment.EvalExperiment
config = eval_config.get_config(f'{_CHECKPOINT_ROOT.value}/pretrain.pkl',
_BATCH_SIZE.value, _NUM_EPOCHS.value)
config['checkpointing_config']['checkpoint_dir'] = _CHECKPOINT_ROOT.value # pytype: disable=unsupported-operands # dict-kwargs
if _WORKER_MODE.value == 'train':
train_loop(experiment_class, config)
elif _WORKER_MODE.value == 'eval':
eval_loop(experiment_class, config)
if __name__ == '__main__':
app.run(main)
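# Example invocation (not part of the original file). Flag names follow the
# definitions above; the script path and checkpoint directory are placeholders:
#   python main_loop.py --worker_mode=train --checkpoint_root=/tmp/relicv2 \
#     --batch_size=1024 --num_epochs=100
#   python main_loop.py --worker_mode=eval --checkpoint_root=/tmp/relicv2 \
#     --batch_size=1024 --num_epochs=100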
| relicv2-main | main_loop.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ReLICv2's main training loop.
The code in this file is adapted from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
from absl import flags
from absl.testing import absltest
import tensorflow_datasets as tfds
from relicv2 import eval_experiment
from relicv2 import main_loop
from relicv2.configs import eval as eval_config
FLAGS = flags.FLAGS
class MainLoopTest(absltest.TestCase):
def test_linear_eval(self):
config = eval_config.get_config(
checkpoint_to_evaluate=None, batch_size=4, num_epochs=10)
temp_dir = self.create_tempdir().full_path
# Override some config fields to make test lighter.
config['network_config']['encoder_class'] = 'TinyResNet'
config['allow_train_from_scratch'] = True
config['checkpointing_config']['checkpoint_dir'] = temp_dir
config['evaluation_config']['batch_size'] = 16
config['max_steps'] = 16
with tfds.testing.mock_data(num_examples=64):
experiment_class = eval_experiment.EvalExperiment
main_loop.train_loop(experiment_class, config)
main_loop.eval_loop(experiment_class, config)
if __name__ == '__main__':
absltest.main()
| relicv2-main | main_loop_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear evaluation or fine-tuning pipeline.
Use this experiment to evaluate a checkpoint.
The code in this file is adapted from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
import functools
from typing import Any, Generator, Mapping, NamedTuple, Optional, Text, Tuple, Union
from absl import logging
from acme.jax import utils as acme_utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from relicv2.utils import checkpointing
from relicv2.utils import dataset
from relicv2.utils import helpers
from relicv2.utils import networks
from relicv2.utils import schedules
# Type declarations.
OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
LogsDict = Mapping[Text, jnp.ndarray]
class _EvalExperimentState(NamedTuple):
backbone_params: hk.Params
classif_params: hk.Params
backbone_state: hk.State
backbone_opt_state: Union[None, OptState]
classif_opt_state: OptState
class EvalExperiment:
"""Linear evaluation experiment."""
def __init__(self, random_seed: int, num_classes: int, batch_size: int,
max_steps: int, enable_double_transpose: bool,
checkpoint_to_evaluate: Optional[Text],
allow_train_from_scratch: bool, freeze_backbone: bool,
network_config: Mapping[Text, Any],
optimizer_config: Mapping[Text, Any],
lr_schedule_config: Mapping[Text, Any],
evaluation_config: Mapping[Text, Any],
checkpointing_config: Mapping[Text, Any]):
"""Constructs the experiment.
Args:
random_seed: the random seed to use when initializing network weights.
num_classes: the number of classes; used for the online evaluation.
batch_size: the total batch size; should be a multiple of the number of
available accelerators.
max_steps: the number of training steps; used for the lr/target network
ema schedules.
enable_double_transpose: see dataset.py; only has effect on TPU.
checkpoint_to_evaluate: the path to the checkpoint to evaluate.
allow_train_from_scratch: whether to allow training without specifying a
checkpoint to evaluate (training from scratch).
freeze_backbone: whether the backbone resnet should remain frozen (linear
evaluation) or be trainable (fine-tuning).
network_config: the configuration for the network.
optimizer_config: the configuration for the optimizer.
lr_schedule_config: the configuration for the learning rate schedule.
evaluation_config: the evaluation configuration.
checkpointing_config: the configuration for checkpointing.
"""
self._random_seed = random_seed
self._enable_double_transpose = enable_double_transpose
self._num_classes = num_classes
self._lr_schedule_config = lr_schedule_config
self._batch_size = batch_size
self._max_steps = max_steps
self._checkpoint_to_evaluate = checkpoint_to_evaluate
self._allow_train_from_scratch = allow_train_from_scratch
self._freeze_backbone = freeze_backbone
self._optimizer_config = optimizer_config
self._evaluation_config = evaluation_config
# Checkpointed experiment state.
self._experiment_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
backbone_fn = functools.partial(self._backbone_fn, **network_config)
self.forward_backbone = hk.without_apply_rng(
hk.transform_with_state(backbone_fn))
self.forward_classif = hk.without_apply_rng(hk.transform(self._classif_fn))
self.update_pmap = jax.pmap(self._update_func, axis_name='i')
self.eval_batch_jit = jax.jit(self._eval_batch)
self._is_backbone_training = not self._freeze_backbone
self._checkpointer = checkpointing.Checkpointer(**checkpointing_config)
def _should_transpose_images(self):
"""Should we transpose images (saves host-to-device time on TPUs)."""
return (self._enable_double_transpose and
jax.local_devices()[0].platform == 'tpu')
def _backbone_fn(
self,
inputs: dataset.Batch,
encoder_class: Text,
encoder_config: Mapping[Text, Any],
bn_decay_rate: float,
is_training: bool,
) -> jnp.ndarray:
"""Forward of the encoder (backbone)."""
bn_config = {'decay_rate': bn_decay_rate}
encoder = getattr(networks, encoder_class)
model = encoder(None, bn_config=bn_config, **encoder_config)
if self._should_transpose_images():
inputs = dataset.transpose_images(inputs)
images = dataset.normalize_images(inputs['images'])
return model(images, is_training=is_training)
def _classif_fn(
self,
embeddings: jnp.ndarray,
) -> jnp.ndarray:
classifier = hk.Linear(output_size=self._num_classes)
return classifier(embeddings)
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, *, global_step: jnp.ndarray,
rng: jnp.ndarray) -> Mapping[Text, np.ndarray]:
"""Performs a single training step."""
if self._train_input is None:
self._initialize_train(rng)
inputs = next(self._train_input)
self._experiment_state, scalars = self.update_pmap(self._experiment_state,
global_step, inputs)
scalars = helpers.get_first(scalars)
return scalars
def save_checkpoint(self, step: int, rng: jnp.ndarray):
self._checkpointer.maybe_save_checkpoint(
self._experiment_state,
step=step,
rng=rng,
is_final=step >= self._max_steps)
def load_checkpoint(self) -> Union[Tuple[int, jnp.ndarray], None]:
checkpoint_data = self._checkpointer.maybe_load_checkpoint()
if checkpoint_data is None:
return None
self._experiment_state, step, rng = checkpoint_data
return step, rng
def _initialize_train(self, rng):
"""ReLICv2's _ExperimentState initialization.
Args:
rng: random number generator used to initialize parameters. If working in
a multi device setup, this need to be a ShardedArray.
dummy_input: a dummy image, used to compute intermediate outputs shapes.
Returns:
Initial EvalExperiment state.
Raises:
RuntimeError: invalid or empty checkpoint.
"""
self._train_input = acme_utils.prefetch(self._build_train_input())
# Check we haven't already restored params
if self._experiment_state is None:
inputs = next(self._train_input)
if self._checkpoint_to_evaluate is not None:
# Load params from checkpoint
checkpoint_data = checkpointing.load_checkpoint(
self._checkpoint_to_evaluate)
if checkpoint_data is None:
raise RuntimeError('Invalid checkpoint.')
backbone_params = helpers.get_first(
checkpoint_data['experiment_state']['online_params'])
backbone_state = helpers.get_first(
checkpoint_data['experiment_state']['online_state'])
backbone_params = helpers.bcast_local_devices(backbone_params)
backbone_state = helpers.bcast_local_devices(backbone_state)
else:
if not self._allow_train_from_scratch:
raise ValueError(
'No checkpoint specified, but `allow_train_from_scratch` '
'set to False')
# Initialize with random parameters
logging.info(
'No checkpoint specified, initializing the networks from scratch '
'(dry run mode)')
backbone_params, backbone_state = jax.pmap(
functools.partial(self.forward_backbone.init, is_training=True),
axis_name='i')(
rng=rng, inputs=inputs)
init_experiment = jax.pmap(self._make_initial_state, axis_name='i')
# Init uses the same RNG key on all hosts+devices to ensure everyone
# computes the same initial state and parameters.
init_rng = jax.random.PRNGKey(self._random_seed)
init_rng = helpers.bcast_local_devices(init_rng)
self._experiment_state = init_experiment(
rng=init_rng,
dummy_input=inputs,
backbone_params=backbone_params,
backbone_state=backbone_state)
# Clear the backbone optimizer's state when the backbone is frozen.
if self._freeze_backbone:
self._experiment_state = _EvalExperimentState(
backbone_params=self._experiment_state.backbone_params,
classif_params=self._experiment_state.classif_params,
backbone_state=self._experiment_state.backbone_state,
backbone_opt_state=None,
classif_opt_state=self._experiment_state.classif_opt_state,
)
def _make_initial_state(
self,
rng: jnp.ndarray,
dummy_input: dataset.Batch,
backbone_params: hk.Params,
backbone_state: hk.Params,
) -> _EvalExperimentState:
"""_EvalExperimentState initialization."""
# Initialize the backbone params
# Always create the batchnorm weights (is_training=True), they will be
# overwritten when loading the checkpoint.
embeddings, _ = self.forward_backbone.apply(
backbone_params, backbone_state, dummy_input, is_training=True)
backbone_opt_state = self._optimizer(0.).init(backbone_params)
# Initialize the classifier params and optimizer_state
classif_params = self.forward_classif.init(rng, embeddings)
classif_opt_state = self._optimizer(0.).init(classif_params)
return _EvalExperimentState(
backbone_params=backbone_params,
classif_params=classif_params,
backbone_state=backbone_state,
backbone_opt_state=backbone_opt_state,
classif_opt_state=classif_opt_state,
)
def _build_train_input(self) -> Generator[dataset.Batch, None, None]:
"""See base class."""
num_devices = jax.device_count()
global_batch_size = self._batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
return dataset.load(
dataset.Split.TRAIN_AND_VALID,
preprocess_mode=dataset.PreprocessMode.LINEAR_TRAIN,
transpose=self._should_transpose_images(),
batch_dims=[jax.local_device_count(), per_device_batch_size])
def _optimizer(self, learning_rate: float):
"""Build optimizer from config."""
return optax.sgd(learning_rate, **self._optimizer_config)
def _loss_fn(
self,
backbone_params: hk.Params,
classif_params: hk.Params,
backbone_state: hk.State,
inputs: dataset.Batch,
) -> Tuple[jnp.ndarray, Tuple[jnp.ndarray, hk.State]]:
"""Compute the classification loss function.
Args:
backbone_params: parameters of the encoder network.
classif_params: parameters of the linear classifier.
backbone_state: internal state of encoder network.
inputs: inputs, containing `images` and `labels`.
Returns:
The classification loss and various logs.
"""
embeddings, backbone_state = self.forward_backbone.apply(
backbone_params,
backbone_state,
inputs,
is_training=not self._freeze_backbone)
logits = self.forward_classif.apply(classif_params, embeddings)
labels = hk.one_hot(inputs['labels'], self._num_classes)
loss = helpers.softmax_cross_entropy(logits, labels, reduction='mean')
scaled_loss = loss / jax.device_count()
return scaled_loss, (loss, backbone_state)
def _update_func(
self,
experiment_state: _EvalExperimentState,
global_step: jnp.ndarray,
inputs: dataset.Batch,
) -> Tuple[_EvalExperimentState, LogsDict]:
"""Applies an update to parameters and returns new state."""
# This function computes the gradient of the first output of loss_fn and
# passes through the other arguments unchanged.
# Gradient of the first output of _loss_fn wrt the backbone (arg 0) and the
# classifier parameters (arg 1). The auxiliary outputs are returned as-is.
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True, argnums=(0, 1))
grads, aux_outputs = grad_loss_fn(
experiment_state.backbone_params,
experiment_state.classif_params,
experiment_state.backbone_state,
inputs,
)
backbone_grads, classifier_grads = grads
train_loss, new_backbone_state = aux_outputs
classifier_grads = jax.lax.psum(classifier_grads, axis_name='i')
# Compute the decayed learning rate
learning_rate = schedules.learning_schedule(
global_step,
batch_size=self._batch_size,
total_steps=self._max_steps,
**self._lr_schedule_config)
logging.info('Learning rate: %s', learning_rate)
# Compute and apply updates via our optimizer.
classif_updates, new_classif_opt_state = self._optimizer(
learning_rate).update(classifier_grads,
experiment_state.classif_opt_state)
new_classif_params = optax.apply_updates(experiment_state.classif_params,
classif_updates)
if self._freeze_backbone:
del backbone_grads, new_backbone_state # Unused
# The backbone is not updated.
new_backbone_params = experiment_state.backbone_params
new_backbone_opt_state = None
new_backbone_state = experiment_state.backbone_state
else:
backbone_grads = jax.lax.psum(backbone_grads, axis_name='i')
# Compute and apply updates via our optimizer.
backbone_updates, new_backbone_opt_state = self._optimizer(
learning_rate).update(backbone_grads,
experiment_state.backbone_opt_state)
new_backbone_params = optax.apply_updates(
experiment_state.backbone_params, backbone_updates)
experiment_state = _EvalExperimentState(
new_backbone_params,
new_classif_params,
new_backbone_state,
new_backbone_opt_state,
new_classif_opt_state,
)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = {'train_loss': train_loss}
scalars = jax.lax.pmean(scalars, axis_name='i')
return experiment_state, scalars
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, **unused_args):
"""See base class."""
global_step = np.array(helpers.get_first(global_step))
scalars = jax.device_get(self._eval_epoch(**self._evaluation_config))
logging.info('[Step %d] Eval scalars: %s', global_step, scalars)
return scalars
def _eval_batch(
self,
backbone_params: hk.Params,
classif_params: hk.Params,
backbone_state: hk.State,
inputs: dataset.Batch,
) -> LogsDict:
"""Evaluates a batch."""
embeddings, backbone_state = self.forward_backbone.apply(
backbone_params, backbone_state, inputs, is_training=False)
logits = self.forward_classif.apply(classif_params, embeddings)
labels = hk.one_hot(inputs['labels'], self._num_classes)
loss = helpers.softmax_cross_entropy(logits, labels, reduction=None)
top1_correct = helpers.topk_accuracy(logits, inputs['labels'], topk=1)
top5_correct = helpers.topk_accuracy(logits, inputs['labels'], topk=5)
# NOTE: Returned values will be summed and finally divided by num_samples.
return {
'eval_loss': loss,
'top1_accuracy': top1_correct,
'top5_accuracy': top5_correct
}
def _eval_epoch(self, subset: Text, batch_size: int):
"""Evaluates an epoch."""
num_samples = 0.
summed_scalars = None
backbone_params = helpers.get_first(self._experiment_state.backbone_params)
classif_params = helpers.get_first(self._experiment_state.classif_params)
backbone_state = helpers.get_first(self._experiment_state.backbone_state)
split = dataset.Split.from_string(subset)
dataset_iterator = dataset.load(
split,
preprocess_mode=dataset.PreprocessMode.EVAL,
transpose=self._should_transpose_images(),
batch_dims=[batch_size])
for inputs in dataset_iterator:
num_samples += inputs['labels'].shape[0]
scalars = self.eval_batch_jit(
backbone_params,
classif_params,
backbone_state,
inputs,
)
# Accumulate the sum of scalars for each step.
scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars)
if summed_scalars is None:
summed_scalars = scalars
else:
summed_scalars = jax.tree_map(jnp.add, summed_scalars, scalars)
mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars)
return mean_scalars
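# Minimal sketch (not part of the original file) of the jax.grad pattern used
# in `_update_func` above: differentiate the first output of a loss function
# with respect to its first two arguments only, passing auxiliary outputs
# through unchanged. `jax` and `jnp` are already imported at the top of this
# file; the toy loss below is purely illustrative.
def _grad_pattern_example():
  def loss_fn(backbone_w, classif_w, state, x):
    loss = jnp.sum((x * backbone_w * classif_w) ** 2)
    return loss, (loss, state)  # (value to differentiate, auxiliary outputs)
  grad_loss_fn = jax.grad(loss_fn, has_aux=True, argnums=(0, 1))
  (g_backbone, g_classif), (loss, new_state) = grad_loss_fn(
      2.0, 3.0, {'step': 0}, jnp.ones(4))
  return g_backbone, g_classif, loss, new_state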
| relicv2-main | eval_experiment.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for eval experiment."""
| relicv2-main | utils/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of LARS Optimizer with optax.
The code in this file is taken from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
from typing import Any, Callable, List, NamedTuple, Optional, Tuple
import jax
import jax.numpy as jnp
import optax
import tree as nest
# A filter function takes a path and a value as input and outputs True for
# variable to apply update and False not to apply the update
FilterFn = Callable[[Tuple[Any], jnp.ndarray], jnp.ndarray]
def exclude_bias_and_norm(path: Tuple[Any], val: jnp.ndarray) -> jnp.ndarray:
"""Filter to exclude biaises and normalizations weights."""
del val
if path[-1] == "b" or "norm" in path[-2]:
return False
return True
def _partial_update(updates: optax.Updates,
new_updates: optax.Updates,
params: optax.Params,
filter_fn: Optional[FilterFn] = None) -> optax.Updates:
"""Returns new_update for params which filter_fn is True else updates."""
if filter_fn is None:
return new_updates
wrapped_filter_fn = lambda x, y: jnp.array(filter_fn(x, y))
params_to_filter = nest.map_structure_with_path(wrapped_filter_fn, params)
def _update_fn(g: jnp.ndarray, t: jnp.ndarray, m: jnp.ndarray) -> jnp.ndarray:
m = m.astype(g.dtype)
return g * (1. - m) + t * m
return jax.tree_map(_update_fn, updates, new_updates, params_to_filter)
class ScaleByLarsState(NamedTuple):
mu: jnp.ndarray
def scale_by_lars(
momentum: float = 0.9,
eta: float = 0.001,
filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
"""Rescales updates according to the LARS algorithm.
Does not include weight decay.
References:
[You et al, 2017](https://arxiv.org/abs/1708.03888)
Args:
    momentum: momentum coefficient.
eta: LARS coefficient.
filter_fn: an optional filter function.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(params: optax.Params) -> ScaleByLarsState:
mu = jax.tree_map(jnp.zeros_like, params) # momentum
return ScaleByLarsState(mu=mu)
def update_fn(updates: optax.Updates, state: ScaleByLarsState,
params: optax.Params) -> Tuple[optax.Updates, ScaleByLarsState]:
def lars_adaptation(
update: jnp.ndarray,
param: jnp.ndarray,
) -> jnp.ndarray:
param_norm = jnp.linalg.norm(param)
update_norm = jnp.linalg.norm(update)
return update * jnp.where(
param_norm > 0.,
jnp.where(update_norm > 0,
(eta * param_norm / update_norm), 1.0), 1.0)
adapted_updates = jax.tree_map(lars_adaptation, updates, params)
adapted_updates = _partial_update(updates, adapted_updates, params,
filter_fn)
mu = jax.tree_map(lambda g, t: momentum * g + t, state.mu, adapted_updates)
return mu, ScaleByLarsState(mu=mu)
return optax.GradientTransformation(init_fn, update_fn)
class AddWeightDecayState(NamedTuple):
"""Stateless transformation."""
def add_weight_decay(
weight_decay: float,
filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
"""Adds a weight decay to the update.
Args:
    weight_decay: weight decay coefficient.
filter_fn: an optional filter function.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_) -> AddWeightDecayState:
return AddWeightDecayState()
def update_fn(
updates: optax.Updates,
state: AddWeightDecayState,
params: optax.Params,
) -> Tuple[optax.Updates, AddWeightDecayState]:
new_updates = jax.tree_map(lambda g, p: g + weight_decay * p, updates,
params)
new_updates = _partial_update(updates, new_updates, params, filter_fn)
return new_updates, state
return optax.GradientTransformation(init_fn, update_fn)
LarsState = List # Type for the lars optimizer
def lars(
learning_rate: float,
weight_decay: float = 0.,
momentum: float = 0.9,
eta: float = 0.001,
weight_decay_filter: Optional[FilterFn] = None,
lars_adaptation_filter: Optional[FilterFn] = None,
) -> optax.GradientTransformation:
"""Creates lars optimizer with weight decay.
References:
[You et al, 2017](https://arxiv.org/abs/1708.03888)
Args:
learning_rate: learning rate coefficient.
weight_decay: weight decay coefficient.
momentum: momentum coefficient.
eta: LARS coefficient.
    weight_decay_filter: optional filter function to only apply the weight
      decay on a subset of parameters. The filter function takes as input the
      parameter path (as a tuple) and its associated update, and returns True
      for params to apply the weight decay to and False for params to skip.
      When weight_decay_filter is set to None, the weight decay is applied to
      all parameters; pass `exclude_bias_and_norm` to skip biases (i.e. when
      the variable name is 'b') and normalization params (i.e. when the
      penultimate path entry contains 'norm').
    lars_adaptation_filter: similar to weight_decay_filter but for the LARS
      adaptation.
Returns:
An optax.GradientTransformation, i.e. a (init_fn, update_fn) tuple.
"""
if weight_decay_filter is None:
weight_decay_filter = lambda *_: True
if lars_adaptation_filter is None:
lars_adaptation_filter = lambda *_: True
return optax.chain(
add_weight_decay(
weight_decay=weight_decay, filter_fn=weight_decay_filter),
scale_by_lars(
momentum=momentum, eta=eta, filter_fn=lars_adaptation_filter),
optax.scale(-learning_rate),
)
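# Example usage (not part of the original module): building the LARS optimizer
# above and applying a single update to a small haiku-style parameter tree.
# `jnp` and `optax` are already imported at the top of this file.
def _example_lars_step():
  params = {'linear': {'w': jnp.ones((2, 2)), 'b': jnp.zeros((2,))}}
  grads = {'linear': {'w': jnp.full((2, 2), 0.1), 'b': jnp.full((2,), 0.1)}}
  optimizer = lars(
      learning_rate=0.1,
      weight_decay=1e-6,
      weight_decay_filter=exclude_bias_and_norm,
      lars_adaptation_filter=exclude_bias_and_norm)
  opt_state = optimizer.init(params)
  updates, opt_state = optimizer.update(grads, opt_state, params)
  return optax.apply_updates(params, updates)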
| relicv2-main | utils/optimizers.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ImageNet dataset with typical pre-processing.
The code in this file is taken from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
import enum
from typing import Generator, Mapping, Optional, Sequence, Text, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
Batch = Mapping[Text, np.ndarray]
class Split(enum.Enum):
"""Imagenet dataset split."""
TRAIN = 1
TRAIN_AND_VALID = 2
VALID = 3
TEST = 4
@classmethod
def from_string(cls, name: Text) -> 'Split':
return {
'TRAIN': Split.TRAIN,
'TRAIN_AND_VALID': Split.TRAIN_AND_VALID,
'VALID': Split.VALID,
'VALIDATION': Split.VALID,
'TEST': Split.TEST
}[name.upper()]
@property
def num_examples(self):
return {
Split.TRAIN_AND_VALID: 1281167,
Split.TRAIN: 1271167,
Split.VALID: 10000,
Split.TEST: 50000
}[self]
class PreprocessMode(enum.Enum):
"""Preprocessing modes for the dataset."""
PRETRAIN = 1 # Generates two augmented views (random crop + augmentations).
LINEAR_TRAIN = 2 # Generates a single random crop.
EVAL = 3 # Generates a single center crop.
def normalize_images(images: jnp.ndarray) -> jnp.ndarray:
"""Normalize the image using ImageNet statistics."""
mean_rgb = (0.485, 0.456, 0.406)
stddev_rgb = (0.229, 0.224, 0.225)
normed_images = images - jnp.array(mean_rgb).reshape((1, 1, 1, 3))
normed_images = normed_images / jnp.array(stddev_rgb).reshape((1, 1, 1, 3))
return normed_images
def load(split: Split,
*,
preprocess_mode: PreprocessMode,
batch_dims: Sequence[int],
transpose: bool = False,
allow_caching: bool = False) -> Generator[Batch, None, None]:
"""Loads the given split of the dataset."""
start, end = _shard(split, jax.host_id(), jax.host_count())
total_batch_size = np.prod(batch_dims)
tfds_split = tfds.core.ReadInstruction(
_to_tfds_split(split), from_=start, to=end, unit='abs')
ds = tfds.load(
'imagenet2012:5.*.*',
split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 48
options.experimental_threading.max_intra_op_parallelism = 1
if preprocess_mode is not PreprocessMode.EVAL:
options.experimental_deterministic = False
if jax.host_count() > 1 and allow_caching:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0)
else:
if split.num_examples % total_batch_size != 0:
raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
ds = ds.with_options(options)
def preprocess_pretrain(example):
view1 = _preprocess_image(example['image'], mode=preprocess_mode)
view2 = _preprocess_image(example['image'], mode=preprocess_mode)
label = tf.cast(example['label'], tf.int32)
return {'view1': view1, 'view2': view2, 'labels': label}
def preprocess_linear_train(example):
image = _preprocess_image(example['image'], mode=preprocess_mode)
label = tf.cast(example['label'], tf.int32)
return {'images': image, 'labels': label}
def preprocess_eval(example):
image = _preprocess_image(example['image'], mode=preprocess_mode)
label = tf.cast(example['label'], tf.int32)
return {'images': image, 'labels': label}
if preprocess_mode is PreprocessMode.PRETRAIN:
ds = ds.map(
preprocess_pretrain, num_parallel_calls=tf.data.experimental.AUTOTUNE)
elif preprocess_mode is PreprocessMode.LINEAR_TRAIN:
ds = ds.map(
preprocess_linear_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
ds = ds.map(
preprocess_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def transpose_fn(batch):
# We use the double-transpose-trick to improve performance for TPUs. Note
# that this (typically) requires a matching HWCN->NHWC transpose in your
# model code. The compiler cannot make this optimization for us since our
# data pipeline and model are compiled separately.
batch = dict(**batch)
if preprocess_mode is PreprocessMode.PRETRAIN:
batch['view1'] = tf.transpose(batch['view1'], (1, 2, 3, 0))
batch['view2'] = tf.transpose(batch['view2'], (1, 2, 3, 0))
else:
batch['images'] = tf.transpose(batch['images'], (1, 2, 3, 0))
return batch
for i, batch_size in enumerate(reversed(batch_dims)):
ds = ds.batch(batch_size)
if i == 0 and transpose:
ds = ds.map(transpose_fn) # NHWC -> HWCN
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
yield from tfds.as_numpy(ds)
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
# NOTE: Imagenet did not release labels for the test split used in the
# competition, we consider the VALID split the TEST split and reserve
# 10k images from TRAIN for VALID.
if split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
arange = np.arange(split.num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
def _preprocess_image(
image_bytes: tf.Tensor,
mode: PreprocessMode,
) -> tf.Tensor:
"""Returns processed and resized images."""
if mode is PreprocessMode.PRETRAIN:
image = _decode_and_random_crop(image_bytes)
# Random horizontal flipping is optionally done in augmentations.preprocess.
elif mode is PreprocessMode.LINEAR_TRAIN:
image = _decode_and_random_crop(image_bytes)
image = tf.image.random_flip_left_right(image)
else:
image = _decode_and_center_crop(image_bytes)
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
assert image.dtype == tf.uint8
image = tf.image.resize(image, [224, 224], tf.image.ResizeMethod.BICUBIC)
image = tf.clip_by_value(image / 255., 0., 1.)
return image
def _decode_and_random_crop(image_bytes: tf.Tensor) -> tf.Tensor:
"""Make a random crop of 224."""
img_size = tf.image.extract_jpeg_shape(image_bytes)
area = tf.cast(img_size[1] * img_size[0], tf.float32)
target_area = tf.random.uniform([], 0.08, 1.0, dtype=tf.float32) * area
log_ratio = (tf.math.log(3 / 4), tf.math.log(4 / 3))
aspect_ratio = tf.math.exp(
tf.random.uniform([], *log_ratio, dtype=tf.float32))
w = tf.cast(tf.round(tf.sqrt(target_area * aspect_ratio)), tf.int32)
h = tf.cast(tf.round(tf.sqrt(target_area / aspect_ratio)), tf.int32)
w = tf.minimum(w, img_size[1])
h = tf.minimum(h, img_size[0])
offset_w = tf.random.uniform((),
minval=0,
maxval=img_size[1] - w + 1,
dtype=tf.int32)
offset_h = tf.random.uniform((),
minval=0,
maxval=img_size[0] - h + 1,
dtype=tf.int32)
crop_window = tf.stack([offset_h, offset_w, h, w])
image = tf.io.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def transpose_images(batch: Batch):
"""Transpose images for TPU training.."""
new_batch = dict(batch) # Avoid mutating in place.
if 'images' in batch:
new_batch['images'] = jnp.transpose(batch['images'], (3, 0, 1, 2))
else:
new_batch['view1'] = jnp.transpose(batch['view1'], (3, 0, 1, 2))
new_batch['view2'] = jnp.transpose(batch['view2'], (3, 0, 1, 2))
return new_batch
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
) -> tf.Tensor:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
padded_center_crop_size = tf.cast(
((224 / (224 + 32)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([
offset_height, offset_width, padded_center_crop_size,
padded_center_crop_size
])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
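# Example usage (not part of the original module): `normalize_images` expects
# images scaled to [0, 1] with a trailing channel axis, as produced by
# `_preprocess_image` above. `jnp` is already imported at the top of this file.
def _example_normalize_images():
  images = jnp.full((2, 224, 224, 3), 0.5)
  normed = normalize_images(images)
  # With the ImageNet statistics above, a constant 0.5 image maps to roughly
  # (0.07, 0.20, 0.42) per channel.
  return normed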
| relicv2-main | utils/dataset.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Networks used in ReLICv2.
The code in this file is taken from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
from typing import Any, Mapping, Optional, Sequence, Text
import haiku as hk
import jax
import jax.numpy as jnp
class MLP(hk.Module):
"""One hidden layer perceptron, with normalization."""
def __init__(
self,
name: Text,
hidden_size: int,
output_size: int,
bn_config: Mapping[Text, Any],
):
super().__init__(name=name)
self._hidden_size = hidden_size
self._output_size = output_size
self._bn_config = bn_config
def __call__(self, inputs: jnp.ndarray, is_training: bool) -> jnp.ndarray:
out = hk.Linear(output_size=self._hidden_size, with_bias=True)(inputs)
out = hk.BatchNorm(**self._bn_config)(out, is_training=is_training)
out = jax.nn.relu(out)
out = hk.Linear(output_size=self._output_size, with_bias=False)(out)
return out
def check_length(length, value, name):
if len(value) != length:
    raise ValueError(f'`{name}` must be of length {length} not {len(value)}')
class ResNetTorso(hk.Module):
"""ResNet model."""
def __init__(
self,
blocks_per_group: Sequence[int],
num_classes: Optional[int] = None,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
bottleneck: bool = True,
channels_per_group: Sequence[int] = (256, 512, 1024, 2048),
use_projection: Sequence[bool] = (True, True, True, True),
width_multiplier: int = 1,
name: Optional[str] = None,
):
"""Constructs a ResNet model.
Args:
blocks_per_group: A sequence of length 4 that indicates the number of
blocks created in each group.
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of three elements, `decay_rate`, `eps`, and
`cross_replica_axis`, to be passed on to the `BatchNorm` layers. By
default the `decay_rate` is `0.9` and `eps` is `1e-5`, and the axis is
`None`.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
bottleneck: Whether the block should bottleneck or not. Defaults to True.
channels_per_group: A sequence of length 4 that indicates the number of
channels used for each block in each group.
use_projection: A sequence of length 4 that indicates whether each
residual block should use projection.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(name=name)
self.resnet_v2 = resnet_v2
bn_config = dict(bn_config or {})
bn_config.setdefault('decay_rate', 0.9)
bn_config.setdefault('eps', 1e-5)
bn_config.setdefault('create_scale', True)
bn_config.setdefault('create_offset', True)
# Number of blocks in each group for ResNet.
check_length(4, blocks_per_group, 'blocks_per_group')
check_length(4, channels_per_group, 'channels_per_group')
self.initial_conv = hk.Conv2D(
output_channels=64 * width_multiplier,
kernel_shape=7,
stride=2,
with_bias=False,
padding='SAME',
name='initial_conv')
if not self.resnet_v2:
self.initial_batchnorm = hk.BatchNorm(
name='initial_batchnorm', **bn_config)
self.block_groups = []
strides = (1, 2, 2, 2)
for i in range(4):
self.block_groups.append(
hk.nets.ResNet.BlockGroup(
channels=width_multiplier * channels_per_group[i],
num_blocks=blocks_per_group[i],
stride=strides[i],
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=bottleneck,
use_projection=use_projection[i],
name='block_group_%d' % (i)))
if self.resnet_v2:
self.final_batchnorm = hk.BatchNorm(name='final_batchnorm', **bn_config)
self.logits = hk.Linear(num_classes, w_init=jnp.zeros, name='logits')
def __call__(self, inputs, is_training, test_local_stats=False):
out = inputs
out = self.initial_conv(out)
if not self.resnet_v2:
out = self.initial_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = hk.max_pool(
out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')
for block_group in self.block_groups:
out = block_group(out, is_training, test_local_stats)
if self.resnet_v2:
out = self.final_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = jnp.mean(out, axis=[1, 2])
return out
class TinyResNet(ResNetTorso):
"""Tiny resnet for local runs and tests."""
def __init__(self,
num_classes: Optional[int] = None,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(1, 1, 1, 1),
channels_per_group=(8, 8, 8, 8),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
width_multiplier=width_multiplier,
name=name)
class ResNet18(ResNetTorso):
"""ResNet18."""
def __init__(self,
num_classes: Optional[int] = None,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(2, 2, 2, 2),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
channels_per_group=(64, 128, 256, 512),
width_multiplier=width_multiplier,
name=name)
class ResNet34(ResNetTorso):
"""ResNet34."""
def __init__(self,
num_classes: Optional[int],
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 4, 6, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
channels_per_group=(64, 128, 256, 512),
width_multiplier=width_multiplier,
name=name)
class ResNet50(ResNetTorso):
"""ResNet50."""
def __init__(self,
num_classes: Optional[int] = None,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 4, 6, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
width_multiplier=width_multiplier,
name=name)
class ResNet101(ResNetTorso):
"""ResNet101."""
def __init__(self,
num_classes: Optional[int],
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 4, 23, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
width_multiplier=width_multiplier,
name=name)
class ResNet152(ResNetTorso):
"""ResNet152."""
def __init__(self,
num_classes: Optional[int],
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 8, 36, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
width_multiplier=width_multiplier,
name=name)
class ResNet200(ResNetTorso):
"""ResNet200."""
def __init__(self,
num_classes: Optional[int],
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
width_multiplier: int = 1,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
width_multiplier: An integer multiplying the number of channels per group.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 24, 36, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
width_multiplier=width_multiplier,
name=name)
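# Usage sketch (added for illustration, not part of the original module). It
# assumes `haiku` and `jax` are installed and that `ResNetTorso` follows the
# standard Haiku ResNet call convention `net(images, is_training)`; the input
# shape and the printed feature shape are assumptions, not guarantees.
if __name__ == "__main__":
  import haiku as hk
  import jax
  import jax.numpy as jnp
  def _forward(images):
    return ResNet50(num_classes=None)(images, is_training=True)
  forward = hk.transform_with_state(_forward)
  dummy_images = jnp.zeros((2, 224, 224, 3), dtype=jnp.float32)
  params, state = forward.init(jax.random.PRNGKey(0), dummy_images)
  features, _ = forward.apply(params, state, None, dummy_images)
  print(features.shape)  # Expected to be (2, 2048) for the ResNet50 torso.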
| relicv2-main | utils/networks.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions.
The code in this file is taken from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
from typing import Optional, Text
from absl import logging
import jax
import jax.numpy as jnp
def topk_accuracy(
logits: jnp.ndarray,
labels: jnp.ndarray,
topk: int,
ignore_label_above: Optional[int] = None,
) -> jnp.ndarray:
"""Top-num_codes accuracy."""
assert len(labels.shape) == 1, 'topk expects 1d int labels.'
assert len(logits.shape) == 2, 'topk expects 2d logits.'
if ignore_label_above is not None:
logits = logits[labels < ignore_label_above, :]
labels = labels[labels < ignore_label_above]
prds = jnp.argsort(logits, axis=1)[:, ::-1]
prds = prds[:, :topk]
total = jnp.any(prds == jnp.tile(labels[:, jnp.newaxis], [1, topk]), axis=1)
return total
def softmax_cross_entropy(
logits: jnp.ndarray,
labels: jnp.ndarray,
reduction: Optional[Text] = 'mean',
) -> jnp.ndarray:
"""Computes softmax cross entropy given logits and one-hot class labels.
Args:
logits: Logit output values.
labels: Ground truth one-hot-encoded labels.
reduction: Type of reduction to apply to loss.
Returns:
Loss value. If `reduction` is `none`, this has the same shape as `labels`;
otherwise, it is scalar.
Raises:
ValueError: If the type of `reduction` is unsupported.
"""
loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
if reduction == 'sum':
return jnp.sum(loss)
elif reduction == 'mean':
return jnp.mean(loss)
elif reduction == 'none' or reduction is None:
return loss
else:
raise ValueError(f'Incorrect reduction mode {reduction}')
def l2_normalize(
x: jnp.ndarray,
axis: Optional[int] = None,
epsilon: float = 1e-12,
) -> jnp.ndarray:
"""l2 normalize a tensor on an axis with numerical stability."""
square_sum = jnp.sum(jnp.square(x), axis=axis, keepdims=True)
x_inv_norm = jax.lax.rsqrt(jnp.maximum(square_sum, epsilon))
return x * x_inv_norm
def l2_weight_regularizer(params):
"""Helper to do lasso on weights.
Args:
params: the entire param set.
Returns:
    Scalar equal to half the sum of squared weights, excluding biases and
    normalization parameters.
"""
l2_norm = 0.
for mod_name, mod_params in params.items():
if 'norm' not in mod_name:
for param_k, param_v in mod_params.items():
        if param_k != 'b' and 'b' not in param_k:  # Filter out biases
l2_norm += jnp.sum(jnp.square(param_v))
else:
logging.warning('Excluding %s/%s from optimizer weight decay!',
mod_name, param_k)
else:
logging.warning('Excluding %s from optimizer weight decay!', mod_name)
return 0.5 * l2_norm
def bcast_local_devices(value):
"""Broadcasts an object to all local devices."""
devices = jax.local_devices()
def _replicate(x):
"""Replicate an object on each device."""
x = jnp.array(x)
return jax.device_put_sharded(len(devices) * [x], devices)
return jax.tree_util.tree_map(_replicate, value)
def get_first(xs):
"""Gets values from the first device."""
return jax.tree_map(lambda x: x[0], xs)
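# Quick illustrative checks of the helpers above (added for documentation; not
# part of the original BYOL utilities).
if __name__ == "__main__":
  example_logits = jnp.array([[2.0, 1.0, 0.1], [0.2, 0.3, 4.0]])
  example_labels = jnp.array([0, 2])
  # Both predictions are correct, so this prints [ True  True].
  print(topk_accuracy(example_logits, example_labels, topk=1))
  # Mean softmax cross entropy against one-hot targets (a scalar).
  print(softmax_cross_entropy(example_logits,
                              jax.nn.one_hot(example_labels, 3)))
  # l2-normalizing [3, 4] gives [0.6, 0.8].
  print(l2_normalize(jnp.array([3.0, 4.0])))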
| relicv2-main | utils/helpers.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing and augmentation.
The code in this file is taken from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
import functools
from typing import Any, Mapping, Text
import jax
import jax.numpy as jnp
# typing
JaxBatch = Mapping[Text, jnp.ndarray]
ConfigDict = Mapping[Text, Any]
augment_config = dict(
view1=dict(
random_flip=True, # Random left/right flip
color_transform=dict(
apply_prob=1.0,
# Range of jittering
brightness=0.4,
contrast=0.4,
saturation=0.2,
hue=0.1,
# Probability of applying color jittering
color_jitter_prob=0.8,
# Probability of converting to grayscale
to_grayscale_prob=0.2,
# Shuffle the order of color transforms
shuffle=True),
gaussian_blur=dict(
apply_prob=1.0,
# Kernel size ~ image_size / blur_divider
blur_divider=10.,
# Kernel distribution
sigma_min=0.1,
sigma_max=2.0),
solarize=dict(apply_prob=0.0, threshold=0.5),
),
view2=dict(
random_flip=True,
color_transform=dict(
apply_prob=1.0,
brightness=0.4,
contrast=0.4,
saturation=0.2,
hue=0.1,
color_jitter_prob=0.8,
to_grayscale_prob=0.2,
shuffle=True),
gaussian_blur=dict(
apply_prob=0.1, blur_divider=10., sigma_min=0.1, sigma_max=2.0),
solarize=dict(apply_prob=0.2, threshold=0.5),
))
def postprocess(inputs: JaxBatch, rng: jnp.ndarray):
"""Apply the image augmentations to crops in inputs (view1 and view2)."""
def _postprocess_image(
images: jnp.ndarray,
rng: jnp.ndarray,
presets: ConfigDict,
) -> JaxBatch:
"""Applies augmentations in post-processing.
Args:
images: an NHWC tensor (with C=3), with float values in [0, 1].
rng: a single PRNGKey.
presets: a dict of presets for the augmentations.
Returns:
A batch of augmented images with shape NHWC, with keys view1, view2
and labels.
"""
flip_rng, color_rng, blur_rng, solarize_rng = jax.random.split(rng, 4)
out = images
if presets['random_flip']:
out = random_flip(out, flip_rng)
if presets['color_transform']['apply_prob'] > 0:
out = color_transform(out, color_rng, **presets['color_transform'])
if presets['gaussian_blur']['apply_prob'] > 0:
out = gaussian_blur(out, blur_rng, **presets['gaussian_blur'])
if presets['solarize']['apply_prob'] > 0:
out = solarize(out, solarize_rng, **presets['solarize'])
out = jnp.clip(out, 0., 1.)
return jax.lax.stop_gradient(out)
rng1, rng2 = jax.random.split(rng, num=2)
view1 = _postprocess_image(inputs['view1'], rng1, augment_config['view1'])
view2 = _postprocess_image(inputs['view2'], rng2, augment_config['view2'])
return dict(view1=view1, view2=view2, labels=inputs['labels'])
def _maybe_apply(apply_fn, inputs, rng, apply_prob):
should_apply = jax.random.uniform(rng, shape=()) <= apply_prob
return jax.lax.cond(should_apply, inputs, apply_fn, inputs, lambda x: x)
def _depthwise_conv2d(inputs, kernel, strides, padding):
"""Computes a depthwise conv2d in Jax.
Args:
inputs: an NHWC tensor with N=1.
    kernel: a [H', W', 1, C] tensor.
strides: a 2d tensor.
padding: "SAME" or "VALID".
Returns:
The depthwise convolution of inputs with kernel, as [H, W, C].
"""
return jax.lax.conv_general_dilated(
inputs,
kernel,
strides,
padding,
feature_group_count=inputs.shape[-1],
dimension_numbers=('NHWC', 'HWIO', 'NHWC'))
def _gaussian_blur_single_image(image, kernel_size, padding, sigma):
"""Applies gaussian blur to a single image, given as NHWC with N=1."""
radius = int(kernel_size / 2)
kernel_size_ = 2 * radius + 1
x = jnp.arange(-radius, radius + 1).astype(jnp.float32)
blur_filter = jnp.exp(-x**2 / (2. * sigma**2))
blur_filter = blur_filter / jnp.sum(blur_filter)
blur_v = jnp.reshape(blur_filter, [kernel_size_, 1, 1, 1])
blur_h = jnp.reshape(blur_filter, [1, kernel_size_, 1, 1])
num_channels = image.shape[-1]
blur_h = jnp.tile(blur_h, [1, 1, 1, num_channels])
blur_v = jnp.tile(blur_v, [1, 1, 1, num_channels])
expand_batch_dim = len(image.shape) == 3
if expand_batch_dim:
image = image[jnp.newaxis, ...]
blurred = _depthwise_conv2d(image, blur_h, strides=[1, 1], padding=padding)
blurred = _depthwise_conv2d(blurred, blur_v, strides=[1, 1], padding=padding)
blurred = jnp.squeeze(blurred, axis=0)
return blurred
def _random_gaussian_blur(image, rng, kernel_size, padding, sigma_min,
sigma_max, apply_prob):
"""Applies a random gaussian blur."""
apply_rng, transform_rng = jax.random.split(rng)
def _apply(image):
sigma_rng, = jax.random.split(transform_rng, 1)
sigma = jax.random.uniform(
sigma_rng,
shape=(),
minval=sigma_min,
maxval=sigma_max,
dtype=jnp.float32)
return _gaussian_blur_single_image(image, kernel_size, padding, sigma)
return _maybe_apply(_apply, image, apply_rng, apply_prob)
def rgb_to_hsv(r, g, b):
"""Converts R, G, B values to H, S, V values.
Reference TF implementation:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/adjust_saturation_op.cc
Only input values between 0 and 1 are guaranteed to work properly, but this
function complies with the TF implementation outside of this range.
Args:
r: A tensor representing the red color component as floats.
g: A tensor representing the green color component as floats.
b: A tensor representing the blue color component as floats.
Returns:
H, S, V values, each as tensors of shape [...] (same as the input without
the last dimension).
"""
vv = jnp.maximum(jnp.maximum(r, g), b)
range_ = vv - jnp.minimum(jnp.minimum(r, g), b)
sat = jnp.where(vv > 0, range_ / vv, 0.)
norm = jnp.where(range_ != 0, 1. / (6. * range_), 1e9)
hr = norm * (g - b)
hg = norm * (b - r) + 2. / 6.
hb = norm * (r - g) + 4. / 6.
hue = jnp.where(r == vv, hr, jnp.where(g == vv, hg, hb))
hue = hue * (range_ > 0)
hue = hue + (hue < 0)
return hue, sat, vv
def hsv_to_rgb(h, s, v):
"""Converts H, S, V values to an R, G, B tuple.
Reference TF implementation:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/adjust_saturation_op.cc
Only input values between 0 and 1 are guaranteed to work properly, but this
function complies with the TF implementation outside of this range.
Args:
h: A float tensor of arbitrary shape for the hue (0-1 values).
s: A float tensor of the same shape for the saturation (0-1 values).
v: A float tensor of the same shape for the value channel (0-1 values).
Returns:
An (r, g, b) tuple, each with the same dimension as the inputs.
"""
c = s * v
m = v - c
dh = (h % 1.) * 6.
fmodu = dh % 2.
x = c * (1 - jnp.abs(fmodu - 1))
hcat = jnp.floor(dh).astype(jnp.int32)
rr = jnp.where(
(hcat == 0) | (hcat == 5), c, jnp.where(
(hcat == 1) | (hcat == 4), x, 0)) + m
gg = jnp.where(
(hcat == 1) | (hcat == 2), c, jnp.where(
(hcat == 0) | (hcat == 3), x, 0)) + m
bb = jnp.where(
(hcat == 3) | (hcat == 4), c, jnp.where(
(hcat == 2) | (hcat == 5), x, 0)) + m
return rr, gg, bb
def adjust_brightness(rgb_tuple, delta):
return jax.tree_map(lambda x: x + delta, rgb_tuple)
def adjust_contrast(image, factor):
def _adjust_contrast_channel(channel):
mean = jnp.mean(channel, axis=(-2, -1), keepdims=True)
return factor * (channel - mean) + mean
return jax.tree_map(_adjust_contrast_channel, image)
def adjust_saturation(h, s, v, factor):
return h, jnp.clip(s * factor, 0., 1.), v
def adjust_hue(h, s, v, delta):
  # Note: this method exactly matches TF's adjust_hue (combined with the hsv/rgb
# conversions) when running on GPU. When running on CPU, the results will be
# different if all RGB values for a pixel are outside of the [0, 1] range.
return (h + delta) % 1.0, s, v
def _random_brightness(rgb_tuple, rng, max_delta):
delta = jax.random.uniform(rng, shape=(), minval=-max_delta, maxval=max_delta)
return adjust_brightness(rgb_tuple, delta)
def _random_contrast(rgb_tuple, rng, max_delta):
factor = jax.random.uniform(
rng, shape=(), minval=1 - max_delta, maxval=1 + max_delta)
return adjust_contrast(rgb_tuple, factor)
def _random_saturation(rgb_tuple, rng, max_delta):
h, s, v = rgb_to_hsv(*rgb_tuple)
factor = jax.random.uniform(
rng, shape=(), minval=1 - max_delta, maxval=1 + max_delta)
return hsv_to_rgb(*adjust_saturation(h, s, v, factor))
def _random_hue(rgb_tuple, rng, max_delta):
h, s, v = rgb_to_hsv(*rgb_tuple)
delta = jax.random.uniform(rng, shape=(), minval=-max_delta, maxval=max_delta)
return hsv_to_rgb(*adjust_hue(h, s, v, delta))
def _to_grayscale(image):
rgb_weights = jnp.array([0.2989, 0.5870, 0.1140])
grayscale = jnp.tensordot(image, rgb_weights, axes=(-1, -1))[..., jnp.newaxis]
return jnp.tile(grayscale, (1, 1, 3)) # Back to 3 channels.
def _color_transform_single_image(image, rng, brightness, contrast, saturation,
hue, to_grayscale_prob, color_jitter_prob,
apply_prob, shuffle):
"""Applies color jittering to a single image."""
apply_rng, transform_rng = jax.random.split(rng)
perm_rng, b_rng, c_rng, s_rng, h_rng, cj_rng, gs_rng = jax.random.split(
transform_rng, 7)
# Whether the transform should be applied at all.
should_apply = jax.random.uniform(apply_rng, shape=()) <= apply_prob
# Whether to apply grayscale transform.
should_apply_gs = jax.random.uniform(gs_rng, shape=()) <= to_grayscale_prob
# Whether to apply color jittering.
should_apply_color = jax.random.uniform(cj_rng, shape=()) <= color_jitter_prob
# Decorator to conditionally apply fn based on an index.
def _make_cond(fn, idx):
def identity_fn(x, unused_rng, unused_param):
return x
def cond_fn(args, i):
def clip(args):
return jax.tree_map(lambda arg: jnp.clip(arg, 0., 1.), args)
out = jax.lax.cond(should_apply & should_apply_color & (i == idx), args,
lambda a: clip(fn(*a)), args,
lambda a: identity_fn(*a))
return jax.lax.stop_gradient(out)
return cond_fn
random_brightness_cond = _make_cond(_random_brightness, idx=0)
random_contrast_cond = _make_cond(_random_contrast, idx=1)
random_saturation_cond = _make_cond(_random_saturation, idx=2)
random_hue_cond = _make_cond(_random_hue, idx=3)
def _color_jitter(x):
rgb_tuple = tuple(jax.tree_map(jnp.squeeze, jnp.split(x, 3, axis=-1)))
if shuffle:
order = jax.random.permutation(perm_rng, jnp.arange(4, dtype=jnp.int32))
else:
order = range(4)
for idx in order:
if brightness > 0:
rgb_tuple = random_brightness_cond((rgb_tuple, b_rng, brightness), idx)
if contrast > 0:
rgb_tuple = random_contrast_cond((rgb_tuple, c_rng, contrast), idx)
if saturation > 0:
rgb_tuple = random_saturation_cond((rgb_tuple, s_rng, saturation), idx)
if hue > 0:
rgb_tuple = random_hue_cond((rgb_tuple, h_rng, hue), idx)
return jnp.stack(rgb_tuple, axis=-1)
out_apply = _color_jitter(image)
out_apply = jax.lax.cond(should_apply & should_apply_gs, out_apply,
_to_grayscale, out_apply, lambda x: x)
return jnp.clip(out_apply, 0., 1.)
def _random_flip_single_image(image, rng):
_, flip_rng = jax.random.split(rng)
should_flip_lr = jax.random.uniform(flip_rng, shape=()) <= 0.5
image = jax.lax.cond(should_flip_lr, image, jnp.fliplr, image, lambda x: x)
return image
def random_flip(images, rng):
rngs = jax.random.split(rng, images.shape[0])
return jax.vmap(_random_flip_single_image)(images, rngs)
def color_transform(images,
rng,
brightness=0.8,
contrast=0.8,
saturation=0.8,
hue=0.2,
color_jitter_prob=0.8,
to_grayscale_prob=0.2,
apply_prob=1.0,
shuffle=True):
"""Applies color jittering and/or grayscaling to a batch of images.
Args:
images: an NHWC tensor, with C=3.
rng: a single PRNGKey.
brightness: the range of jitter on brightness.
contrast: the range of jitter on contrast.
saturation: the range of jitter on saturation.
hue: the range of jitter on hue.
color_jitter_prob: the probability of applying color jittering.
to_grayscale_prob: the probability of converting the image to grayscale.
apply_prob: the probability of applying the transform to a batch element.
shuffle: whether to apply the transforms in a random order.
Returns:
A NHWC tensor of the transformed images.
"""
rngs = jax.random.split(rng, images.shape[0])
jitter_fn = functools.partial(
_color_transform_single_image,
brightness=brightness,
contrast=contrast,
saturation=saturation,
hue=hue,
color_jitter_prob=color_jitter_prob,
to_grayscale_prob=to_grayscale_prob,
apply_prob=apply_prob,
shuffle=shuffle)
return jax.vmap(jitter_fn)(images, rngs)
def gaussian_blur(images,
rng,
blur_divider=10.,
sigma_min=0.1,
sigma_max=2.0,
apply_prob=1.0):
"""Applies gaussian blur to a batch of images.
Args:
images: an NHWC tensor, with C=3.
rng: a single PRNGKey.
blur_divider: the blurring kernel will have size H / blur_divider.
sigma_min: the minimum value for sigma in the blurring kernel.
sigma_max: the maximum value for sigma in the blurring kernel.
apply_prob: the probability of applying the transform to a batch element.
Returns:
A NHWC tensor of the blurred images.
"""
rngs = jax.random.split(rng, images.shape[0])
kernel_size = images.shape[1] / blur_divider
blur_fn = functools.partial(
_random_gaussian_blur,
kernel_size=kernel_size,
padding='SAME',
sigma_min=sigma_min,
sigma_max=sigma_max,
apply_prob=apply_prob)
return jax.vmap(blur_fn)(images, rngs)
def _solarize_single_image(image, rng, threshold, apply_prob):
def _apply(image):
return jnp.where(image < threshold, image, 1. - image)
return _maybe_apply(_apply, image, rng, apply_prob)
def solarize(images, rng, threshold=0.5, apply_prob=1.0):
"""Applies solarization.
Args:
images: an NHWC tensor (with C=3).
rng: a single PRNGKey.
threshold: the solarization threshold.
apply_prob: the probability of applying the transform to a batch element.
Returns:
A NHWC tensor of the transformed images.
"""
rngs = jax.random.split(rng, images.shape[0])
solarize_fn = functools.partial(
_solarize_single_image, threshold=threshold, apply_prob=apply_prob)
return jax.vmap(solarize_fn)(images, rngs)
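# Smoke-test sketch for the augmentation pipeline on random data (added for
# illustration; the batch and image sizes are arbitrary, and these helpers use
# the legacy five-argument `jax.lax.cond` signature, so this only runs under
# the JAX version the original BYOL code targeted).
if __name__ == "__main__":
  rng = jax.random.PRNGKey(0)
  key1, key2, key3 = jax.random.split(rng, 3)
  batch = dict(
      view1=jax.random.uniform(key1, (2, 32, 32, 3)),
      view2=jax.random.uniform(key2, (2, 32, 32, 3)),
      labels=jnp.zeros((2,), dtype=jnp.int32))
  out = postprocess(batch, key3)
  print(out['view1'].shape, out['view2'].shape)  # (2, 32, 32, 3) twice.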
| relicv2-main | utils/augmentations.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning rate schedules.
The code in this file is taken from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
import jax.numpy as jnp
def target_ema(global_step: jnp.ndarray, base_ema: float,
max_steps: int) -> jnp.ndarray:
decay = _cosine_decay(global_step, max_steps, 1.)
return 1. - (1. - base_ema) * decay
def learning_schedule(global_step: jnp.ndarray, batch_size: int,
base_learning_rate: float, total_steps: int,
warmup_steps: int) -> float:
"""Cosine learning rate scheduler."""
# Compute LR & Scaled LR
scaled_lr = base_learning_rate * batch_size / 256.
learning_rate = (
global_step.astype(jnp.float32) / int(warmup_steps) *
scaled_lr if warmup_steps > 0 else scaled_lr)
# Cosine schedule after warmup.
return jnp.where(
global_step < warmup_steps, learning_rate,
_cosine_decay(global_step - warmup_steps, total_steps - warmup_steps,
scaled_lr))
def _cosine_decay(global_step: jnp.ndarray, max_steps: int,
initial_value: float) -> jnp.ndarray:
"""Simple implementation of cosine decay from TF1."""
global_step = jnp.minimum(global_step, max_steps)
cosine_decay_value = 0.5 * (1 + jnp.cos(jnp.pi * global_step / max_steps))
decayed_learning_rate = initial_value * cosine_decay_value
return decayed_learning_rate
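# Illustrative values for the schedules above (added for documentation; the
# batch size and step counts are arbitrary examples).
if __name__ == "__main__":
  for step in [0, 500, 1000, 5000, 10000]:
    lr = learning_schedule(
        jnp.asarray(step), batch_size=4096, base_learning_rate=0.3,
        total_steps=10000, warmup_steps=1000)
    ema = target_ema(jnp.asarray(step), base_ema=0.996, max_steps=10000)
    # Linear warmup to 4.8 (= 0.3 * 4096 / 256), then cosine decay to zero;
    # the EMA rate anneals from 0.996 towards 1.
    print(step, float(lr), float(ema))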
| relicv2-main | utils/schedules.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkpoint saving and restoring utilities.
The code in this file is taken from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
import os
import time
from typing import Mapping, Text, Tuple, Union
from absl import logging
import dill
import jax
import jax.numpy as jnp
from relicv2.utils import helpers
class Checkpointer:
"""A checkpoint saving and loading class."""
def __init__(self, use_checkpointing: bool, checkpoint_dir: Text,
save_checkpoint_interval: int, filename: Text):
if (not use_checkpointing or checkpoint_dir is None or
save_checkpoint_interval <= 0):
self._checkpoint_enabled = False
return
self._checkpoint_enabled = True
self._checkpoint_dir = checkpoint_dir
os.makedirs(self._checkpoint_dir, exist_ok=True)
self._filename = filename
self._checkpoint_path = os.path.join(self._checkpoint_dir, filename)
self._last_checkpoint_time = 0
self._checkpoint_every = save_checkpoint_interval
def maybe_save_checkpoint(self, experiment_state: Mapping[Text, jnp.ndarray],
step: int, rng: jnp.ndarray, is_final: bool):
"""Saves a checkpoint if enough time has passed since the previous one."""
current_time = time.time()
if (not self._checkpoint_enabled or
jax.host_id() != 0 or # Only checkpoint the first worker.
(not is_final and
current_time - self._last_checkpoint_time < self._checkpoint_every)):
return
checkpoint_data = dict(
experiment_state=jax.tree_map(lambda x: jax.device_get(x[0]),
experiment_state),
step=step,
rng=rng)
with open(self._checkpoint_path + '_tmp', 'wb') as checkpoint_file:
dill.dump(checkpoint_data, checkpoint_file, protocol=2)
try:
os.rename(self._checkpoint_path, self._checkpoint_path + '_old')
remove_old = True
except FileNotFoundError:
remove_old = False # No previous checkpoint to remove
os.rename(self._checkpoint_path + '_tmp', self._checkpoint_path)
if remove_old:
os.remove(self._checkpoint_path + '_old')
self._last_checkpoint_time = current_time
def maybe_load_checkpoint(
self) -> Union[Tuple[Mapping[Text, jnp.ndarray], int, jnp.ndarray], None]:
"""Loads a checkpoint if any is found."""
checkpoint_data = load_checkpoint(self._checkpoint_path)
if checkpoint_data is None:
logging.info('No existing checkpoint found at %s', self._checkpoint_path)
return None
step = checkpoint_data['step']
rng = checkpoint_data['rng']
experiment_state = jax.tree_map(helpers.bcast_local_devices,
checkpoint_data['experiment_state'])
del checkpoint_data
return experiment_state, step, rng
def load_checkpoint(checkpoint_path):
"""Function for loading pre-trained encoder checkpoint."""
logging.info('Loading checkpoint from %s', checkpoint_path)
try:
with open(checkpoint_path, 'rb') as checkpoint_file:
checkpoint_data = dill.load(checkpoint_file)
logging.info('Loading checkpoint from %s, saved at step %d',
checkpoint_path, checkpoint_data['step'])
return checkpoint_data
except FileNotFoundError:
return None
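# Minimal save/restore round trip (added for illustration; the temporary
# directory and the toy experiment state are arbitrary and not part of the
# original code).
if __name__ == "__main__":
  import tempfile
  demo_dir = tempfile.mkdtemp()
  checkpointer = Checkpointer(
      use_checkpointing=True, checkpoint_dir=demo_dir,
      save_checkpoint_interval=1, filename='demo.pkl')
  state = dict(w=helpers.bcast_local_devices(jnp.ones((2, 2))))
  checkpointer.maybe_save_checkpoint(
      state, step=0, rng=jax.random.PRNGKey(0), is_final=True)
  restored_state, step, rng = checkpointer.maybe_load_checkpoint()
  print(step, jax.tree_map(lambda x: x.shape, restored_state))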
| relicv2-main | utils/checkpointing.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eval experiment configuration."""
| relicv2-main | configs/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config file for evaluation experiment.
The code in this file is adapted from the BYOL code
(https://github.com/deepmind/deepmind-research/tree/master/byol).
"""
from typing import Text
from relicv2.utils import dataset
def get_config(checkpoint_to_evaluate: Text, batch_size: int, num_epochs: int):
"""Return config object for training."""
train_images_per_epoch = dataset.Split.TRAIN_AND_VALID.num_examples
config = dict(
random_seed=0,
enable_double_transpose=True,
max_steps=num_epochs * train_images_per_epoch // batch_size,
num_classes=1000,
batch_size=batch_size,
checkpoint_to_evaluate=checkpoint_to_evaluate,
# If True, allows training without loading a checkpoint.
allow_train_from_scratch=False,
# Whether the backbone should be frozen (linear evaluation) or
# trainable (fine-tuning).
freeze_backbone=True,
optimizer_config=dict(
momentum=0.9,
nesterov=True,
),
lr_schedule_config=dict(
base_learning_rate=0.3,
warmup_steps=0,
),
network_config=dict( # Should match the evaluated checkpoint
encoder_class='ResNet50', # Should match a class in utils/networks.
encoder_config=dict(resnet_v2=False, width_multiplier=1),
bn_decay_rate=0.9,
),
evaluation_config=dict(
subset='test',
batch_size=100,
),
checkpointing_config=dict(
use_checkpointing=True,
checkpoint_dir='/tmp/relicv2',
save_checkpoint_interval=300,
filename='linear-eval.pkl'),
)
return config
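# Example of building the evaluation config (added for illustration; the
# checkpoint path is a placeholder and `relicv2.utils.dataset` must be
# importable for the epoch-to-step conversion).
if __name__ == "__main__":
  cfg = get_config(checkpoint_to_evaluate='/tmp/relicv2/pretrain.pkl',
                   batch_size=1024, num_epochs=80)
  print(cfg['max_steps'], cfg['network_config']['encoder_class'])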
| relicv2-main | configs/eval.py |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install script for setuptools."""
import os
import subprocess
import sys
from setuptools import Extension
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
def __init__(self, name, cmake_lists_dir=".", **kwargs):
Extension.__init__(self, name, sources=[], **kwargs)
self.cmake_lists_dir = os.path.abspath(cmake_lists_dir)
class CMakeBuildExt(build_ext):
"""Build extension handling building C/C++ files with CMake."""
def run(self):
try:
subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError("Cannot find CMake executable")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
self.configure_cmake(ext)
self.build_cmake(ext)
def configure_cmake(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cfg = "Debug" if self.debug else "Release"
configure_cmd = ["cmake", ext.cmake_lists_dir]
configure_cmd += [
"-DCMAKE_BUILD_TYPE={}".format(cfg),
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}/spiral/environments".format(
cfg.upper(), extdir),
"-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY_{}={}".format(
cfg.upper(), self.build_temp),
"-DPYTHON_EXECUTABLE:FILEPATH={}".format(sys.executable),
]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(configure_cmd, cwd=self.build_temp)
def build_cmake(self, ext):
cfg = "Debug" if self.debug else "Release"
subprocess.check_call(["cmake", "--build", ".", "--config", cfg],
cwd=self.build_temp)
spiral_extension = CMakeExtension("spiral")
setup(
name="spiral",
version="1.0",
author="DeepMind",
license="Apache License, Version 2.0",
packages=find_packages(include=["spiral*"]),
python_requires=">=3.6",
setup_requires=[],
install_requires=[
"tensorflow>=1.14,<2",
"tensorflow-hub",
"dm-sonnet>=1.35,<2",
"dm-env",
"six",
"scipy",
"numpy",
],
ext_modules=[spiral_extension],
cmdclass={
"build_ext": CMakeBuildExt,
},
)
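# Typical usage (illustrative; requires CMake and a C++ toolchain on the PATH):
#   python setup.py build_ext
#   pip install .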
| spiral-master | setup.py |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| spiral-master | spiral/__init__.py |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LibMyPaint Reinforcement Learning environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
import collections
import copy
import os
import dm_env as environment
from dm_env import specs
import enum
import numpy as np
from six.moves import xrange
import tensorflow as tf
from spiral.environments import utils
from spiral.environments import pylibmypaint
nest = tf.contrib.framework.nest
class BrushSettings(enum.IntEnum):
"""Enumeration of brush settings."""
(MYPAINT_BRUSH_SETTING_OPAQUE,
MYPAINT_BRUSH_SETTING_OPAQUE_MULTIPLY,
MYPAINT_BRUSH_SETTING_OPAQUE_LINEARIZE,
MYPAINT_BRUSH_SETTING_RADIUS_LOGARITHMIC,
MYPAINT_BRUSH_SETTING_HARDNESS,
MYPAINT_BRUSH_SETTING_ANTI_ALIASING,
MYPAINT_BRUSH_SETTING_DABS_PER_BASIC_RADIUS,
MYPAINT_BRUSH_SETTING_DABS_PER_ACTUAL_RADIUS,
MYPAINT_BRUSH_SETTING_DABS_PER_SECOND,
MYPAINT_BRUSH_SETTING_RADIUS_BY_RANDOM,
MYPAINT_BRUSH_SETTING_SPEED1_SLOWNESS,
MYPAINT_BRUSH_SETTING_SPEED2_SLOWNESS,
MYPAINT_BRUSH_SETTING_SPEED1_GAMMA,
MYPAINT_BRUSH_SETTING_SPEED2_GAMMA,
MYPAINT_BRUSH_SETTING_OFFSET_BY_RANDOM,
MYPAINT_BRUSH_SETTING_OFFSET_BY_SPEED,
MYPAINT_BRUSH_SETTING_OFFSET_BY_SPEED_SLOWNESS,
MYPAINT_BRUSH_SETTING_SLOW_TRACKING,
MYPAINT_BRUSH_SETTING_SLOW_TRACKING_PER_DAB,
MYPAINT_BRUSH_SETTING_TRACKING_NOISE,
MYPAINT_BRUSH_SETTING_COLOR_H,
MYPAINT_BRUSH_SETTING_COLOR_S,
MYPAINT_BRUSH_SETTING_COLOR_V,
MYPAINT_BRUSH_SETTING_RESTORE_COLOR,
MYPAINT_BRUSH_SETTING_CHANGE_COLOR_H,
MYPAINT_BRUSH_SETTING_CHANGE_COLOR_L,
MYPAINT_BRUSH_SETTING_CHANGE_COLOR_HSL_S,
MYPAINT_BRUSH_SETTING_CHANGE_COLOR_V,
MYPAINT_BRUSH_SETTING_CHANGE_COLOR_HSV_S,
MYPAINT_BRUSH_SETTING_SMUDGE,
MYPAINT_BRUSH_SETTING_SMUDGE_LENGTH,
MYPAINT_BRUSH_SETTING_SMUDGE_RADIUS_LOG,
MYPAINT_BRUSH_SETTING_ERASER,
MYPAINT_BRUSH_SETTING_STROKE_THRESHOLD,
MYPAINT_BRUSH_SETTING_STROKE_DURATION_LOGARITHMIC,
MYPAINT_BRUSH_SETTING_STROKE_HOLDTIME,
MYPAINT_BRUSH_SETTING_CUSTOM_INPUT,
MYPAINT_BRUSH_SETTING_CUSTOM_INPUT_SLOWNESS,
MYPAINT_BRUSH_SETTING_ELLIPTICAL_DAB_RATIO,
MYPAINT_BRUSH_SETTING_ELLIPTICAL_DAB_ANGLE,
MYPAINT_BRUSH_SETTING_DIRECTION_FILTER,
MYPAINT_BRUSH_SETTING_LOCK_ALPHA,
MYPAINT_BRUSH_SETTING_COLORIZE,
MYPAINT_BRUSH_SETTING_SNAP_TO_PIXEL,
MYPAINT_BRUSH_SETTING_PRESSURE_GAIN_LOG,
MYPAINT_BRUSH_SETTINGS_COUNT) = range(46)
def _fix15_to_rgba(buf):
"""Converts buffer from a 15-bit fixed-point representation into uint8 RGBA.
Taken verbatim from the C code for libmypaint.
Args:
buf: 15-bit fixed-point buffer represented in `uint16`.
Returns:
A `uint8` buffer with RGBA channels.
"""
rgb, alpha = np.split(buf, [3], axis=2)
rgb = rgb.astype(np.uint32)
mask = alpha[..., 0] == 0
rgb[mask] = 0
rgb[~mask] = ((rgb[~mask] << 15) + alpha[~mask] // 2) // alpha[~mask]
rgba = np.concatenate((rgb, alpha), axis=2)
rgba = (255 * rgba + (1 << 15) // 2) // (1 << 15)
return rgba.astype(np.uint8)
class LibMyPaint(environment.Environment):
"""A painting environment wrapping libmypaint."""
ACTION_NAMES = ["control", "end", "flag", "pressure", "size",
"red", "green", "blue"]
SPATIAL_ACTIONS = ["control", "end"]
COLOR_ACTIONS = ["red", "green", "blue"]
BRUSH_APPEARANCE_PARAMS = ["pressure", "log_size",
"hue", "saturation", "value"]
ACTION_MASKS = {
"paint": collections.OrderedDict([
("control", 1.0),
("end", 1.0),
("flag", 1.0),
("pressure", 1.0),
("size", 1.0),
("red", 1.0),
("green", 1.0),
("blue", 1.0)]),
"move": collections.OrderedDict([
("control", 0.0),
("end", 1.0),
("flag", 1.0),
("pressure", 0.0),
("size", 0.0),
("red", 0.0),
("green", 0.0),
("blue", 0.0)]),
}
STROKES_PER_STEP = 50
DTIME = 0.1
P_VALUES = np.linspace(0.1, 1.0, 10)
R_VALUES = np.linspace(0.0, 1.0, 20)
G_VALUES = np.linspace(0.0, 1.0, 20)
B_VALUES = np.linspace(0.0, 1.0, 20)
def __init__(self,
episode_length,
canvas_width,
grid_width,
brush_type,
brush_sizes,
use_color,
use_pressure=True,
use_alpha=False,
background="white",
rewards=None,
discount=1.,
brushes_basedir=""):
self._name = "libmypaint"
if brush_sizes is None:
brush_sizes = [1, 2, 3]
self._canvas_width = canvas_width
self._grid_width = grid_width
self._grid_size = grid_width * grid_width
self._use_color = use_color
self._use_alpha = use_alpha
if not self._use_color:
self._output_channels = 1
elif not self._use_alpha:
self._output_channels = 3
else:
self._output_channels = 4
self._use_pressure = use_pressure
assert np.all(np.array(brush_sizes) > 0.)
self._log_brush_sizes = [np.log(float(i)) for i in brush_sizes]
self._rewards = rewards
# Build action specification and action masks.
self._action_spec = collections.OrderedDict([
("control", specs.DiscreteArray(self._grid_size)),
("end", specs.DiscreteArray(self._grid_size)),
("flag", specs.DiscreteArray(2)),
("pressure", specs.DiscreteArray(len(self.P_VALUES))),
("size", specs.DiscreteArray(len(self._log_brush_sizes))),
("red", specs.DiscreteArray(len(self.R_VALUES))),
("green", specs.DiscreteArray(len(self.G_VALUES))),
("blue", specs.DiscreteArray(len(self.B_VALUES)))])
self._action_masks = copy.deepcopy(self.ACTION_MASKS)
def remove_action_mask(name):
for k in self._action_masks.keys():
del self._action_masks[k][name]
if not self._use_pressure:
del self._action_spec["pressure"]
remove_action_mask("pressure")
if len(self._log_brush_sizes) > 1:
self._use_size = True
else:
del self._action_spec["size"]
remove_action_mask("size")
self._use_size = False
if not self._use_color:
for k in self.COLOR_ACTIONS:
del self._action_spec[k]
remove_action_mask(k)
# Setup the painting surface.
if background == "white":
background = pylibmypaint.SurfaceWrapper.Background.kWhite
elif background == "transparent":
background = pylibmypaint.SurfaceWrapper.Background.kBlack
else:
raise ValueError(
"Invalid background type: {}".format(background))
self._surface = pylibmypaint.SurfaceWrapper(
self._canvas_width, self._canvas_width, background)
# Setup the brush.
self._brush = pylibmypaint.BrushWrapper()
self._brush.SetSurface(self._surface)
self._brush.LoadFromFile(
os.path.join(brushes_basedir, "brushes/{}.myb".format(brush_type)))
self._episode_step = 0
self._episode_length = episode_length
self._prev_step_type = None
self._discount = discount
@property
def name(self):
"""Gets the name of the environment."""
return self._name
@property
def grid_width(self):
return self._grid_width
def _get_canvas(self):
buf = self._surface.BufferAsNumpy()
buf = buf.transpose((0, 2, 1, 3, 4))
buf = buf.reshape((self._canvas_width, self._canvas_width, 4))
canvas = np.single(_fix15_to_rgba(buf)) / 255.0
return canvas
def observation(self):
canvas = self._get_canvas()
if not self._use_color:
canvas = canvas[..., 0:1]
elif not self._use_alpha:
canvas = canvas[..., 0:3]
episode_step = np.array(self._episode_step, dtype=np.int32)
episode_length = np.array(self._episode_length, dtype=np.int32)
return collections.OrderedDict([
("canvas", canvas),
("episode_step", episode_step),
("episode_length", episode_length),
("action_mask", self._action_mask)])
def _update_libmypaint_brush(self, **kwargs):
if "log_size" in kwargs:
self._brush.SetBaseValue(
BrushSettings.MYPAINT_BRUSH_SETTING_RADIUS_LOGARITHMIC,
kwargs["log_size"])
hsv_keys = ["hue", "saturation", "value"]
if any(k in kwargs for k in hsv_keys):
assert all(k in kwargs for k in hsv_keys)
self._brush.SetBaseValue(
BrushSettings.MYPAINT_BRUSH_SETTING_COLOR_H, kwargs["hue"])
self._brush.SetBaseValue(
BrushSettings.MYPAINT_BRUSH_SETTING_COLOR_S, kwargs["saturation"])
self._brush.SetBaseValue(
BrushSettings.MYPAINT_BRUSH_SETTING_COLOR_V, kwargs["value"])
def _update_brush_params(self, **kwargs):
rgb_keys = ["red", "green", "blue"]
if any(k in kwargs for k in rgb_keys):
assert all(k in kwargs for k in rgb_keys)
red, green, blue = [kwargs[k] for k in rgb_keys]
for k in rgb_keys:
del kwargs[k]
if self._use_color:
hue, saturation, value = utils.rgb_to_hsv(red, green, blue)
kwargs.update(dict(
hue=hue, saturation=saturation, value=value))
self._prev_brush_params = copy.copy(self._brush_params)
self._brush_params.update(kwargs)
if not self._prev_brush_params["is_painting"]:
# If we were not painting before we should pretend that the appearence
# of the brush didn't change.
self._prev_brush_params.update({
k: self._brush_params[k] for k in self.BRUSH_APPEARANCE_PARAMS})
# Update the libmypaint brush object.
self._update_libmypaint_brush(**kwargs)
def _reset_brush_params(self):
hue, saturation, value = utils.rgb_to_hsv(
self.R_VALUES[0], self.G_VALUES[0], self.B_VALUES[0])
pressure = 0.0 if self._use_pressure else 1.0
self._brush_params = collections.OrderedDict([
("y", 0.0),
("x", 0.0),
("pressure", pressure),
("log_size", self._log_brush_sizes[0]),
("hue", hue),
("saturation", saturation),
("value", value),
("is_painting", False)])
self._prev_brush_params = None
# Reset the libmypaint brush object.
self._move_to(0.0, 0.0, update_brush_params=False)
self._update_libmypaint_brush(**self._brush_params)
def _move_to(self, y, x, update_brush_params=True):
if update_brush_params:
      self._update_brush_params(y=y, x=x, is_painting=False)
self._brush.Reset()
self._brush.NewStroke()
self._brush.StrokeTo(x, y, 0.0, self.DTIME)
def _bezier_to(self, y_c, x_c, y_e, x_e, pressure,
log_size, red, green, blue):
self._update_brush_params(
y=y_e, x=x_e, pressure=pressure, log_size=log_size,
red=red, green=green, blue=blue, is_painting=True)
y_s, x_s, pressure_s = [
self._prev_brush_params[k] for k in ["y", "x", "pressure"]]
pressure_e = pressure
# Compute point along the Bezier curve.
p_s = np.array([[y_s, x_s]])
p_c = np.array([[y_c, x_c]])
p_e = np.array([[y_e, x_e]])
points = utils.quadratic_bezier(p_s, p_c, p_e, self.STROKES_PER_STEP + 1)[0]
# We need to perform this pseudo-stroke at the beginning of the curve
# so that libmypaint handles the pressure correctly.
if not self._prev_brush_params["is_painting"]:
self._brush.StrokeTo(x_s, y_s, pressure_s, self.DTIME)
for t in xrange(self.STROKES_PER_STEP):
alpha = float(t + 1) / self.STROKES_PER_STEP
pressure = pressure_s * (1. - alpha) + pressure_e * alpha
self._brush.StrokeTo(
points[t + 1][1], points[t + 1][0], pressure, self.DTIME)
def _grid_to_real(self, location):
return tuple(self._canvas_width * float(c) / self._grid_width
for c in location)
def _process_action(self, action):
flag = action["flag"]
# Get pressure and size.
if self._use_pressure:
pressure = self.P_VALUES[action["pressure"]]
else:
pressure = 1.0
if self._use_size:
log_size = self._log_brush_sizes[action["size"]]
else:
log_size = self._log_brush_sizes[0]
if self._use_color:
red = self.R_VALUES[action["red"]]
green = self.G_VALUES[action["green"]]
blue = self.B_VALUES[action["blue"]]
else:
red, green, blue = None, None, None
# Get locations. NOTE: the order of the coordinates is (y, x).
locations = [
np.unravel_index(action[k], (self._grid_width, self._grid_width))
for k in self.SPATIAL_ACTIONS]
# Convert grid coordinates into full resolution coordinates.
locations = [
self._grid_to_real(location) for location in locations]
return locations, flag, pressure, log_size, red, green, blue
def reset(self):
self._surface.Clear()
self._reset_brush_params()
self.stats = {
"total_strokes": 0,
"total_disjoint": 0,
}
# TODO: Use an all-zero action mask instead of the "move" mask here.
# Unfortunately, the agents we have rely on this bug (they
# take the mask as an input at the next time step).
# self._action_mask = nest.map_structure(
# lambda _: 0.0, self._action_masks["move"])
self._action_mask = self._action_masks["move"]
time_step = environment.restart(observation=self.observation())
self._episode_step = 1
self._prev_step_type = time_step.step_type
return time_step
def step(self, action):
"""Performs an environment step."""
# If the environment has just been created or finished an episode
# we should reset it (ignoring the action).
if self._prev_step_type in {None, environment.StepType.LAST}:
return self.reset()
for k in action.keys():
self._action_spec[k].validate(action[k])
locations, flag, pressure, log_size, red, green, blue = (
self._process_action(action))
loc_control, loc_end = locations
# Perform action.
self._surface.BeginAtomic()
if flag == 1: # The agent produces a visible stroke.
self._action_mask = self._action_masks["paint"]
y_c, x_c = loc_control
y_e, x_e = loc_end
self._bezier_to(y_c, x_c, y_e, x_e, pressure, log_size, red, green, blue)
# Update episode statistics.
self.stats["total_strokes"] += 1
if not self._prev_brush_params["is_painting"]:
self.stats["total_disjoint"] += 1
elif flag == 0: # The agent moves to a new location.
self._action_mask = self._action_masks["move"]
y_e, x_e = loc_end
self._move_to(y_e, x_e)
else:
raise ValueError("Invalid flag value")
self._surface.EndAtomic()
# Handle termination of the episode.
reward = 0.0
self._episode_step += 1
if self._episode_step == self._episode_length:
time_step = environment.termination(reward=reward,
observation=self.observation())
else:
time_step = environment.transition(reward=reward,
observation=self.observation(),
discount=self._discount)
self._prev_step_type = time_step.step_type
return time_step
def observation_spec(self):
action_mask_spec = nest.map_structure(
lambda _: specs.Array(shape=(), dtype=np.float32),
self._action_masks["move"])
canvas_shape = (self._canvas_width,
self._canvas_width,
self._output_channels)
return collections.OrderedDict([
("canvas", specs.Array(shape=canvas_shape, dtype=np.float32)),
("episode_step", specs.Array(shape=(), dtype=np.int32)),
("episode_length", specs.Array(shape=(), dtype=np.int32)),
("action_mask", action_mask_spec)])
def action_spec(self):
return self._action_spec
def close(self):
self._brush = None
self._surface = None
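# Minimal smoke-test sketch (added for illustration). It assumes the compiled
# `pylibmypaint` extension is importable and that a "brushes/" directory with
# a "classic/dry_brush.myb" brush lives under `brushes_basedir`; both the
# brush name and the base directory below are placeholders.
if __name__ == "__main__":
  env = LibMyPaint(
      episode_length=20, canvas_width=64, grid_width=32,
      brush_type="classic/dry_brush", brush_sizes=[1, 2, 4],
      use_color=True, brushes_basedir=".")
  timestep = env.reset()
  action = {name: np.int32(0) for name in env.action_spec().keys()}
  timestep = env.step(action)  # flag == 0, i.e. move without painting.
  print(timestep.observation["canvas"].shape)  # (64, 64, 3)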
| spiral-master | spiral/environments/libmypaint.py |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utitlity functions for environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def quadratic_bezier(p_s, p_c, p_e, n):
  """Samples n points along the quadratic Bezier curve defined by p_s, p_c, p_e.
  Evaluates B(t) = (1 - t)^2 * p_s + 2 * (1 - t) * t * p_c + t^2 * p_e
  at n evenly spaced values of t in [0, 1].
  """
  t = np.linspace(0., 1., n)
t = t.reshape((1, n, 1))
p_s, p_c, p_e = [np.expand_dims(p, axis=1) for p in [p_s, p_c, p_e]]
p = (1 - t) * (1 - t) * p_s + 2 * (1 - t) * t * p_c + t * t * p_e
return p
def rgb_to_hsv(red, green, blue):
"""Converts RGB to HSV."""
hue = 0.0
red = np.clip(red, 0.0, 1.0)
green = np.clip(green, 0.0, 1.0)
blue = np.clip(blue, 0.0, 1.0)
max_value = np.max([red, green, blue])
min_value = np.min([red, green, blue])
value = max_value
delta = max_value - min_value
if delta > 0.0001:
saturation = delta / max_value
if red == max_value:
hue = (green - blue) / delta
if hue < 0.0:
hue += 6.0
elif green == max_value:
hue = 2.0 + (blue - red) / delta
elif blue == max_value:
hue = 4.0 + (red - green) / delta
hue /= 6.0
else:
saturation = 0.0
hue = 0.0
return hue, saturation, value
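# Small illustrative checks (added for documentation; not part of the original
# module).
if __name__ == "__main__":
  curve = quadratic_bezier(np.array([[0., 0.]]), np.array([[1., 1.]]),
                           np.array([[0., 2.]]), n=3)
  print(curve)  # Start point, the curve point at t=0.5 (i.e. [0.5, 1.0]), end point.
  print(rgb_to_hsv(1.0, 0.0, 0.0))  # Pure red -> hue 0.0, saturation 1.0, value 1.0.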
| spiral-master | spiral/environments/utils.py |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fluid Paint Reinforcement Learning environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-import-not-at-top
import collections
import copy
import dm_env as environment
from dm_env import specs
import numpy as np
from six.moves import xrange
import tensorflow as tf
from spiral.environments import utils
from spiral.environments import config_pb2
from spiral.environments import pyfluid
nest = tf.contrib.framework.nest
def mix(a, b, t):
return a * (1.0 - t) + b * t
def circle_mix(a, b, t):
"""Interpolates between `a` and `b` assuming they lie on a circle."""
case = np.argmin([np.abs(b - a), np.abs(b - a - 1), np.abs(b - a + 1)])
if case == 0:
result = np.float32(mix(a, b, t))
elif case == 1:
result = np.float32(mix(a, b - 1, t)) % np.float32(1.0)
else: # case == 2
result = np.float32(mix(a, b + 1, t)) % np.float32(1.0)
if result == 1.0: # Somehow, result can be 1.0 at this point.
return np.float32(0.0) # We make sure that in this case we return 0.0.
else:
return result
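# For example, circle_mix(0.9, 0.1, 0.5) interpolates through the wrap-around
# point and returns 0.0, whereas a naive mix(0.9, 0.1, 0.5) would return 0.5.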
class FluidPaint(environment.Environment):
"""A painting environment wrapping Fluid Paint."""
ACTION_NAMES = ["control", "end", "flag", "speed", "size",
"red", "green", "blue", "alpha"]
SPATIAL_ACTIONS = ["control", "end"]
BRUSH_APPEARANCE_PARAMS = ["size", "hue", "saturation", "value", "alpha"]
ACTION_MASKS = {
"paint": collections.OrderedDict([
("control", 1.0),
("end", 1.0),
("flag", 1.0),
("speed", 1.0),
("size", 1.0),
("red", 1.0),
("green", 1.0),
("blue", 1.0),
("alpha", 1.0)]),
"move": collections.OrderedDict([
("control", 0.0),
("end", 1.0),
("flag", 1.0),
("speed", 1.0),
("size", 0.0),
("red", 0.0),
("green", 0.0),
("blue", 0.0),
("alpha", 0.0)]),
}
STROKES_PER_STEP = 5 * np.arange(2, 11)
R_VALUES = np.linspace(0.0, 1.0, 20)
G_VALUES = np.linspace(0.0, 1.0, 20)
B_VALUES = np.linspace(0.0, 1.0, 20)
A_VALUES = np.linspace(0.0, 1.0, 10)
def __init__(self,
episode_length,
canvas_width,
grid_width,
brush_sizes,
rewards=None,
discount=1.,
shaders_basedir=""):
self._name = "fluid_paint"
if brush_sizes is None:
self._brush_sizes = [10.0, 30.0, 50.0]
else:
self._brush_sizes = brush_sizes
self._canvas_width = canvas_width
self._grid_width = grid_width
self._grid_size = grid_width * grid_width
self._rewards = rewards
# Build action specification and action masks.
self._action_spec = collections.OrderedDict([
("control", specs.DiscreteArray(self._grid_size)),
("end", specs.DiscreteArray(self._grid_size)),
("flag", specs.DiscreteArray(2)),
("speed", specs.DiscreteArray(len(self.STROKES_PER_STEP))),
("size", specs.DiscreteArray(len(self._brush_sizes))),
("red", specs.DiscreteArray(len(self.R_VALUES))),
("green", specs.DiscreteArray(len(self.G_VALUES))),
("blue", specs.DiscreteArray(len(self.B_VALUES))),
("alpha", specs.DiscreteArray(len(self.A_VALUES)))])
self._action_masks = copy.deepcopy(self.ACTION_MASKS)
self._brush_params = None
self._prev_reward = 0
config = config_pb2.Config()
self._wrapper = pyfluid.Wrapper(config.SerializeToString())
self._wrapper.Setup(
self._canvas_width, self._canvas_width, shaders_basedir)
self._episode_step = 0
self._episode_length = episode_length
self._prev_step_type = None
self._discount = discount
@property
def name(self):
"""Gets the name of the environment."""
return self._name
@property
def grid_width(self):
return self._grid_width
def _get_canvas(self):
canvas = self._wrapper.CanvasAsNumpy()[..., :3]
canvas = np.single(canvas) / 255.0
return canvas
def observation(self):
canvas = self._get_canvas()
episode_step = np.array(self._episode_step, dtype=np.int32)
episode_length = np.array(self._episode_length, dtype=np.int32)
return collections.OrderedDict([
("canvas", canvas),
("episode_step", episode_step),
("episode_length", episode_length),
("action_mask", self._action_mask)])
def _update_brush_params(self, **kwargs):
rgb_keys = ["red", "green", "blue"]
if any(k in kwargs for k in rgb_keys):
assert all(k in kwargs for k in rgb_keys)
red, green, blue = [kwargs[k] for k in rgb_keys]
for k in rgb_keys:
del kwargs[k]
hue, saturation, value = utils.rgb_to_hsv(red, green, blue)
kwargs.update(dict(
hue=hue, saturation=saturation, value=value))
self._prev_brush_params = copy.copy(self._brush_params)
self._brush_params.update(kwargs)
if not self._prev_brush_params["is_painting"]:
# If we were not painting before we should pretend that the appearence
# of the brush didn't change.
self._prev_brush_params.update({
k: self._brush_params[k] for k in self.BRUSH_APPEARANCE_PARAMS})
def _reset_brush_params(self):
hue, saturation, value = utils.rgb_to_hsv(
self.R_VALUES[0], self.G_VALUES[0], self.B_VALUES[0])
self._brush_params = collections.OrderedDict([
("y", 0.0),
("x", 0.0),
("size", self._brush_sizes[0]),
("hue", hue),
("saturation", saturation),
("value", value),
("alpha", self.A_VALUES[0]),
("is_painting", False)])
self._prev_brush_params = None
def _move_to(self, y, x, num_strokes):
    self._update_brush_params(y=y, x=x, is_painting=False)
y_s, x_s = [self._prev_brush_params[k] for k in ["y", "x"]]
y_e, x_e = y, x
for i in xrange(num_strokes):
t = float(i + 1) / num_strokes
x = mix(x_s, x_e, t)
y = mix(y_s, y_e, t)
self._wrapper.Update(x, y, self._brush_params["size"], False)
def _bezier_to(self, y_c, x_c, y_e, x_e, num_strokes,
size, red, green, blue, alpha):
self._update_brush_params(
y=y_e, x=x_e, size=size, red=red, green=green, blue=blue, alpha=alpha,
is_painting=True)
y_s, x_s = [self._prev_brush_params[k] for k in ["y", "x"]]
# Compute point along the Bezier curve.
p_s = np.array([[y_s, x_s]])
p_c = np.array([[y_c, x_c]])
p_e = np.array([[y_e, x_e]])
points = utils.quadratic_bezier(p_s, p_c, p_e, num_strokes + 1)[0]
def mix_for_key(a, b, t, key):
if key == "hue":
return circle_mix(a, b, t)
else:
return mix(a, b, t)
keys = self.BRUSH_APPEARANCE_PARAMS
values_s = [self._prev_brush_params[k] for k in keys]
values_e = [self._brush_params[k] for k in keys]
for i in xrange(num_strokes):
t = float(i + 1) / num_strokes
# Interpolate brush appearance parameters.
params = collections.OrderedDict([
(k, mix_for_key(value_s, value_e, t, k))
for k, value_s, value_e in zip(keys, values_s, values_e)])
self._wrapper.SetBrushColor(
params["hue"], params["saturation"], params["value"], params["alpha"])
self._wrapper.Update(
points[i + 1][1], points[i + 1][0], params["size"], True)
def _grid_to_real(self, location):
return tuple(self._canvas_width * float(c) / self._grid_width
for c in location)
def _process_action(self, action):
flag = action["flag"]
num_strokes = self.STROKES_PER_STEP[action["speed"]]
size = self._brush_sizes[action["size"]]
red = self.R_VALUES[action["red"]]
green = self.G_VALUES[action["green"]]
blue = self.B_VALUES[action["blue"]]
alpha = self.A_VALUES[action["alpha"]]
# Get locations. NOTE: the order of the coordinates is (y, x).
locations = [
np.unravel_index(action[k], (self._grid_width, self._grid_width))
for k in self.SPATIAL_ACTIONS]
# Convert grid coordinates into full resolution coordinates.
locations = [
self._grid_to_real(location) for location in locations]
return locations, flag, num_strokes, size, red, green, blue, alpha
def reset(self):
self._wrapper.Reset()
self._reset_brush_params()
# The first call of `Update()` after `Reset()` initializes the brush.
# We don't need to simulate movement to the initial position.
self._wrapper.Update(0.0, 0.0, self._brush_params["size"], False)
self.stats = {
"total_strokes": 0,
"total_disjoint": 0,
}
# TODO: Use an all-zero action mask instead of the "move" mask here.
# Unfortunately, the agents we have rely on this bug (they
# take the mask as an input at the next time step).
# self._action_mask = nest.map_structure(
# lambda _: 0.0, self._action_masks["move"])
self._action_mask = self._action_masks["move"]
time_step = environment.restart(observation=self.observation())
self._episode_step = 1
self._prev_step_type = time_step.step_type
return time_step
def step(self, action):
"""Performs an environment step."""
# If the environment has just been created or finished an episode
# we should reset it (ignoring the action).
if self._prev_step_type in {None, environment.StepType.LAST}:
return self.reset()
for k in action.keys():
self._action_spec[k].validate(action[k])
locations, flag, num_strokes, size, red, green, blue, alpha = (
self._process_action(action))
loc_control, loc_end = locations
# Perform action.
if flag == 1: # The agent produces a visible stroke.
self._action_mask = self._action_masks["paint"]
y_c, x_c = loc_control
y_e, x_e = loc_end
self._bezier_to(y_c, x_c, y_e, x_e, num_strokes, size,
red, green, blue, alpha)
# Update episode statistics.
self.stats["total_strokes"] += 1
if not self._prev_brush_params["is_painting"]:
self.stats["total_disjoint"] += 1
elif flag == 0: # The agent moves to a new location.
self._action_mask = self._action_masks["move"]
y_e, x_e = loc_end
self._move_to(y_e, x_e, num_strokes)
else:
raise ValueError("Invalid flag value")
# Handle termination of the episode.
reward = 0.0
self._episode_step += 1
if self._episode_step == self._episode_length:
time_step = environment.termination(reward=reward,
observation=self.observation())
else:
time_step = environment.transition(reward=reward,
observation=self.observation(),
discount=self._discount)
self._prev_step_type = time_step.step_type
return time_step
def observation_spec(self):
action_mask_spec = nest.map_structure(
lambda _: specs.Array(shape=(), dtype=np.float32),
self._action_masks["move"])
canvas_shape = (self._canvas_width,
self._canvas_width,
3)
return collections.OrderedDict([
("canvas", specs.Array(shape=canvas_shape, dtype=np.float32)),
("episode_step", specs.Array(shape=(), dtype=np.int32)),
("episode_length", specs.Array(shape=(), dtype=np.int32)),
("action_mask", action_mask_spec)])
def action_spec(self):
return self._action_spec
def close(self):
self._wrapper = None
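# Construction sketch (kept as a comment because it needs the compiled
# `pyfluid` extension, the generated `config_pb2` module and the GLSL shaders
# shipped with SPIRAL; the shaders path below is a placeholder):
#
#   env = FluidPaint(episode_length=20, canvas_width=256, grid_width=32,
#                    brush_sizes=None, shaders_basedir="/path/to/shaders")
#   timestep = env.reset()
#   timestep = env.step({name: 0 for name in env.action_spec().keys()})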
| spiral-master | spiral/environments/fluid.py |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| spiral-master | spiral/agents/__init__.py |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default SPIRAL agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import dm_env as environment
import six
import sonnet as snt
import tensorflow as tf
from spiral.agents import utils
nest = tf.contrib.framework.nest
# Spatial action arguments need to be treated in a special way.
LOCATION_KEYS = ["end", "control"]
def _xy_grids(batch_size, height, width):
x_grid = tf.linspace(-1., 1., width, name="linspace")
x_grid = tf.reshape(x_grid, [1, 1, width, 1])
x_grid = tf.tile(x_grid, [batch_size, height, 1, 1])
y_grid = tf.linspace(-1., 1., height, name="linspace")
y_grid = tf.reshape(y_grid, [1, height, 1, 1])
y_grid = tf.tile(y_grid, [batch_size, 1, width, 1])
return x_grid, y_grid
class AutoregressiveHeads(snt.AbstractModule):
"""A module for autoregressive action heads."""
ORDERS = {
"libmypaint": ["flag", "end", "control", "size", "pressure",
"red", "green", "blue"],
"fluid": ["flag", "end", "control", "size", "speed",
"red", "green", "blue", "alpha"],
}
def __init__(self,
z_dim,
embed_dim,
action_spec,
decoder_params,
order,
grid_height,
grid_width,
name="autoregressive_heads"):
super(AutoregressiveHeads, self).__init__(name=name)
self._z_dim = z_dim
self._action_spec = action_spec
self._grid_height = grid_height
self._grid_width = grid_width
# Filter the order of actions according to the actual action specification.
order = self.ORDERS[order]
self._order = [k for k in order if k in action_spec]
with self._enter_variable_scope():
self._action_embeds = collections.OrderedDict(
[(k, snt.Linear(output_size=embed_dim,
name=k + "_action_embed"))
for k in six.iterkeys(action_spec)])
self._action_heads = []
for k, v in six.iteritems(action_spec):
if k in LOCATION_KEYS:
decoder = utils.ConvDecoder( # pylint: disable=not-callable
name=k + "_action_decoder",
**decoder_params)
action_head = snt.Sequential([
snt.BatchReshape([4, 4, -1]),
decoder,
snt.BatchFlatten()], name=k + "_action_head")
else:
output_size = v.maximum - v.minimum + 1
action_head = snt.Linear(
output_size=output_size, name=k + "_action_head")
self._action_heads.append((k, action_head))
self._action_heads = collections.OrderedDict(self._action_heads)
self._residual_mlps = {}
for k, v in six.iteritems(self._action_spec):
self._residual_mlps[k] = snt.nets.MLP(
output_sizes=[16, 32, self._z_dim], name=k + "_residual_mlp")
def _build(self, z):
logits = {}
action = {}
for k in self._order:
      logits[k] = tf.check_numerics(
          self._action_heads[k](z),
          "Logits for {} are not valid".format(k))
a = tf.squeeze(tf.multinomial(logits[k], num_samples=1), -1)
a = tf.cast(a, tf.int32, name=k + "_action")
action[k] = a
depth = self._action_spec[k].maximum - self._action_spec[k].minimum + 1
# Asserts actions are valid.
assert_op = tf.assert_less_equal(a, tf.constant(depth, dtype=a.dtype))
with tf.control_dependencies([assert_op]):
if k in LOCATION_KEYS:
if depth != self._grid_height * self._grid_width:
            raise AssertionError(
                "Action space {} != grid_height * grid_width {}x{}.".format(
                    depth, self._grid_height, self._grid_width))
w = self._grid_width
h = self._grid_height
y = -1.0 + 2.0 * tf.cast(a // w, tf.float32) / (h - 1)
x = -1.0 + 2.0 * tf.cast(a % w, tf.float32) / (w - 1)
a_vec = tf.stack([y, x], axis=1)
else:
a_vec = tf.one_hot(a, depth)
a_embed = self._action_embeds[k](a_vec)
residual = self._residual_mlps[k](tf.concat([z, a_embed], axis=1))
z = tf.nn.relu(z + residual)
action = collections.OrderedDict(
[(k, action[k]) for k in six.iterkeys(self._action_spec)])
logits = collections.OrderedDict(
[(k, logits[k]) for k in six.iterkeys(self._action_spec)])
return logits, action
class Agent(snt.AbstractModule):
"""A module for the default agent."""
def __init__(
self,
action_spec,
input_shape,
grid_shape,
action_order,
name="default"):
"""Initialises the agent."""
super(Agent, self).__init__(name=name)
self._action_order = action_order
self._action_spec = collections.OrderedDict(action_spec)
self._z_dim = 256
input_height, input_width = input_shape
self._grid_height, self._grid_width = grid_shape
enc_factor_h = input_height // 8 # Height of feature after encoding is 8
enc_factor_w = input_width // 8 # Width of feature after encoding is 8
dec_factor_h = self._grid_height // 4 # Height of feature after core is 4
dec_factor_w = self._grid_width // 4 # Width of feature after core is 4
self._encoder_params = {
"factor_h": enc_factor_h,
"factor_w": enc_factor_w,
"num_hiddens": 32,
"num_residual_layers": 8,
"num_residual_hiddens": 32,
}
self._decoder_params = {
"factor_h": dec_factor_h,
"factor_w": dec_factor_w,
"num_hiddens": 32,
"num_residual_layers": 8,
"num_residual_hiddens": 32,
"num_output_channels": 1,
}
with self._enter_variable_scope():
self._core = snt.LSTM(self._z_dim)
def initial_state(self, batch_size):
return utils.AgentState(
lstm_state=self._core.initial_state(batch_size),
prev_action=nest.map_structure(
lambda spec: tf.zeros((batch_size,) + spec.shape, dtype=spec.dtype),
self._action_spec))
def _maybe_reset_core_state(self, core_state, should_reset):
with tf.control_dependencies(None):
if should_reset.shape.is_fully_defined():
batch_size = should_reset.shape[0]
else:
batch_size = tf.shape(should_reset)[0]
initial_core_state = self._core.initial_state(batch_size)
# Use a reset state for the selected elements in the batch.
state = nest.map_structure(
lambda i, s: tf.where(should_reset, i, s),
initial_core_state, core_state)
return state
def _compute_condition(self, action, mask):
mask = tuple(mask[k] for k in self._action_spec.keys())
conds = []
action = action.values()
for k, a, m in zip(self._action_spec.keys(), action, mask):
depth = self._action_spec[k].maximum - self._action_spec[k].minimum + 1
embed = snt.Linear(16)
if k in LOCATION_KEYS:
if depth != self._grid_height * self._grid_width:
          raise AssertionError(
              "Action space {} != grid_height * grid_width {}x{}.".format(
                  depth, self._grid_height, self._grid_width))
w = self._grid_width
h = self._grid_height
y = -1.0 + 2.0 * tf.cast(a // w, tf.float32) / (h - 1)
x = -1.0 + 2.0 * tf.cast(a % w, tf.float32) / (w - 1)
a_vec = tf.concat([y, x], axis=1)
else:
a_vec = tf.one_hot(a, depth)[:, 0, :]
cond = embed(a_vec) * m
conds.append(cond)
cond = tf.concat(conds, axis=1)
cond = snt.nets.MLP([64, 32, 32])(cond)
return cond
@snt.reuse_variables
def _torso(self,
observation,
prev_action,
should_reset):
batch_size, x_h, x_w, _ = observation["canvas"].get_shape().as_list()
x_grid, y_grid = _xy_grids(batch_size, x_h, x_w)
should_reset = tf.squeeze(should_reset, -1)
prev_action = nest.map_structure(lambda pa: tf.where( # pylint: disable=g-long-lambda
should_reset, tf.zeros_like(pa), pa), prev_action)
spatial_inputs = [observation["canvas"]]
spatial_inputs += [x_grid, y_grid]
data = tf.concat(spatial_inputs, axis=-1)
with tf.variable_scope("torso"):
h = snt.Conv2D(32, [5, 5])(data)
# Compute conditioning vector based on the previously taken action.
prev_action = nest.map_structure(
lambda pa: tf.expand_dims(pa, -1), prev_action)
cond = self._compute_condition(prev_action, observation["action_mask"])
# Adjust the conditioning vector according to the noise sample
# provided to the model. This is inspired by the original GAN framework.
# NOTE: Unlike in normal GANs, this noise sample is not the only source
# of stochasticity. Stochastic actions contribute as well.
assert observation["noise_sample"].shape.ndims == 2
cond += snt.nets.MLP([64, 32, 32])(observation["noise_sample"])
cond = tf.reshape(cond, [batch_size, 1, 1, -1])
h += cond
h = tf.nn.relu(h)
encoder = utils.ConvEncoder(**self._encoder_params)
h = snt.BatchFlatten()(encoder(h))
h = snt.Linear(256)(tf.nn.relu(h))
return h
@snt.reuse_variables
def _head(self, core_output):
with tf.variable_scope("head"):
head = AutoregressiveHeads(
z_dim=self._z_dim,
embed_dim=16,
action_spec=self._action_spec,
grid_height=self._grid_height,
grid_width=self._grid_width,
decoder_params=self._decoder_params,
order=self._action_order)
logits, actions = head( # pylint: disable=not-callable
core_output)
baseline = tf.squeeze(snt.Linear(1)(core_output), -1)
return utils.AgentOutput(actions, logits, baseline)
def step(self,
step_type,
observation,
prev_state):
"""Computes a single step of the agent."""
with self._capture_variables():
should_reset = tf.equal(step_type, environment.StepType.FIRST)
torso_output = self._torso(
observation,
prev_state.prev_action,
should_reset)
lstm_state = self._maybe_reset_core_state(
prev_state.lstm_state, should_reset)
core_output, new_core_state = self._core(torso_output, lstm_state)
agent_output = self._head(core_output)
new_state = utils.AgentState(
prev_action=agent_output.action,
lstm_state=new_core_state)
return agent_output, new_state
def _build(self, *args): # Unused.
# pylint: disable=no-value-for-parameter
return self.step(*args)
# pylint: enable=no-value-for-parameter
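# A minimal, hypothetical usage sketch of `Agent` (not executed here; the spec
# values, shapes and variable names below are placeholders rather than values
# used elsewhere in SPIRAL). The agent is unrolled one step at a time inside a
# TF-1 graph, feeding back the state returned by `step`:
#
#   agent = Agent(action_spec=action_spec,   # OrderedDict of dm_env specs
#                 input_shape=(64, 64),
#                 grid_shape=(32, 32),
#                 action_order="libmypaint")
#   state = agent.initial_state(batch_size=1)
#   agent_output, state = agent.step(step_type, observation, state)
#
# `observation` must contain "canvas", "action_mask" and "noise_sample"
# entries, as consumed by `_torso` above.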
| spiral-master | spiral/agents/default.py |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities used by SPIRAL agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import six
import sonnet as snt
import tensorflow as tf
import tensorflow_hub as hub
nest = tf.contrib.framework.nest
AgentOutput = collections.namedtuple(
"AgentOutput", ["action", "policy_logits", "baseline"])
AgentState = collections.namedtuple(
"AgentState", ["lstm_state", "prev_action"])
class ResidualStack(snt.AbstractModule):
"""A stack of ResNet V2 blocks."""
def __init__(self,
num_hiddens,
num_residual_layers,
num_residual_hiddens,
filter_size=3,
initializers=None,
data_format="NHWC",
activation=tf.nn.relu,
name="residual_stack"):
"""Instantiate a ResidualStack."""
super(ResidualStack, self).__init__(name=name)
self._num_hiddens = num_hiddens
self._num_residual_layers = num_residual_layers
self._num_residual_hiddens = num_residual_hiddens
self._filter_size = filter_size
self._initializers = initializers
self._data_format = data_format
self._activation = activation
def _build(self, h):
for i in range(self._num_residual_layers):
h_i = self._activation(h)
h_i = snt.Conv2D(
output_channels=self._num_residual_hiddens,
kernel_shape=(self._filter_size, self._filter_size),
stride=(1, 1),
initializers=self._initializers,
data_format=self._data_format,
name="res_nxn_%d" % i)(h_i)
h_i = self._activation(h_i)
h_i = snt.Conv2D(
output_channels=self._num_hiddens,
kernel_shape=(1, 1),
stride=(1, 1),
initializers=self._initializers,
data_format=self._data_format,
name="res_1x1_%d" % i)(h_i)
h += h_i
return self._activation(h)
class ConvEncoder(snt.AbstractModule):
"""Convolutional encoder."""
def __init__(self,
factor_h,
factor_w,
num_hiddens,
num_residual_layers,
num_residual_hiddens,
initializers=None,
data_format="NHWC",
name="conv_encoder"):
super(ConvEncoder, self).__init__(name=name)
self._num_hiddens = num_hiddens
self._num_residual_layers = num_residual_layers
self._num_residual_hiddens = num_residual_hiddens
self._initializers = initializers
self._data_format = data_format
# Note that implicitly the network uses conv strides of 2.
# input height / output height == factor_h.
self._num_steps_h = factor_h.bit_length() - 1
# input width / output width == factor_w.
self._num_steps_w = factor_w.bit_length() - 1
num_steps = max(self._num_steps_h, self._num_steps_w)
if factor_h & (factor_h - 1) != 0:
raise ValueError("`factor_h` must be a power of 2. It is %d" % factor_h)
if factor_w & (factor_w - 1) != 0:
raise ValueError("`factor_w` must be a power of 2. It is %d" % factor_w)
self._num_steps = num_steps
def _build(self, x):
h = x
for i in range(self._num_steps):
stride = (2 if i < self._num_steps_h else 1,
2 if i < self._num_steps_w else 1)
h = snt.Conv2D(
output_channels=self._num_hiddens,
kernel_shape=(4, 4),
stride=stride,
initializers=self._initializers,
data_format=self._data_format,
name="strided_{}".format(i))(h)
h = tf.nn.relu(h)
h = snt.Conv2D(
output_channels=self._num_hiddens,
kernel_shape=(3, 3),
stride=(1, 1),
initializers=self._initializers,
data_format=self._data_format,
name="pre_stack")(h)
h = ResidualStack( # pylint: disable=not-callable
self._num_hiddens,
self._num_residual_layers,
self._num_residual_hiddens,
initializers=self._initializers,
data_format=self._data_format,
name="residual_stack")(h)
return h
class ConvDecoder(snt.AbstractModule):
"""Convolutional decoder."""
def __init__(self,
factor_h,
factor_w,
num_hiddens,
num_residual_layers,
num_residual_hiddens,
num_output_channels=3,
initializers=None,
data_format="NHWC",
name="conv_decoder"):
super(ConvDecoder, self).__init__(name=name)
self._num_hiddens = num_hiddens
self._num_residual_layers = num_residual_layers
self._num_residual_hiddens = num_residual_hiddens
self._num_output_channels = num_output_channels
self._initializers = initializers
self._data_format = data_format
    # output height / input height == factor_h.
self._num_steps_h = factor_h.bit_length() - 1
    # output width / input width == factor_w.
self._num_steps_w = factor_w.bit_length() - 1
num_steps = max(self._num_steps_h, self._num_steps_w)
if factor_h & (factor_h - 1) != 0:
raise ValueError("`factor_h` must be a power of 2. It is %d" % factor_h)
if factor_w & (factor_w - 1) != 0:
raise ValueError("`factor_w` must be a power of 2. It is %d" % factor_w)
self._num_steps = num_steps
def _build(self, x):
h = snt.Conv2D(
output_channels=self._num_hiddens,
kernel_shape=(3, 3),
stride=(1, 1),
initializers=self._initializers,
data_format=self._data_format,
name="pre_stack")(x)
h = ResidualStack( # pylint: disable=not-callable
self._num_hiddens,
self._num_residual_layers,
self._num_residual_hiddens,
initializers=self._initializers,
data_format=self._data_format,
name="residual_stack")(h)
for i in range(self._num_steps):
# Does reverse striding -- puts stride-2s after stride-1s.
stride = (2 if (self._num_steps - 1 - i) < self._num_steps_h else 1,
2 if (self._num_steps - 1 - i) < self._num_steps_w else 1)
h = snt.Conv2DTranspose(
output_channels=self._num_hiddens,
output_shape=None,
kernel_shape=(4, 4),
stride=stride,
initializers=self._initializers,
data_format=self._data_format,
name="strided_transpose_{}".format(i))(h)
h = tf.nn.relu(h)
x_recon = snt.Conv2D(
output_channels=self._num_output_channels,
kernel_shape=(3, 3),
stride=(1, 1),
initializers=self._initializers,
data_format=self._data_format,
name="final")(h)
return x_recon
def export_hub_module(agent_ctor,
observation_spec,
noise_dim,
module_path,
checkpoint_path,
name_transform_fn=None):
"""Exports the agent as a TF-Hub module.
Args:
agent_ctor: A function returning a Sonnet module for the agent.
observation_spec: A nested dict of `Array` specs describing an observation
coming from the environment.
noise_dim: The dimensionality of the noise vector used by the agent.
module_path: A path where to export the module to.
checkpoint_path: A path where to load the weights for the module.
name_transform_fn: An optional function to provide mapping between
variable name in the module and the variable name in the checkpoint.
"""
def module_fn():
"""Builds a graph for the TF-Hub module."""
agent = agent_ctor()
# Get the initial agent state tensor.
initial_agent_state = agent.initial_state(1)
# Create a bunch of placeholders for the step function inputs.
step_type_ph = tf.placeholder(dtype=tf.int32, shape=(1,))
observation_ph = nest.map_structure(
lambda s: tf.placeholder(dtype=tf.dtypes.as_dtype(s.dtype), # pylint: disable=g-long-lambda
shape=(1,) + s.shape),
observation_spec)
observation_ph["noise_sample"] = tf.placeholder(
dtype=tf.float32, shape=(1, noise_dim))
agent_state_ph = nest.map_structure(
lambda t: tf.placeholder(dtype=t.dtype, shape=t.shape),
initial_agent_state)
# Get the step function outputs.
agent_output, agent_state = agent.step(
step_type_ph, observation_ph, agent_state_ph)
# Now we need to add the module signatures. TF Hub modules require inputs
# to be flat dictionaries. Since the agent's methods accept multiple
    # arguments, some of which are nested dictionaries, so we have to flatten
    # the structure of the placeholders before registering the signatures.
initial_state_output_dict = dict(
state=initial_agent_state)
initial_state_output_dict = dict(
nest.flatten_with_joined_string_paths(initial_state_output_dict))
step_inputs_dict = dict(
step_type=step_type_ph,
observation=observation_ph,
state=agent_state_ph)
step_inputs_dict = dict(
nest.flatten_with_joined_string_paths(step_inputs_dict))
step_outputs_dict = dict(
action=agent_output.action,
state=agent_state)
step_outputs_dict = dict(
nest.flatten_with_joined_string_paths(step_outputs_dict))
hub.add_signature(
"initial_state", outputs=initial_state_output_dict)
hub.add_signature(
"step", inputs=step_inputs_dict, outputs=step_outputs_dict)
spec = hub.create_module_spec(module_fn, drop_collections=["sonnet"])
spec.export(module_path,
checkpoint_path=checkpoint_path,
name_transform_fn=name_transform_fn)
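# A hypothetical export sketch (not executed; the constructor arguments and
# paths are placeholders). `agent_ctor` should build the same agent graph that
# produced `checkpoint_path`:
#
#   export_hub_module(
#       agent_ctor=make_agent,            # e.g. a functools.partial over Agent
#       observation_spec=observation_spec,
#       noise_dim=10,
#       module_path="/tmp/spiral_module",
#       checkpoint_path="/tmp/ckpt/model.ckpt-0")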
def get_module_wrappers(module_path):
"""Returns python functions implementing the agent.
Args:
module_path: A path which should be used to load the agent from.
Returns:
A tuple of two functions:
* A function that returns the initial state of the agent.
* A function that performs a step.
"""
g = tf.Graph()
session = tf.Session(graph=g)
with g.as_default():
agent = hub.Module(module_path)
def to_python_fn(session, signature):
"""Converts a symbolic function into a plain python functions."""
inputs_ph = {
k: tf.placeholder(v.dtype, v.get_shape().as_list(), k)
for k, v in six.iteritems(agent.get_input_info_dict(signature))}
outputs = agent(inputs=inputs_ph, signature=signature, as_dict=True)
def fn(**kwargs):
feed_dict = {inputs_ph[k]: kwargs[k] for k in six.iterkeys(inputs_ph)}
return session.run(outputs, feed_dict=feed_dict)
return fn
raw_initial_state_fn = to_python_fn(session, "initial_state")
raw_step_fn = to_python_fn(session, "step")
init_op = tf.global_variables_initializer()
g.finalize()
session.run(init_op)
def wrapped_step_fn(step_type, observation, prev_state):
"""A convenience wrapper for a raw step function."""
step_type, observation = nest.map_structure(
lambda t: np.expand_dims(t, 0),
(step_type, observation))
step_inputs_dict = dict(
step_type=step_type,
observation=observation)
step_inputs_dict = dict(
nest.flatten_with_joined_string_paths(step_inputs_dict))
step_inputs_dict.update(prev_state)
output = raw_step_fn(**step_inputs_dict)
action = {k.replace("action/", ""): v
for k, v in six.iteritems(output)
if k.startswith("action/")}
state = {k: v for k, v in six.iteritems(output) if k.startswith("state/")}
action = nest.map_structure(lambda t: np.squeeze(t, 0), action)
return action, state
return raw_initial_state_fn, wrapped_step_fn
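# A minimal sketch of driving the wrapped python functions (assuming a module
# previously exported with `export_hub_module`; the path is a placeholder):
#
#   initial_state_fn, step_fn = get_module_wrappers("/tmp/spiral_module")
#   state = initial_state_fn()
#   action, state = step_fn(step_type, observation, state)
#
# `observation` is an un-batched nested dict matching the module's observation
# signature; `step_fn` adds the batch dimension to its inputs and squeezes it
# from the returned action.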
| spiral-master | spiral/agents/utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for tabular MDPs and CMDPs."""
from typing import Tuple
from absl import logging
import cvxopt
import jax
import jax.config
import numpy as np
import scipy
import scipy.optimize
cvxopt.solvers.options['show_progress'] = False
jax.config.update('jax_enable_x64', True)
class MDP:
"""MDP class."""
def __init__(self,
num_states: int,
num_actions: int,
transition: np.ndarray,
reward: np.ndarray,
gamma: float):
"""MDP Constructor.
Args:
num_states: the number of states.
num_actions: the number of actions.
transition: transition matrix. [num_states, num_actions, num_states].
reward: reward function. [num_states, num_actions]
gamma: discount factor (0 ~ 1).
"""
self.num_states = num_states
self.num_actions = num_actions
self.transition = np.array(transition)
self.reward = np.array(reward)
self.gamma = gamma
self.initial_state = 0
self.absorbing_state = num_states - 1
assert self.transition.shape == (num_states, num_actions, num_states)
assert self.reward.shape == (num_states, num_actions)
def __copy__(self):
mdp = MDP(
num_states=self.num_states,
num_actions=self.num_actions,
transition=np.array(self.transition),
reward=np.array(self.reward),
gamma=self.gamma)
return mdp
class CMDP(MDP):
"""Constrained MDP class."""
def __init__(self,
num_states: int,
num_actions: int,
num_costs: int,
transition: np.ndarray,
reward: np.ndarray,
costs: np.ndarray,
cost_thresholds: np.ndarray,
gamma: float):
"""Constrained MDP Constructor.
Args:
num_states: the number of states.
num_actions: the number of actions.
num_costs: the number of cost types.
transition: transition matrix. [num_states, num_actions, num_states].
reward: reward function. [num_states, num_actions]
costs: cost function. [num_costs, num_states, num_actions]
cost_thresholds: cost thresholds. [num_costs]
gamma: discount factor (0 ~ 1).
"""
assert len(cost_thresholds) == num_costs
super(CMDP, self).__init__(num_states, num_actions, transition, reward,
gamma)
self.num_costs = num_costs
self.costs = np.array(costs)
self.cost_thresholds = np.array(cost_thresholds)
assert self.costs.shape == (num_costs, num_states, num_actions)
def __copy__(self):
cmdp = CMDP(
num_states=self.num_states,
num_actions=self.num_actions,
num_costs=self.num_costs,
transition=np.array(self.transition),
reward=np.array(self.reward),
costs=np.array(self.costs),
cost_thresholds=np.array(self.cost_thresholds),
gamma=self.gamma)
return cmdp
def generate_random_cmdp(num_states: int, num_actions: int, num_costs: int,
cost_thresholds: np.ndarray, gamma: float):
"""Create a random CMDP.
Args:
num_states: the number of states.
num_actions: the number of actions.
num_costs: the number of cost types.
cost_thresholds: cost thresholds. [num_costs]
gamma: discount factor (0 ~ 1).
Returns:
a CMDP instance.
"""
assert len(cost_thresholds) == num_costs
if num_costs != 1:
    raise NotImplementedError('Only num_costs=1 is supported.')
initial_state = 0
absorbing_state = num_states - 1 # the absorbing state index.
# Define a random transition.
transition = np.zeros((num_states, num_actions, num_states))
for s in range(num_states):
if s == absorbing_state:
transition[s, :, s] = 1 # absorbing state: self-transition
else:
for a in range(num_actions):
# Transition to next states is defined sparsely.
# For each (s,a), the connectivity to the next states is 4.
p = np.r_[np.random.dirichlet([1, 1, 1, 1]), [0] * (num_states - 4 - 1)]
np.random.shuffle(p)
transition[s, a, :] = np.r_[p, [0]]
# Define a reward function. Roughly speaking, a non-zero reward is given
# to the state which is most difficult to reach from the initial state.
min_value_state, min_value = -1, 1e10
for s in range(num_states - 1):
reward = np.zeros((num_states, num_actions))
reward[s, :] = 1 / (1 - gamma)
transition_tmp = np.array(transition[s, :, :])
transition[s, :, :] = 0
transition[s, :, absorbing_state] = 1 # from goal_state to absorbing state
mdp = MDP(num_states, num_actions, transition, reward, gamma)
_, v, _ = solve_mdp(mdp)
if v[initial_state] < min_value:
min_value = v[initial_state]
min_value_state = s
transition[s, :, :] = transition_tmp
# min_value_state will be the goal state that yields a non-zero reward.
goal_state = min_value_state
reward = np.zeros((num_states, num_actions))
reward[goal_state, :] = 1 / (1 - gamma)
transition[goal_state, :, :] = 0
transition[goal_state, :, absorbing_state] = 1 # to absorbing one
# Define a cost function.
while True:
costs = np.random.beta(0.2, 0.2, (num_costs, num_states, num_actions))
# For each state, there exists a no-cost action.
for s in range(num_states):
a_no_cost = np.random.randint(0, num_actions)
costs[:, s, a_no_cost] = 0
costs[:, absorbing_state, :] = 0
cmdp = CMDP(num_states, num_actions, num_costs, transition, reward, costs,
cost_thresholds, gamma)
pi_copt = solve_cmdp(cmdp)
v_c_opt = policy_evaluation(cmdp, pi_copt)[2][0, 0]
if v_c_opt >= cost_thresholds[0] - 1e-4:
# We want that optimal policy tightly matches the cost constraint.
break
cmdp = CMDP(num_states, num_actions, num_costs, transition, reward, costs,
cost_thresholds, gamma)
return cmdp
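# A small usage sketch (not executed here): build a random CMDP with a single
# cost constraint and check its optimal constrained policy.
#
#   cmdp = generate_random_cmdp(num_states=50, num_actions=4, num_costs=1,
#                               cost_thresholds=np.array([0.1]), gamma=0.95)
#   pi = solve_cmdp(cmdp)
#   v_r, _, v_c, _ = policy_evaluation(cmdp, pi)
#   # v_r[cmdp.initial_state] is the normalized return;
#   # v_c[0][cmdp.initial_state] should be close to the cost threshold.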
def policy_evaluation_mdp(mdp: MDP,
pi: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Policy evaluation (normalized value) for pi in the given MDP.
Args:
mdp: MDP instance.
pi: a stochastic policy. [num_states, num_actions]
Returns:
(V_R, Q_R)
"""
reward = mdp.reward * (1 - mdp.gamma) # normalized value
  r = np.sum(reward * pi, axis=-1)  # use the normalized reward
p = np.sum(pi[:, :, None] * mdp.transition, axis=1)
v = np.linalg.inv(np.eye(mdp.num_states) - mdp.gamma * p).dot(r)
q = reward + mdp.gamma * mdp.transition.dot(v)
return v, q
def policy_evaluation(
cmdp: CMDP,
pi: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Policy evaluation (normalized value) for pi in the given CMDP.
Args:
cmdp: CMDP instance.
pi: a stochastic policy. [num_states, num_actions]
Returns:
(V_R, Q_R, V_C, Q_C)
"""
def compute(transition, reward, pi):
reward = reward * (1 - cmdp.gamma) # normalized value
r = np.sum(reward * pi, axis=-1)
p = np.sum(pi[:, :, None] * transition, axis=1)
v = np.linalg.inv(np.eye(cmdp.num_states) - cmdp.gamma * p).dot(r)
q = reward + cmdp.gamma * cmdp.transition.dot(v)
return v, q
v_r, q_r = compute(cmdp.transition, cmdp.reward, pi)
v_cs = np.zeros((cmdp.num_costs, cmdp.num_states))
q_cs = np.zeros((cmdp.num_costs, cmdp.num_states, cmdp.num_actions))
for k in range(cmdp.num_costs):
v_c, q_c = compute(cmdp.transition, cmdp.costs[k], pi)
v_cs[k] = v_c
q_cs[k] = q_c
return v_r, q_r, v_cs, q_cs
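# For reference, both evaluations above solve the normalized Bellman equations
# in closed form: with P^pi(s, s') = sum_a pi(a|s) P(s'|s, a) and
# r^pi(s) = (1 - gamma) * sum_a pi(a|s) R(s, a),
#   V^pi = (I - gamma * P^pi)^{-1} r^pi,
#   Q^pi(s, a) = (1 - gamma) * R(s, a) + gamma * sum_{s'} P(s'|s, a) V^pi(s'),
# and identically for each cost function C_k in place of R.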
def solve_mdp(mdp: MDP):
"""Solve MDP via policy iteration.
Args:
mdp: an MDP instance.
Returns:
(pi, V_R, Q_R).
"""
pi = np.ones((mdp.num_states, mdp.num_actions)) / mdp.num_actions
v_old = np.zeros(mdp.num_states)
for _ in range(1_000_000):
v, q = policy_evaluation_mdp(mdp, pi)
pi_new = np.zeros((mdp.num_states, mdp.num_actions))
pi_new[np.arange(mdp.num_states), np.argmax(q, axis=1)] = 1.
if np.all(pi == pi_new) or np.max(np.abs(v - v_old)) < 1e-8:
break
v_old = v
pi = pi_new
if not np.all(pi == pi_new):
logging.info('Warning: policy iteration process did not converge.')
return pi, v, q
def generate_trajectory(seed: int,
cmdp: CMDP,
pi: np.ndarray,
num_episodes: int = 10,
max_timesteps: int = 50):
"""Generate trajectories using the policy in the CMDP.
Args:
seed: random seed.
cmdp: CMDP instance.
pi: a stochastic policy. [num_states, num_actions]
num_episodes: the number of episodes to generate.
max_timesteps: the maximum timestep within an episode.
Returns:
trajectory: list of list of (episode_idx, t, s_t, a_t, r_t, c_t, s_t').
"""
if seed is not None:
np.random.seed(seed + 1)
def random_choice_prob_vectorized(p):
"""Batch random_choice.
e.g. p = np.array([
[0.1, 0.5, 0.4],
[0.8, 0.1, 0.1]])
Args:
p: batch of probability vector.
Returns:
Sampled indices
"""
r = np.expand_dims(np.random.rand(p.shape[0]), axis=1)
return (p.cumsum(axis=1) > r).argmax(axis=1)
trajectory = [[] for i in range(num_episodes)]
  done = np.zeros(num_episodes, dtype=bool)
state = np.array([cmdp.initial_state] * num_episodes)
for t in range(max_timesteps):
action = random_choice_prob_vectorized(p=pi[state, :])
reward = cmdp.reward[state, action]
costs = cmdp.costs[:, state, action]
state1 = random_choice_prob_vectorized(p=cmdp.transition[state, action, :])
for i in range(num_episodes):
if not done[i]:
trajectory[i].append(
(i, t, state[i], action[i], reward[i], costs[:, i], state1[i]))
done = done | (state == cmdp.absorbing_state)
state = state1
return trajectory
def compute_mle_cmdp(num_states: int,
num_actions: int,
num_costs: int,
reward: np.ndarray,
costs: np.ndarray,
cost_thresholds: np.ndarray,
gamma: float,
trajectory,
absorb_unseen: bool = True) -> Tuple[CMDP, np.ndarray]:
"""Construct a maximum-likelihood estimation CMDP from the trajectories.
Args:
num_states: the number of states.
num_actions: the number of actions.
num_costs: the number of costs.
reward: reward function.
costs: cost function.
cost_thresholds: cost thresholds.
gamma: discount factor (0~1).
trajectory: trajectories collected by a behavior policy.
list of list of (episode_idx, t, s_t, a_t, r_t, c_t, s_t').
    absorb_unseen: for unvisited (s, a) pairs, whether to use a transition to
      the absorbing state. If False, a uniform transition is used.
Returns:
(MLE CMDP, visitation count matrix)
"""
absorbing_state = num_states - 1
n = np.zeros((num_states, num_actions, num_states))
for trajectory_one in trajectory:
# episode, t, s, a, r, c, s1
for _, _, s, a, _, _, s1 in trajectory_one:
n[s, a, s1] += 1
transition = np.zeros((num_states, num_actions, num_states))
for s in range(num_states):
for a in range(num_actions):
if n[s, a, :].sum() == 0:
if absorb_unseen:
transition[s, a, absorbing_state] = 1 # absorbing state
else:
transition[s, a, :] = 1. / num_states
else:
transition[s, a, :] = n[s, a, :] / n[s, a, :].sum()
mle_cmdp = CMDP(num_states, num_actions, num_costs, transition, reward, costs,
cost_thresholds, gamma)
return mle_cmdp, n
def solve_cmdp(cmdp: CMDP):
"""Solve CMDP via linear programming.
Args:
cmdp: a CMDP instance.
Returns:
optimal policy.
"""
c = -cmdp.reward.reshape(cmdp.num_states * cmdp.num_actions)
p0 = np.zeros(cmdp.num_states)
p0[cmdp.initial_state] = 1
p = cmdp.transition.reshape(cmdp.num_states * cmdp.num_actions,
cmdp.num_states) # |S||A| x |S|
p = p / np.sum(p, axis=1, keepdims=True)
b = np.repeat(
np.eye(cmdp.num_states), cmdp.num_actions, axis=0) # |S||A| x |S|
a_eq = (b - cmdp.gamma * p).T
b_eq = (1 - cmdp.gamma) * p0
a_ub = cmdp.costs.reshape(cmdp.num_costs, cmdp.num_states * cmdp.num_actions)
b_ub = cmdp.cost_thresholds
# Minimize::
# c @ x
# Subject to::
# A_ub @ x <= b_ub
# A_eq @ x == b_eq
# lb <= x <= ub
# where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.
res = scipy.optimize.linprog(
c,
A_ub=a_ub,
b_ub=b_ub,
A_eq=a_eq,
b_eq=b_eq,
bounds=(0, np.inf),
options={
'maxiter': 10000,
'tol': 1e-8
})
assert np.all(res.x >= -1e-4)
d = np.clip(res.x.reshape(cmdp.num_states, cmdp.num_actions), 1e-10, np.inf)
pi = d / np.sum(d, axis=1, keepdims=True)
return pi
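# The linear program above optimizes over the normalized occupation measure
# d(s, a) >= 0 subject to the Bellman-flow constraints
#   sum_a d(s, a) = (1 - gamma) * p0(s)
#                   + gamma * sum_{s', a'} P(s|s', a') * d(s', a')
# and the cost constraints sum_{s, a} C_k(s, a) * d(s, a) <= threshold_k;
# the policy is recovered by conditioning, pi(a|s) = d(s, a) / sum_a d(s, a).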
| constrained_optidice-main | tabular/mdp_util.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tabular offline (C)MDP methods."""
import copy
import time
from absl import logging
import cvxopt
import jax
import jax.config
import jax.numpy as jnp
import numpy as np
import scipy
import scipy.optimize
from constrained_optidice.tabular import mdp_util as util
cvxopt.solvers.options['show_progress'] = False
jax.config.update('jax_enable_x64', True)
def _compute_marginal_distribution(mdp, pi, regularizer=0):
"""Compute marginal distribution for the given policy pi, d^pi(s,a)."""
p0_s = np.zeros(mdp.num_states)
p0_s[mdp.initial_state] = 1
p0 = (p0_s[:, None] * pi).reshape(mdp.num_states * mdp.num_actions)
p_pi = (mdp.transition.reshape(mdp.num_states * mdp.num_actions,
mdp.num_states)[:, :, None] * pi).reshape(
mdp.num_states * mdp.num_actions,
mdp.num_states * mdp.num_actions)
d = np.ones(mdp.num_states * mdp.num_actions)
d /= np.sum(d)
d_diag = np.diag(d)
e = np.sqrt(d_diag) @ (
np.eye(mdp.num_states * mdp.num_actions) - mdp.gamma * p_pi)
q = np.linalg.solve(
e.T @ e + regularizer * np.eye(mdp.num_states * mdp.num_actions),
(1 - mdp.gamma) * p0)
w = q - mdp.gamma * p_pi @ q
assert np.all(w > -1e-6), w
d_pi = w * d
d_pi[w < 0] = 0
d_pi /= np.sum(d_pi)
return d_pi.reshape(mdp.num_states, mdp.num_actions)
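# The regularized least-squares solve above recovers the stationary
# state-action distribution satisfying
#   d^pi = (1 - gamma) * p0 + gamma * P_pi^T d^pi,
# where p0(s, a) = p0(s) * pi(a|s), through the ratio w = d^pi / d for a
# uniform distribution d, which avoids inverting (I - gamma * P_pi^T) directly.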
def generate_baseline_policy(cmdp: util.CMDP,
behavior_cost_thresholds: np.ndarray,
optimality: float) -> np.ndarray:
"""Generate a baseline policy for the CMDP.
Args:
cmdp: a CMDP instance.
behavior_cost_thresholds: cost threshold for behavior policy. [num_costs]
optimality: optimality of behavior policy.
(0: uniform policy, 1: optimal policy)
Returns:
behavior policy. [num_states, num_actions]
"""
cmdp = copy.copy(cmdp)
cmdp.cost_thresholds = behavior_cost_thresholds
cmdp_no_reward = copy.copy(cmdp)
cmdp_no_reward.reward *= 0
pi_opt = util.solve_cmdp(cmdp)
pi_unif = np.ones((cmdp.num_states, cmdp.num_actions)) / cmdp.num_actions
v_opt = util.policy_evaluation(cmdp, pi_opt)[0][0]
q_opt = util.policy_evaluation(cmdp, pi_opt)[1]
v_unif = util.policy_evaluation(cmdp, pi_unif)[0][0]
v_final_target = v_opt * optimality + (1 - optimality) * v_unif
softmax_reduction_factor = 0.9
temperature = 1e-6
pi_soft = scipy.special.softmax(q_opt / temperature, axis=1)
while util.policy_evaluation(cmdp, pi_soft)[0][0] > v_final_target:
temperature /= softmax_reduction_factor
pi_soft = scipy.special.softmax(q_opt / temperature, axis=1)
pi_soft /= np.sum(pi_soft, axis=1, keepdims=True)
pi_soft = constrained_optidice(cmdp_no_reward, pi_soft, alpha=1)
r, _, c, _ = util.policy_evaluation(cmdp, pi_soft)
logging.info('temp=%.6f, R=%.3f, C=%.3f / v_opt=%.3f, f_target=%.3f',
temperature, r[0], c[0][0], v_opt, v_final_target)
assert np.all(pi_soft >= -1e-4)
pi_b = pi_soft.copy()
return pi_b
def optidice(mdp: util.MDP, pi_b: np.ndarray, alpha: float):
"""f-divergence regularized RL.
max_{d} E_d[R(s,a)] - alpha * E_{d_b}[f(d(s,a)/d_b(s,a))]
We assume that f(x) = 0.5 (x-1)^2.
Args:
mdp: a MDP instance.
pi_b: behavior policy. [num_states, num_actions]
alpha: regularization hyperparameter for f-divergence.
Returns:
the resulting policy. [num_states, num_actions]
"""
d_b = _compute_marginal_distribution(mdp, pi_b).reshape(
mdp.num_states * mdp.num_actions) + 1e-6 # |S||A|
d_b /= np.sum(d_b)
p0 = np.eye(mdp.num_states)[mdp.initial_state] # |S|
r = np.array(mdp.reward.reshape(mdp.num_states * mdp.num_actions))
p = np.array(
mdp.transition.reshape(mdp.num_states * mdp.num_actions, mdp.num_states))
p = p / np.sum(p, axis=1, keepdims=True)
b = np.repeat(np.eye(mdp.num_states), mdp.num_actions, axis=0) # |S||A| x |S|
# Solve:
# minimize (1/2)*x^T P x + q^T x
# subject to G x <= h
# A x = b.
d_diag = np.diag(d_b)
qp_p = alpha * (d_diag)
qp_q = -d_diag @ r - alpha * d_b
qp_g = -np.eye(mdp.num_states * mdp.num_actions)
qp_h = np.zeros(mdp.num_states * mdp.num_actions)
qp_a = (b.T - mdp.gamma * p.T) @ d_diag
qp_b = (1 - mdp.gamma) * p0
cvxopt.solvers.options['show_progress'] = False
res = cvxopt.solvers.qp(
cvxopt.matrix(qp_p), cvxopt.matrix(qp_q), cvxopt.matrix(qp_g),
cvxopt.matrix(qp_h), cvxopt.matrix(qp_a), cvxopt.matrix(qp_b))
w = np.array(res['x'])[:, 0] # [num_states * num_actions]
assert np.all(w >= -1e-4), w
w = np.clip(w, 1e-10, np.inf)
pi = (w * d_b).reshape(mdp.num_states, mdp.num_actions) + 1e-10
pi /= np.sum(pi, axis=1, keepdims=True)
return w, d_b, pi
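# A brief usage sketch (not executed): regularized policy improvement from a
# behavior policy pi_b on an (estimated) MDP.
#
#   w, d_b, pi = optidice(mdp, pi_b, alpha=0.01)
#   # w approximates the correction ratio d^pi(s, a) / d^{pi_b}(s, a), and
#   # pi is the improved policy induced by w * d_b.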
def constrained_optidice(cmdp: util.CMDP,
pi_b: np.ndarray,
alpha: float):
"""f-divergence regularized constrained RL.
max_{d} E_d[R(s,a)] - alpha * E_{d_b}[f(d(s,a)/d_b(s,a))]
s.t. E_d[C(s,a)] <= hat{c}.
We assume that f(x) = 0.5 (x-1)^2.
Args:
cmdp: a CMDP instance.
pi_b: behavior policy.
alpha: regularization hyperparameter for f-divergence.
Returns:
the resulting policy. [num_states, num_actions]
"""
d_b = _compute_marginal_distribution(cmdp, pi_b).reshape(
cmdp.num_states * cmdp.num_actions) + 1e-6 # |S||A|
d_b /= np.sum(d_b)
p0 = np.eye(cmdp.num_states)[cmdp.initial_state] # |S|
p = np.array(
cmdp.transition.reshape(cmdp.num_states * cmdp.num_actions,
cmdp.num_states))
p = p / np.sum(p, axis=1, keepdims=True)
b = np.repeat(
np.eye(cmdp.num_states), cmdp.num_actions, axis=0) # |S||A| x |S|
r = np.array(cmdp.reward.reshape(cmdp.num_states * cmdp.num_actions))
c = np.array(
cmdp.costs.reshape(cmdp.num_costs, cmdp.num_states * cmdp.num_actions))
# Solve:
# minimize (1/2)*x^T P x + q^T x
# subject to G x <= h
# A x = b.
d_diag = np.diag(d_b)
qp_p = alpha * (d_diag)
qp_q = -d_diag @ r - alpha * d_b
qp_g = np.concatenate(
[c @ d_diag, -np.eye(cmdp.num_states * cmdp.num_actions)], axis=0)
qp_h = np.concatenate(
[cmdp.cost_thresholds,
np.zeros(cmdp.num_states * cmdp.num_actions)])
qp_a = (b.T - cmdp.gamma * p.T) @ d_diag
qp_b = (1 - cmdp.gamma) * p0
res = cvxopt.solvers.qp(
cvxopt.matrix(qp_p), cvxopt.matrix(qp_q), cvxopt.matrix(qp_g),
cvxopt.matrix(qp_h), cvxopt.matrix(qp_a), cvxopt.matrix(qp_b))
w = np.array(res['x'])[:, 0] # [num_states * num_actions]
assert np.all(w >= -1e-4), w
w = np.clip(w, 1e-10, np.inf)
pi = (w * d_b).reshape(cmdp.num_states, cmdp.num_actions) + 1e-10
pi /= np.sum(pi, axis=1, keepdims=True)
assert np.all(pi >= -1e-6), pi
return np.array(pi)
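# Usage sketch (not executed): the same quadratic program with an extra linear
# cost constraint; `mle_cmdp` and `pi_b` would typically come from
# util.compute_mle_cmdp and a known behavior policy, with alpha scaled
# inversely to the dataset size.
#
#   pi = constrained_optidice(mle_cmdp, pi_b, alpha=1.0 / num_trajectories)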
def cost_upper_bound(cmdp: util.CMDP,
w: np.ndarray,
d_b: np.ndarray,
epsilon: float):
"""Compute cost upper bound based on the DICE w.
Args:
cmdp: CMDP instance.
w: stationary distribution correction estimate of the target policy.
d_b: stationary distribution of the behavior policy.
epsilon: hyperparameter that controls conservatism. (epsilon > 0)
Returns:
(cost upper bound, additional information)
"""
if cmdp.num_costs != 1:
raise NotImplementedError('cmdp.num_costs=1 is supported only.')
s0 = cmdp.initial_state
w = w.reshape(cmdp.num_states, cmdp.num_actions)
p_n = d_b.reshape(cmdp.num_states,
cmdp.num_actions)[:, :, None] * cmdp.transition + 1e-10
p_n = p_n.reshape(cmdp.num_states * cmdp.num_actions * cmdp.num_states)
c = cmdp.costs[0, :, :] # |S| x |A|
def loss_fn(variables):
tau, x = variables[0], variables[1:]
l = (1 - cmdp.gamma) * x[s0] + w[:, :, None] * (
c[:, :, None] + cmdp.gamma * x[None, None, :] - x[:, None, None])
l = l.reshape(cmdp.num_states * cmdp.num_actions * cmdp.num_states)
loss = tau * jax.nn.logsumexp(jnp.log(p_n) + l / tau) + tau * epsilon
return loss
loss_jit = jax.jit(loss_fn)
grad_jit = jax.jit(jax.grad(loss_fn))
f = lambda x: np.array(loss_jit(x))
jac = lambda x: np.array(grad_jit(x))
# Minimize loss_fn.
x0 = np.ones(cmdp.num_states + 1)
lb, ub = -np.ones_like(x0) * np.inf, np.ones_like(x0) * np.inf
lb[0] = 0 # tau >= 0
bounds = scipy.optimize.Bounds(lb, ub, keep_feasible=False)
solution = scipy.optimize.minimize(
f,
x0=x0,
jac=jac,
bounds=bounds,
options={
'maxiter': 10000,
'ftol': 1e-10,
'gtol': 1e-10,
})
# Additional information.
tau, x = solution.x[0], solution.x[1:]
l = (1 - cmdp.gamma) * x[s0] + w[:, :, None] * (
c[:, :, None] + cmdp.gamma * x[None, None, :] - x[:, None, None])
l = l.reshape(cmdp.num_states * cmdp.num_actions * cmdp.num_states)
loss = tau * scipy.special.logsumexp(np.log(p_n) + l / tau) + tau * epsilon
p = scipy.special.softmax(np.log(p_n) + (l / tau)) + 1e-10
kl = np.sum(p * np.log(p / p_n))
p_sa = np.sum(
p.reshape(cmdp.num_states, cmdp.num_actions, cmdp.num_states), axis=-1)
cost_ub = np.sum(p_sa * w * c)
info = {
'loss': loss,
'kl': kl,
'cost_ub': cost_ub,
'p': p,
'gap': loss - cost_ub
}
return np.array([loss]), info
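# The objective above can be read as the dual form of a distributionally
# robust estimate: for a fixed potential x it upper-bounds
#   max_{p : KL(p || p_n) <= epsilon} E_p[(1 - gamma) * x(s0)
#       + w(s, a) * (C(s, a) + gamma * x(s') - x(s))],
# and minimizing jointly over the temperature tau >= 0 and x tightens the
# resulting upper bound on the cost value of the policy induced by w.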
def conservative_constrained_optidice(cmdp, pi_b, alpha, epsilon, verbose=0):
"""f-divergence regularized conservative constrained RL.
max_{d} E_d[R(s,a)] - alpha * E_{d_b}[f(d(s,a)/d_b(s,a))]
s.t. (cost upper bound) <= hat{c}.
We assume that f(x) = 0.5 (x-1)^2.
Args:
cmdp: a CMDP instance.
pi_b: behavior policy.
alpha: regularization hyperparameter for f-divergence.
epsilon: degree of conservatism. (0: cost upper bound = E_d[C(s,a)]).
verbose: whether using logging or not.
Returns:
the resulting policy. [num_states, num_actions]
"""
if cmdp.num_costs != 1:
raise NotImplementedError('cmdp.num_costs=1 is supported only.')
lamb_left = np.array([0.0])
lamb_right = np.array([10.0])
start_time = time.time()
for i in range(15):
lamb = (lamb_left + lamb_right) * 0.5
r_lamb = cmdp.reward - np.sum(lamb[:, None, None] * cmdp.costs, axis=0)
mdp = util.MDP(cmdp.num_states, cmdp.num_actions, cmdp.transition, r_lamb,
cmdp.gamma)
w, d_b, _ = optidice(mdp, pi_b, alpha)
cost_mean = cmdp.costs.reshape(cmdp.num_costs, cmdp.num_states *
cmdp.num_actions).dot(w * d_b)
cost_ub, info = cost_upper_bound(cmdp, w, d_b, epsilon)
if verbose:
logging.info(
'[%g] Lamb=%g, cost_ub=%.6g, gap=%.6g, kl=%.6g, cost_mean=%.6g / '
'elapsed_time=%.3g', i, lamb, cost_ub, info['gap'], info['kl'],
cost_mean,
time.time() - start_time)
if cost_ub[0] > cmdp.cost_thresholds[0]:
lamb_left = lamb
else:
lamb_right = lamb
lamb = lamb_right
r_lamb = cmdp.reward - np.sum(lamb[:, None, None] * cmdp.costs, axis=0)
mdp = util.MDP(cmdp.num_states, cmdp.num_actions, cmdp.transition, r_lamb,
cmdp.gamma)
w, d_b, pi = optidice(mdp, pi_b, alpha)
return pi
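# Usage sketch (not executed), mirroring run_random_cmdp_experiment.py: the
# conservatism parameter epsilon is typically shrunk with the dataset size.
#
#   pi = conservative_constrained_optidice(
#       mle_cmdp, pi_b, alpha=1.0 / num_trajectories,
#       epsilon=0.1 / num_trajectories)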
| constrained_optidice-main | tabular/offline_cmdp.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main experiment script."""
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
from constrained_optidice.tabular import mdp_util
from constrained_optidice.tabular import offline_cmdp
flags.DEFINE_float('cost_thresholds', 0.1,
'The cost constraint threshold of the true CMDP.')
flags.DEFINE_float(
'behavior_optimality', 0.9,
'The optimality of data-collecting policy in terms of reward. '
'(0: performance of uniform policy. 1: performance of optimal policy).')
flags.DEFINE_float('behavior_cost_thresholds', 0.1,
'Set the cost value of data-collecting policy.')
flags.DEFINE_integer('num_iterations', 10,
'The number of iterations for the repeated experiments.')
FLAGS = flags.FLAGS
def main(unused_argv):
"""Main function."""
num_states, num_actions, num_costs, gamma = 50, 4, 1, 0.95
cost_thresholds = np.ones(num_costs) * FLAGS.cost_thresholds
behavior_optimality = FLAGS.behavior_optimality
behavior_cost_thresholds = np.array([FLAGS.behavior_cost_thresholds])
logging.info('==============================')
logging.info('Cost threshold: %g', cost_thresholds)
logging.info('Behavior optimality: %g', behavior_optimality)
logging.info('Behavior cost thresholds: %g', behavior_cost_thresholds)
logging.info('==============================')
results = []
start_time = time.time()
for seed in range(FLAGS.num_iterations):
# Construct a random CMDP
np.random.seed(seed)
cmdp = mdp_util.generate_random_cmdp(num_states, num_actions, num_costs,
cost_thresholds, gamma)
result = {}
# Optimal policy for unconstrained MDP
pi_uopt, _, _ = mdp_util.solve_mdp(cmdp)
v_r, _, v_c, _ = mdp_util.policy_evaluation(cmdp, pi_uopt)
uopt_r, uopt_c = v_r[0], v_c[0][0]
result.update({'uopt_r': uopt_r, 'uopt_c': uopt_c})
# Optimal policy for constrained MDP
pi_copt = mdp_util.solve_cmdp(cmdp)
v_r, _, v_c, _ = mdp_util.policy_evaluation(cmdp, pi_copt)
opt_r, opt_c = v_r[0], v_c[0][0]
result.update({'opt_r': opt_r, 'opt_c': opt_c})
# Construct behavior policy
pi_b = offline_cmdp.generate_baseline_policy(
cmdp,
behavior_cost_thresholds=behavior_cost_thresholds,
optimality=behavior_optimality)
v_r, _, v_c, _ = mdp_util.policy_evaluation(cmdp, pi_b)
pib_r, pib_c = v_r[0], v_c[0][0]
result.update({'behav_r': pib_r, 'behav_c': pib_c})
for num_trajectories in [10, 20, 50, 100, 200, 500, 1000, 2000]:
logging.info('==========================')
logging.info('* seed=%d, num_trajectories=%d', seed, num_trajectories)
alpha = 1. / num_trajectories # Parameter for CCIDCE, CDICE.
# Generate trajectory
trajectory = mdp_util.generate_trajectory(
seed, cmdp, pi_b, num_episodes=num_trajectories)
# MLE CMDP
mle_cmdp, _ = mdp_util.compute_mle_cmdp(num_states, num_actions,
num_costs, cmdp.reward,
cmdp.costs, cost_thresholds,
gamma, trajectory)
# Basic RL
pi = mdp_util.solve_cmdp(mle_cmdp)
v_r, _, v_c, _ = mdp_util.policy_evaluation(cmdp, pi)
basic_r = v_r[0]
basic_c = v_c[0][0]
result.update({'basic_r': basic_r, 'basic_c': basic_c})
# Vanilla ConstrainedOptiDICE
pi = offline_cmdp.constrained_optidice(mle_cmdp, pi_b, alpha)
v_r, _, v_c, _ = mdp_util.policy_evaluation(cmdp, pi)
cdice_r = v_r[0]
cdice_c = v_c[0][0]
result.update({'cdice_r': cdice_r, 'cdice_c': cdice_c})
# Conservative ConstrainedOptiDICE
epsilon = 0.1 / num_trajectories
pi = offline_cmdp.conservative_constrained_optidice(
mle_cmdp, pi_b, alpha=alpha, epsilon=epsilon)
v_r, _, v_c, _ = mdp_util.policy_evaluation(cmdp, pi)
ccdice_r = v_r[0]
ccdice_c = v_c[0][0]
result.update({'ccdice_r': ccdice_r, 'ccdice_c': ccdice_c})
# Print the result
elapsed_time = time.time() - start_time
result.update({
'seed': seed,
'num_trajectories': num_trajectories,
'elapsed_time': elapsed_time,
})
logging.info(result)
results.append(result)
if __name__ == '__main__':
app.run(main)
| constrained_optidice-main | tabular/run_random_cmdp_experiment.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple agent-environment interaction loop."""
import operator
import time
from typing import Optional, Callable
from acme import core
from acme.utils import counting
from acme.utils import loggers
import dm_env
from dm_env import specs
import numpy as np
import tree
class CostEnvironmentLoop(core.Worker):
"""A simple RL environment loop.
This tracks cost return as well as reward return.
This takes `Environment` and `Actor` instances and coordinates their
interaction. Agent is updated if `should_update=True`. This can be used as:
    loop = CostEnvironmentLoop(environment, actor, gamma, num_costs, cost_fn)
    loop.run(num_episodes=num_episodes)
A `Counter` instance can optionally be given in order to maintain counts
  between different Acme components. If not given, a local Counter will be
created to maintain counts between calls to the `run` method.
A `Logger` instance can also be passed in order to control the output of the
  loop. If not given, a platform-specific default logger will be used as defined
by utils.loggers.make_default_logger. A string `label` can be passed to easily
change the label associated with the default logger; this is ignored if a
`Logger` instance is given.
"""
def __init__(
self,
environment: dm_env.Environment,
actor: core.Actor,
gamma: float,
num_costs: int,
cost_fn: Callable[[np.ndarray, np.ndarray], np.ndarray],
counter: Optional[counting.Counter] = None,
logger: Optional[loggers.Logger] = None,
should_update: bool = True,
label: str = 'environment_loop',
):
self._environment = environment
self._actor = actor
self._gamma = gamma
self._num_costs = num_costs
self._cost_fn = cost_fn
if counter is None:
counter = counting.Counter()
if logger is None:
logger = loggers.make_default_logger(label)
self._counter = counter
self._logger = logger
self._should_update = should_update
def run_episode(self) -> loggers.LoggingData:
"""Run one episode.
    Each episode is a loop which interacts first with the environment to get an
    observation and then gives that observation to the agent in order to
    retrieve an action.
Returns:
An instance of `loggers.LoggingData` containing episode stats.
"""
# Reset any counts and start the environment.
start_time = time.time()
episode_steps = 0
# For evaluation, this keeps track of the total undiscounted reward
# accumulated during the episode.
episode_return = tree.map_structure(_generate_zeros_from_spec,
self._environment.reward_spec())
cost_spec = specs.Array(shape=(self._num_costs,), dtype=float, name='cost')
episode_cost_return = tree.map_structure(_generate_zeros_from_spec,
cost_spec)
gamma_sum = 0.0
timestep = self._environment.reset()
self._actor.observe_first(timestep)
# Run an episode.
while not timestep.last():
# Generate an action from the agent's policy and step the environment.
action = self._actor.select_action(timestep.observation)
timestep = self._environment.step(action)
      # Compute an immediate cost for (obs, action).
      # cost_fn is defined in terms of batched inputs/outputs.
cost = self._cost_fn(
tree.map_structure(lambda x: np.array([x]), timestep.observation),
np.array([action]))[0]
# Have the agent observe the timestep and let the actor update itself.
self._actor.observe(action, next_timestep=timestep)
if self._should_update:
self._actor.update()
# Equivalent to: episode_return += timestep.reward
# We capture the return value because if timestep.reward is a JAX
# DeviceArray, episode_return will not be mutated in-place. (In all other
# cases, the returned episode_return will be the same object as the
# argument episode_return.)
episode_return = tree.map_structure(operator.iadd, episode_return,
timestep.reward)
episode_cost_return = tree.map_structure(operator.iadd,
episode_cost_return, cost)
gamma_sum += self._gamma**episode_steps
episode_steps += 1
counts = self._counter.increment(episodes=1, steps=episode_steps)
# Collect the results and combine with counts.
steps_per_second = episode_steps / (time.time() - start_time)
result = {
'episode_length': episode_steps,
'episode_return': episode_return,
'steps_per_second': steps_per_second,
}
for k, cost in enumerate(episode_cost_return):
result[f'episode_cost_return_{k}'] = cost
result[f'episode_average_cost_{k}'] = cost / episode_steps
result.update(counts)
return result
def run(self,
*,
num_episodes: Optional[int] = None,
num_steps: Optional[int] = None):
"""Perform the run loop.
Run the environment loop either for `num_episodes` episodes or for at
least `num_steps` steps (the last episode is always run until completion,
so the total number of steps may be slightly more than `num_steps`).
At least one of these two arguments has to be None.
Upon termination of an episode a new episode will be started. If both
num_episodes and num_steps arguments are provided, the first criterion met
between the two will terminate the run loop.
Args:
num_episodes: number of episodes to run the loop for.
num_steps: minimal number of steps to run the loop for.
Raises:
ValueError: If both 'num_episodes' and 'num_steps' are not None.
"""
if not (num_episodes is None or num_steps is None):
raise ValueError('Either "num_episodes" or "num_steps" should be None.')
def should_terminate(episode_count: int, step_count: int) -> bool:
return (num_episodes is not None and
episode_count >= num_episodes) or (num_steps is not None and
step_count >= num_steps)
episode_count = 0
step_count = 0
while not should_terminate(episode_count, step_count):
result = self.run_episode()
episode_count += 1
step_count += result['episode_length']
self._logger.write(result)
def _generate_zeros_from_spec(spec: specs.Array) -> np.ndarray:
return np.zeros(spec.shape, spec.dtype)
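# A hypothetical end-to-end sketch (the environment, actor and cost_fn objects
# are placeholders): run evaluation episodes while accumulating both reward
# and cost returns.
#
#   loop = CostEnvironmentLoop(environment, actor, gamma=0.99, num_costs=1,
#                              cost_fn=cost_fn, should_update=False)
#   loop.run(num_episodes=10)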
| constrained_optidice-main | neural/cost_environment_loop.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RWRL environment builder."""
from typing import Optional
from acme import wrappers
from dm_control.rl import control
import dm_env
import numpy as np
import realworldrl_suite.environments as rwrl
# The hardest constraint for each RWRL domain.
PER_DOMAIN_HARDEST_CONSTRAINT = {
'cartpole': 'slider_pos_constraint',
'humanoid': 'joint_angle_constraint',
'quadruped': 'joint_angle_constraint',
'walker': 'joint_velocity_constraint'}
def get_hardest_constraints_index(domain: str, env: control.Environment):
try:
constraint_name = PER_DOMAIN_HARDEST_CONSTRAINT[domain]
return list(env._task.constraints).index(constraint_name) # pylint: disable=protected-access
  except (KeyError, ValueError) as err:
raise ValueError('Invalid domain or domain unsupported') from err
class AddPredictionHeadsWrapper(wrappers.EnvironmentWrapper):
@property
def prediction_head_names(self):
# The first prediction head should be 'reward'
return ['reward', 'penalties']
class ConstraintsConverter(wrappers.EnvironmentWrapper):
"""Converts (bool) binary constraints to float penalties.
This wrapper:
- Extracts binary constraints from timestep.observation[from_key].
- Flips them (negates them) if requested.
- Keeps just a single constraint by index, if requested.
- Converts them to floats, yielding penalties.
- Stores the penalties in timestep.observation[to_key].
"""
def __init__(self,
environment: dm_env.Environment,
from_key: str = 'constraints',
flip: bool = True,
keep_only_at_index: Optional[int] = None,
to_key: str = 'penalties'):
"""Wrapper initializer.
Args:
environment (dm_env.Environment): Environment to wrap.
from_key (str, optional): Name of constraint in timestep.observation
which will be mapped into timestep.observation[to_key]
flip (bool, optional): Whether to negate observation[from_key]
keep_only_at_index (Optional[int], optional): Which individual
constraint to select from observation[from_key]
to_key (str, optional): Name of the key in timestep.observation where
the updated constraint will be saved into.
"""
super().__init__(environment)
self._from_key = from_key
self._keep_index = keep_only_at_index
self._flip = flip
self._to_key = to_key
def step(self, action) -> dm_env.TimeStep:
return self._convert_timestep(self._environment.step(action))
def reset(self) -> dm_env.TimeStep:
return self._convert_timestep(self._environment.reset())
def _convert_timestep(self, timestep: dm_env.TimeStep) -> dm_env.TimeStep:
"""Pops, selects, flips, casts and stores updated constraints."""
# Extract binary constraints
binary_constraints = timestep.observation.pop(self._from_key, None)
# Keep one constraint
if self._keep_index is not None:
i = self._keep_index
binary_constraints = binary_constraints[i:i + 1] # slice => 1-elem. array
# Flip semantics (useful if they were is-satisfied style constraints)
if self._flip:
# I.e., to obtain: (0.0 == no penalty and 1.0 = penalty)
binary_constraints = np.logical_not(binary_constraints)
# Convert to penalties as floats
timestep.observation[self._to_key] = binary_constraints.astype(np.float64)
return timestep
def observation_spec(self):
"""Alters the observation spec accordingly."""
observation_spec = self._environment.observation_spec()
# Convert binary constraints spec to a penalty spec
# i.e. convert dtype from bool to float64
constraints_spec = observation_spec.pop(self._from_key, None)
updated_spec = constraints_spec.replace(dtype=np.float64)
# Change spec to 1-element array if only one constraint is selected
if self._keep_index is not None:
updated_spec = updated_spec.replace(shape=(1,))
observation_spec[self._to_key] = updated_spec
return observation_spec
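# A minimal sketch (hypothetical values, not part of the wrapper) of what
# _convert_timestep does when flip=True and keep_only_at_index=0: a satisfied
# constraint (True) becomes a 0.0 penalty and a violation becomes 1.0.
def _example_constraint_conversion():
  binary_constraints = np.array([True, False, True])
  selected = binary_constraints[0:1]  # keep a single constraint
  penalties = np.logical_not(selected).astype(np.float64)  # flip and cast
  return penalties  # array([0.])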
def make_environment(domain_name: str, task_name: str, safety_coeff: float):
"""Make RWRL environment with safety_spec."""
safety_spec_dict = {
'enable': True,
'binary': True,
'observations': True,
'safety_coeff': safety_coeff
}
environment = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec=safety_spec_dict,
environment_kwargs={'log_safety_vars': False, 'flat_observation': False})
environment = ConstraintsConverter(
environment,
from_key='constraints',
flip=True,
keep_only_at_index=get_hardest_constraints_index(
domain_name, environment),
to_key='penalties')
environment = AddPredictionHeadsWrapper(environment)
environment = wrappers.SinglePrecisionWrapper(environment)
return environment
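# A minimal usage sketch; the domain/task pair below is an assumption, and any
# RWRL task that exposes a 'constraints' observation is handled the same way.
def _example_make_environment():
  env = make_environment('cartpole', 'realworld_swingup', safety_coeff=0.3)
  # After wrapping, observations carry a single-element float 'penalties' key.
  return env.observation_spec()['penalties']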
| constrained_optidice-main | neural/rwrl.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data utility functions."""
import numpy as np
import reverb
import tree
def _unflatten(flat_data):
"""Converts a flat dict of numpy arrays to the batch tuple structure."""
o_tm1 = {
'penalties': flat_data['penalties_tm1'],
'position': flat_data['position_tm1'],
'velocity': flat_data['velocity_tm1'],
}
a_tm1 = flat_data['action_tm1']
r_t = flat_data['reward_t']
d_t = flat_data['discount_t']
o_t = {
'penalties': flat_data['penalties_t'],
'position': flat_data['position_t'],
'velocity': flat_data['velocity_t'],
}
return (o_tm1, a_tm1, r_t, d_t, o_t)
def _gen_batch_iterator(path, batch_size):
with np.load(path) as flat_data:
data = _unflatten(flat_data)
unused_o_tm1, unused_a_tm1, r_t, unused_d_t, unused_o_t = data
num_samples = len(r_t)
while True:
indices = np.random.randint(0, num_samples, (batch_size,))
yield tree.map_structure(lambda x: x[indices], data)
def create_data_iterators(data_path, init_obs_data_path, batch_size):
"""Create data iterator used for training."""
def gen_data_iterator():
"""Iterator for transition samples (o_tm1, a_tm1, r_t, d_t, o_t)."""
for batch_data in _gen_batch_iterator(data_path, batch_size):
batch_info = reverb.SampleInfo(
key=0, probability=1., table_size=1, priority=1.)
yield reverb.ReplaySample(info=batch_info, data=batch_data)
def gen_initial_obs_iterator():
"""Iterator for initial observation samples."""
for batch_data in _gen_batch_iterator(init_obs_data_path, batch_size):
yield batch_data[0] # 0: o_tm1
return gen_data_iterator(), gen_initial_obs_iterator()
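# A minimal usage sketch; the .npz paths are placeholders and are assumed to
# contain the flat keys expected by _unflatten above.
def _example_create_data_iterators():
  data_iterator, init_obs_iterator = create_data_iterators(
      data_path='/tmp/transitions.npz',
      init_obs_data_path='/tmp/init_obs.npz',
      batch_size=4)
  sample = next(data_iterator)  # ReplaySample of (o_tm1, a_tm1, r_t, d_t, o_t)
  init_obs = next(init_obs_iterator)  # batch of initial observation dicts
  return sample, init_obs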
| constrained_optidice-main | neural/data_util.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Offline constrained RL agent."""
import time
from typing import Any, Callable, Dict, Iterator, Tuple
from acme import core
from acme.agents.jax import actor_core as actor_core_lib
from acme.agents.jax import actors
from acme.jax import savers
from acme.jax import variable_utils
from acme.utils import counting
from acme.utils import loggers
from acme.utils import lp_utils
import dm_env
import jax
import launchpad as lp
import numpy as np
import reverb
import tree
from constrained_optidice.neural import cost
from constrained_optidice.neural import cost_environment_loop
from constrained_optidice.neural import learning
from constrained_optidice.neural.networks import CDICENetworks
NestedArraySpec = tree.Structure[dm_env.specs.Array]
class DistributedCDICE:
"""Program definition for COptiDICE."""
def __init__(
self,
environment_factory: Callable[[], dm_env.Environment],
dataset_iterator_factory: Callable[[],
Tuple[Iterator[reverb.ReplaySample],
Iterator[Dict[str,
np.ndarray]]]],
task_name: str,
make_networks: Callable[[NestedArraySpec, NestedArraySpec, int],
CDICENetworks],
seed: int,
agent_params: Dict[str, Any],
num_evaluators: int = 1,
):
# Define cost function per each task.
self._cost_fn, self._num_costs = cost.domain_cost_fn(task_name)
self._environment_factory = environment_factory
self._dataset_iterator_factory = dataset_iterator_factory
self._key = jax.random.PRNGKey(seed)
self._learner_key, self._evaluator_key, self._video_recorder_key, self._key = jax.random.split(
self._key, 4)
self._task_name = task_name
self._make_networks = make_networks
self._num_evaluators = num_evaluators
self._agent_params = agent_params
environment = self._environment_factory()
self._action_spec = environment.action_spec()
self._obs_spec = environment.observation_spec()
def counter(self):
"""The counter process."""
return savers.CheckpointingRunner(
counting.Counter(), subdirectory='counter', time_delta_minutes=5)
def learner(self, counter: counting.Counter):
"""The learning process."""
transition_dataset, init_obs_dataset = self._dataset_iterator_factory()
# Make networks
networks = self._make_networks(self._obs_spec, self._action_spec,
self._num_costs)
logger = loggers.make_default_logger('learner', time_delta=5.0)
# Record steps with learner prefix.
counter = counting.Counter(counter, prefix='learner')
return learning.CDICELearner(
key=self._learner_key,
networks=networks,
transition_dataset=transition_dataset,
init_obs_dataset=init_obs_dataset,
agent_params=self._agent_params,
num_costs=self._num_costs,
cost_fn=self._cost_fn,
logger=logger,
counter=counter)
def evaluator(self,
variable_source: core.VariableSource,
counter: counting.Counter):
"""The evaluation process."""
gamma = self._agent_params['gamma']
environment = self._environment_factory()
networks = self._make_networks(self._obs_spec, self._action_spec,
self._num_costs)
def actor_network(variables, key, obs):
unused_params, policy_params = variables
action = networks.behavior.apply(policy_params, key, obs)
return action
# Inference happens on CPU, so it's better to move variables there too.
variable_client = variable_utils.VariableClient(
variable_source,
key=['params', 'policy_params'],
device='cpu',
update_period=1000)
# Make sure not to evaluate random actor right after preemption.
variable_client.update_and_wait()
# Create the actor loading the weights from variable source.
# (Actor network (params, key, obs) -> action)
actor_core = actor_core_lib.batched_feed_forward_to_actor_core(
actor_network)
actor = actors.GenericActor(
actor_core, self._evaluator_key, variable_client, backend='cpu')
# Wait until the learner starts to learn.
while 'learner_steps' not in counter.get_counts():
time.sleep(1)
# Create the run loop and return it.
logger = loggers.make_default_logger('evaluator', time_delta=5.0)
return cost_environment_loop.CostEnvironmentLoop(environment, actor, gamma,
self._num_costs,
self._cost_fn, counter,
logger)
def coordinator(self,
counter: counting.Counter,
max_steps: int,
steps_key: str = 'learner_steps'):
return lp_utils.StepsLimiter(counter, max_steps, steps_key=steps_key)
def build(self, name: str = 'cdice'):
"""Build the distributed agent topology."""
program = lp.Program(name=name)
with program.group('counter'):
counter = program.add_node(lp.CourierNode(self.counter))
if self._agent_params['max_learner_steps']:
_ = program.add_node(
lp.CourierNode(self.coordinator, counter,
self._agent_params['max_learner_steps'],
'learner_steps'))
with program.group('learner'):
learner = program.add_node(lp.CourierNode(self.learner, counter))
with program.group('evaluator'):
for _ in range(self._num_evaluators):
program.add_node(lp.CourierNode(self.evaluator, learner, counter))
return program
| constrained_optidice-main | neural/agent_distributed.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Network architectures."""
from typing import Callable, Optional
from acme import specs
from acme.jax import networks as acme_networks
from acme.jax import utils as acme_utils
import haiku as hk
import jax.numpy as jnp
import numpy as np
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
uniform_initializer = hk.initializers.VarianceScaling(
mode='fan_out', scale=1. / 3.)
class ResidualLayerNormWrapper(hk.Module):
"""Wrapper that applies residual connections and layer norm."""
def __init__(self, layer: Callable[[jnp.ndarray], jnp.ndarray]):
"""Creates the Wrapper Class.
Args:
layer: module to wrap.
"""
super().__init__(name='ResidualLayerNormWrapper')
self._layer = layer
self._layer_norm = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True)
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Returns the result of the residual and layernorm computation.
Args:
inputs: inputs to the main module.
"""
# Apply main module.
outputs = self._layer(inputs)
outputs = self._layer_norm(outputs + inputs)
return outputs
class LayerNormAndResidualMLP(hk.Module):
"""MLP with residual connections and layer norm."""
def __init__(self, hidden_size: int, num_blocks: int):
"""Create the model.
Args:
hidden_size: width of each hidden layer.
num_blocks: number of blocks, each block being MLP([hidden_size,
hidden_size]) + layer norm + residual connection.
"""
super().__init__(name='LayerNormAndResidualMLP')
# Create initial MLP layer.
layers = [hk.nets.MLP([hidden_size], w_init=uniform_initializer)]
# Follow it up with num_blocks MLPs with layernorm and residual connections.
for _ in range(num_blocks):
mlp = hk.nets.MLP([hidden_size, hidden_size], w_init=uniform_initializer)
layers.append(ResidualLayerNormWrapper(mlp))
self._network = hk.Sequential(layers)
def __call__(self, inputs: jnp.ndarray):
return self._network(inputs)
class UnivariateGaussianMixture(acme_networks.GaussianMixture):
"""Head which outputs a Mixture of Gaussians Distribution."""
def __init__(self,
num_dimensions: int,
num_components: int = 5,
init_scale: Optional[float] = None):
"""Create an mixture of Gaussian actor head.
Args:
num_dimensions: dimensionality of the output distribution. Each dimension
is going to be an independent 1d GMM model.
num_components: number of mixture components.
init_scale: the initial scale for the Gaussian mixture components.
"""
super().__init__(num_dimensions=num_dimensions,
num_components=num_components,
multivariate=False,
init_scale=init_scale,
name='UnivariateGaussianMixture')
class StochasticSamplingHead(hk.Module):
"""Simple haiku module to sample from a tfd.Distribution."""
def __call__(self, sample_key: acme_networks.PRNGKey,
distribution: tfd.Distribution):
return distribution.sample(seed=sample_key)
def make_mix_gaussian_feedforward_networks(action_spec: specs.BoundedArray,
num_costs: int):
"""Makes feedforward networks with mix gaussian actor head."""
action_dim = np.prod(action_spec.shape, dtype=int)
hidden_size = 1024
nu_network = hk.Sequential([
acme_utils.batch_concat,
acme_networks.LayerNormMLP(layer_sizes=[512, 512, 256, 1]),
])
chi_network = hk.Sequential([
acme_utils.batch_concat,
acme_networks.LayerNormMLP(layer_sizes=[512, 512, 256, num_costs]),
])
actor_encoder = hk.Sequential([
acme_utils.batch_concat,
hk.Linear(300, w_init=uniform_initializer),
hk.LayerNorm(slice(1, None), True, True),
jnp.tanh,
])
actor_neck = LayerNormAndResidualMLP(hidden_size, num_blocks=4)
actor_head = UnivariateGaussianMixture(
num_components=5, num_dimensions=action_dim)
stochastic_policy_network = hk.Sequential(
[actor_encoder, actor_neck, actor_head])
class LowNoisePolicyNetwork(hk.Module):
def __call__(self, inputs):
x = actor_encoder(inputs)
x = actor_neck(x)
x = actor_head(x, low_noise_policy=True)
return x
low_noise_policy_network = LowNoisePolicyNetwork()
# Behavior networks output an action while the policy outputs a distribution.
stochastic_sampling_head = StochasticSamplingHead()
class BehaviorNetwork(hk.Module):
def __call__(self, sample_key, inputs):
dist = low_noise_policy_network(inputs)
return stochastic_sampling_head(sample_key, dist)
behavior_network = BehaviorNetwork()
return {
'nu': nu_network,
'chi': chi_network,
'policy': stochastic_policy_network,
'low_noise_policy': low_noise_policy_network,
'behavior': behavior_network,
}
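# A minimal sketch (the action spec below is an assumption) of how these
# modules are meant to be used: they are haiku modules, so a forward function
# like this must be wrapped with hk.transform before init/apply can be called,
# e.g. hk.without_apply_rng(hk.transform(_example_policy_forward)).
def _example_policy_forward(obs):
  action_spec = specs.BoundedArray((2,), np.float32, minimum=-1.0, maximum=1.0)
  networks = make_mix_gaussian_feedforward_networks(action_spec, num_costs=1)
  return networks['policy'](obs)  # a tfd.Distribution over actions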
| constrained_optidice-main | neural/net_templates.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CDICE network definition."""
import dataclasses
from acme.jax import networks as acme_networks
@dataclasses.dataclass
class CDICENetworks:
"""Network and pure functions for the neural CDICE agent."""
forward: acme_networks.FeedForwardNetwork
policy: acme_networks.FeedForwardNetwork
behavior: acme_networks.FeedForwardNetwork
| constrained_optidice-main | neural/networks.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CoptiDICE learner implementation."""
import functools
import time
from typing import Any, Dict, List, Optional, NamedTuple, Tuple
from absl import logging
import acme
from acme.jax import networks as networks_lib
from acme.jax import utils as acme_utils
from acme.utils import counting
from acme.utils import loggers
import jax
import jax.numpy as jnp
import optax
import tensorflow as tf
import tree
from constrained_optidice.neural.networks import CDICENetworks
stop_gradient = jax.lax.stop_gradient
class TrainingState(NamedTuple):
"""Contains training state for the learner."""
optimizer_state: optax.OptState # for (nu, lamb, chi, tau) params
policy_optimizer_state: optax.OptState
params: networks_lib.Params # (nu, lamb, chi, tau)
target_params: networks_lib.Params # target network of (params)
policy_params: networks_lib.Params
target_policy_params: networks_lib.Params
key: networks_lib.PRNGKey
steps: int
def conditional_update(new_tensors, old_tensors, is_time):
"""Checks whether to update the params and returns the correct params."""
return jax.tree_multimap(lambda new, old: jax.lax.select(is_time, new, old),
new_tensors, old_tensors)
def periodic_update(new_tensors, old_tensors, steps, update_period: int):
"""Periodically switch all elements from a nested struct with new elements."""
return conditional_update(
new_tensors, old_tensors, is_time=steps % update_period == 0)
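# A minimal usage sketch (parameter pytrees are illustrative): this is the
# usual target-network pattern, copying the online params into the target
# params every `update_period` learner steps and keeping them fixed otherwise.
def _example_periodic_update_usage():
  online_params = {'w': jnp.ones(3)}
  target_params = {'w': jnp.zeros(3)}
  # steps=10 and update_period=5 -> 10 % 5 == 0, so the online params replace
  # the target params; at other steps the old target params are returned.
  return periodic_update(online_params, target_params, steps=10,
                         update_period=5)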
def get_f_divergence_fn(f_type: str):
"""Returns a function that computes the provided f-divergence type."""
if f_type == 'chisquare':
def f_fn(x):
return 0.5 * (x - 1)**2
def f_prime_inv_fn(x):
return x + 1
elif f_type == 'softchi':
def f_fn(x):
return jnp.where(x < 1,
x * (jnp.log(x + 1e-10) - 1) + 1, 0.5 * (x - 1)**2)
def f_prime_inv_fn(x):
return jnp.where(x < 0, jnp.exp(jnp.minimum(x, 0)), x + 1)
elif f_type == 'kl':
def f_fn(x):
return x * jnp.log(x + 1e-10)
def f_prime_inv_fn(x):
return jnp.exp(x - 1)
else:
raise NotImplementedError('undefined f_fn', f_type)
return f_fn, f_prime_inv_fn
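# A small illustrative check (not used by the learner): for the 'chisquare'
# case f(x) = 0.5 * (x - 1)**2, so f'(x) = x - 1 and its inverse is
# y -> y + 1, which is exactly the pair returned above.
def _example_f_divergence_pair():
  f_fn, f_prime_inv_fn = get_f_divergence_fn('chisquare')
  x = jnp.array(2.0)
  # f(2.0) = 0.5 and f_prime_inv(f'(2.0)) = f_prime_inv(1.0) = 2.0 = x.
  return f_fn(x), f_prime_inv_fn(x - 1.0)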
class CDICELearner(acme.Learner):
"""CDICE learner."""
_state: TrainingState
def __init__(self,
key: networks_lib.PRNGKey,
networks: CDICENetworks,
transition_dataset,
init_obs_dataset,
agent_params: Dict[str, Any],
num_costs: int,
cost_fn,
target_update_period: int = 1000,
clipping: bool = False,
logger: Optional[loggers.Logger] = None,
counter: Optional[counting.Counter] = None):
self._agent_params = agent_params
self._target_update_period = target_update_period
self._cost_fn = cost_fn
self._transition_iterator = transition_dataset
if isinstance(transition_dataset, tf.data.Dataset):
self._transition_iterator = iter(transition_dataset.as_numpy_iterator())
self._init_obs_iterator = init_obs_dataset
if isinstance(init_obs_dataset, tf.data.Dataset):
self._init_obs_iterator = iter(init_obs_dataset.as_numpy_iterator())
policy_extraction_mode = self._agent_params['policy_extraction_mode']
learning_rate = self._agent_params['learning_rate']
gamma = self._agent_params['gamma']
alpha = self._agent_params['alpha']
f_type = self._agent_params['f_type']
gradient_penalty: float = self._agent_params['gradient_penalty']
cost_ub_eps: float = self._agent_params['cost_ub_epsilon']
c_hat = jnp.ones(num_costs) * self._agent_params['cost_thresholds']
# The function definition for f-divergence.
f_fn, f_prime_inv_fn = get_f_divergence_fn(f_type)
optimizer = optax.adam(learning_rate)
policy_optimizer = optax.adam(learning_rate)
def _analytic_w(params, data):
"""Compute the closed-form solution of w."""
o_tm1, unused_a_tm1, r_t, c_t, d_t, o_t = data
f = networks.forward.apply(params, o_tm1)
f_next = networks.forward.apply(params, o_t)
e_nu_lamb = r_t - jnp.sum(c_t * stop_gradient(f['lamb']), axis=-1)
e_nu_lamb += gamma * d_t * f_next['nu'] - f['nu']
w_sa = jax.nn.relu(f_prime_inv_fn(e_nu_lamb / alpha))
return f, f_next, e_nu_lamb, w_sa
# Compute gradients with respect to the input
@functools.partial(jax.vmap, in_axes=(None, 0))
@functools.partial(jax.grad, argnums=1)
def nu_grad_input(params, obs):
"""Forward computation of nu for a single sample: obs -> ()."""
f = networks.forward.apply(params, acme_utils.add_batch_dim(obs))
return f['nu'][0]
@functools.partial(jax.vmap, in_axes=(None, 0))
@functools.partial(jax.jacobian, argnums=1)
def chi_grad_input(params, obs):
"""Forward computation of nu for a single sample: obs -> ()."""
f = networks.forward.apply(params, acme_utils.add_batch_dim(obs))
return f['chi'][0]
def _compute_obs_mix(obs1, obs2, eps):
"""Compute eps * obs1 + (1 - eps) * obs2."""
e = tree.map_structure(lambda x, eps=eps: eps, obs1)
return tree.map_structure(lambda x0, x1, e: (x0.T * e + x1.T * (1 - e)).T,
obs1, obs2, e)
    def loss(params: networks_lib.Params, data, init_o,
             key) -> Tuple[jnp.ndarray, Dict[str, jnp.ndarray]]:
# Compute losses
o_tm1, a_tm1, unused_r_t, c_t, d_t, unused_o_t = data
f_init = networks.forward.apply(params, init_o)
f, f_next, e_nu_lamb, w_sa = _analytic_w(params, data)
w_sa_no_grad = stop_gradient(w_sa)
# Gradient norm for o_mix: interpolate init_o and o_tm1 with eps~U(0,1)
eps = jax.random.uniform(key, shape=(a_tm1.shape[0],))
obs_mix = _compute_obs_mix(init_o, o_tm1, eps)
nu_grad_norm = jnp.linalg.norm( # 1e-10 was added to prevent nan
acme_utils.batch_concat(nu_grad_input(params, obs_mix)) + 1e-10,
axis=1) # [batch_size]
chi_grad_norm = jnp.linalg.norm(
acme_utils.batch_concat( # 1e-10 was added to prevent nan
chi_grad_input(params, obs_mix), num_batch_dims=2) + 1e-10,
axis=2) # [batch_size, num_costs]
# (chi, tau) loss
batch_size = a_tm1.shape[0]
if cost_ub_eps == 0:
ell = jnp.zeros((batch_size, num_costs))
chi_tau_loss = kl_divergence = 0
cost_ub = jnp.mean(w_sa[:, None] * c_t, axis=0)
else:
ell = (1 - gamma) * f_init['chi'] # [n, num_costs]
ell += w_sa_no_grad[:, None] * (
c_t + gamma * d_t[:, None] * f_next['chi'] - f['chi'])
logits = ell / stop_gradient(f['tau'])
weights = jax.nn.softmax(logits, axis=0) * batch_size # [n, num_costs]
log_weights = jax.nn.log_softmax(logits, axis=0) + jnp.log(batch_size)
kl_divergence = jnp.mean(
weights * log_weights - weights + 1, axis=0) # [num_costs]
cost_ub = jnp.mean(weights * w_sa_no_grad[:, None] * c_t, axis=0)
chi_tau_loss = jnp.sum(jnp.mean(weights * ell, axis=0))
chi_tau_loss += jnp.sum(-f['tau'] *
(stop_gradient(kl_divergence) - cost_ub_eps))
chi_tau_loss += gradient_penalty * jnp.mean(
jnp.sum(jax.nn.relu(chi_grad_norm - 5)**2, axis=1), axis=0) # GP
# nu loss
nu_loss = (1 - gamma) * jnp.mean(f_init['nu'])
nu_loss += -alpha * jnp.mean(f_fn(w_sa))
nu_loss += jnp.mean(w_sa * e_nu_lamb)
nu_loss += gradient_penalty * jnp.mean(jax.nn.relu(nu_grad_norm - 5)**2)
# lamb loss
lamb_loss = -jnp.dot(f['lamb'], stop_gradient(cost_ub) - c_hat)
total_loss = nu_loss + lamb_loss + chi_tau_loss
metrics = {
'nu_loss': nu_loss,
'lamb_loss': lamb_loss,
'chi_tau_loss': chi_tau_loss,
'nu': jnp.mean(f['nu']),
'next_nu': jnp.mean(f_next['nu']),
'initial_nu': jnp.mean(f_init['nu']),
'w_sa': jnp.mean(w_sa),
'cost_ub': cost_ub,
'kl_divergence': kl_divergence,
'chi': jnp.mean(f['chi'], axis=0),
'tau': f['tau'],
'lamb': f['lamb'],
}
return total_loss, metrics
def policy_loss(policy_params: networks_lib.Params,
params: networks_lib.Params,
                    data) -> Tuple[jnp.ndarray, Dict[str, jnp.ndarray]]:
o_tm1, a_tm1, unused_r_t, unused_c_t, unused_d_t, unused_o_t = data
pi_a_tm1 = networks.policy.apply(policy_params, o_tm1)
# weighted BC
assert len(pi_a_tm1.batch_shape) == 1
logp_tm1 = pi_a_tm1.log_prob(a_tm1)
if policy_extraction_mode == 'uniform':
policy_loss = -jnp.mean(logp_tm1) # vanilla BC
elif policy_extraction_mode == 'wsa':
_, _, _, w_sa = _analytic_w(params, data)
assert len(w_sa.shape) == 1
policy_loss = -jnp.mean(w_sa * logp_tm1)
else:
raise NotImplementedError('undefined policy extraction.',
policy_extraction_mode)
metrics = {'policy_loss': policy_loss}
return policy_loss, metrics
loss_grad = jax.grad(loss, has_aux=True)
policy_loss_grad = jax.grad(policy_loss, has_aux=True)
def _step(state: TrainingState, data, init_o):
metrics = {}
# Compute loss and gradients
key, key_input = jax.random.split(state.key)
loss_grads, info = loss_grad(state.params, data, init_o, key_input)
policy_loss_grads, policy_info = policy_loss_grad(state.policy_params,
state.params,
data)
metrics.update(info)
metrics.update(policy_info)
# Apply gradients
updates, optimizer_state = optimizer.update(loss_grads,
state.optimizer_state)
params = optax.apply_updates(state.params, updates)
policy_updates, policy_optimizer_state = policy_optimizer.update(
policy_loss_grads, state.policy_optimizer_state)
policy_params = optax.apply_updates(state.policy_params, policy_updates)
# Update training state
target_params = periodic_update(params, state.target_params, state.steps,
self._target_update_period)
target_policy_params = periodic_update(policy_params,
state.target_policy_params,
state.steps,
self._target_update_period)
new_state = TrainingState(
optimizer_state=optimizer_state,
policy_optimizer_state=policy_optimizer_state,
params=params,
target_params=target_params,
policy_params=policy_params,
target_policy_params=target_policy_params,
key=key,
steps=state.steps + 1)
return new_state, metrics
@jax.jit
def make_initial_state(key: networks_lib.PRNGKey) -> TrainingState:
"""Initializes the training state (parameters and optimizer state)."""
key_nu, key_policy, key = jax.random.split(key, 3)
# Initialize parameters
params = networks.forward.init(key_nu)
policy_params = networks.policy.init(key_policy)
# Initialize optimizer states
optimizer_state = optimizer.init(params)
policy_optimizer_state = policy_optimizer.init(policy_params)
# Define a training state
state = TrainingState(
optimizer_state=optimizer_state,
policy_optimizer_state=policy_optimizer_state,
params=params,
target_params=params,
policy_params=policy_params,
target_policy_params=policy_params,
key=key,
steps=0)
return state
# General learner book-keeping and loggers.
self._counter = counter or counting.Counter(prefix='learner')
self._logger = logger or loggers.TerminalLogger('learner', time_delta=1.)
# Create initial state.
self._state = make_initial_state(key)
self._step = jax.jit(_step)
self._timestamp = None
def step(self):
"""Take one SGD step in the learner."""
init_o = next(self._init_obs_iterator)
sample = next(self._transition_iterator)
o_tm1, a_tm1, r_t, d_t, o_t = sample.data
c_t = self._cost_fn(o_tm1, a_tm1)
data = (o_tm1, a_tm1, r_t, c_t, d_t, o_t)
# Gradient update
new_state, metrics = self._step(self._state, data, init_o)
self._state = new_state
# Compute elapsed time.
timestamp = time.time()
elapsed_time = timestamp - self._timestamp if self._timestamp else 0
self._timestamp = timestamp
# Increment counts and record the current time
counts = self._counter.increment(steps=1, walltime=elapsed_time)
self._logger.write({
**metrics,
**counts
})
def get_variables(self, names: List[str]) -> List[networks_lib.Params]:
variables = {
'params': self._state.target_params,
'policy_params': self._state.target_policy_params,
}
return [variables[name] for name in names]
def save(self) -> TrainingState:
logging.info('learner.save is called.')
return self._state
def restore(self, state: TrainingState):
logging.info('learner.restore is called.')
self._state = state
| constrained_optidice-main | neural/learning.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Cost function definitions."""
_DEFAULT_NUM_COSTS = 1
def _default_cost_fn(obs, unused_action):
"""Cost function C(s,a)."""
return obs['penalties']
def domain_cost_fn(unused_domain_task_name):
"""Output cost function and the number of costs for the given task."""
return _default_cost_fn, _DEFAULT_NUM_COSTS
| constrained_optidice-main | neural/cost.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experiment script for neural constrained OptiDICE."""
import functools
from typing import Any, Dict
from absl import app
from absl import flags
from acme.jax import networks as acme_networks
from acme.jax import utils
import haiku as hk
import jax
import jax.numpy as jnp
import launchpad as lp
from constrained_optidice.neural.agent_distributed import DistributedCDICE
import constrained_optidice.neural.data_util as data_util
import constrained_optidice.neural.net_templates as net_templates
from constrained_optidice.neural.networks import CDICENetworks
import constrained_optidice.neural.rwrl as rwrl
FLAGS = flags.FLAGS
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_evaluators', 1, 'Number of workers for evaluation.')
flags.DEFINE_string('data_path', None,
'Filepath for dataset used for training.')
flags.DEFINE_string('init_obs_data_path', None,
                    'Filepath for the initial observation dataset.')
flags.DEFINE_string('policy_extraction_mode', 'wsa',
'Policy extraction mode. (wsa, uniform).')
flags.DEFINE_integer('max_learner_steps', 100,
'The maximum number of training iteration.')
flags.DEFINE_float('gamma', 0.995,
'Discount factor. (0 ~ 1)')
flags.DEFINE_float('learning_rate', 3e-4, 'Learning rate.')
flags.DEFINE_integer('batch_size', 1024, 'Batch size')
flags.DEFINE_float('alpha', 0.01, 'Regularizer on Df(d|dD).')
flags.DEFINE_float('cost_thresholds', 0.1, 'The cost constraint threshold.')
flags.DEFINE_string('f_type', 'softchi', 'The type of f-divergence function.')
flags.DEFINE_float('gradient_penalty', 1e-5,
'Gradient norm penalty regularization.')
flags.DEFINE_float('cost_ub_epsilon', 0.01,
'Adjusts the degree of overestimation of cost value.')
flags.DEFINE_string('task_name', 'rwrl:cartpole:realworld_swingup',
'Task name.')
flags.DEFINE_float('safety_coeff', 0.3,
'The safety coefficient for the RWRL task.')
def make_networks(observation_spec,
action_spec,
num_costs) -> CDICENetworks:
"""Create networks used by the agent."""
make_networks_fn = functools.partial(
net_templates.make_mix_gaussian_feedforward_networks,
action_spec=action_spec,
num_costs=num_costs)
def _forward(obs):
"""Forward computation of (nu, lamb, chi, tau)."""
networks = make_networks_fn()
nu = networks['nu'](obs)[:, 0]
lamb_params = hk.get_parameter('lamb_params', (num_costs,), init=jnp.zeros)
lamb = jnp.clip(jnp.exp(lamb_params), 0, 1e3)
chi = networks['chi'](obs)
tau_params = hk.get_parameter('tau_params', (num_costs,), init=jnp.zeros)
tau = jnp.exp(tau_params) + 1e-6
return {
'nu': nu,
'lamb': lamb,
'chi': chi,
'tau': tau,
}
def _policy_fn(obs):
"""Policy returns action distribution."""
networks = make_networks_fn()
return networks['policy'](obs)
def _behavior_fn(sample_key, obs):
"""Behavior returns action (will be used for evaluator)."""
networks = make_networks_fn()
return networks['behavior'](sample_key, obs)
forward = hk.without_apply_rng(hk.transform(_forward))
policy = hk.without_apply_rng(hk.transform(_policy_fn))
behavior = hk.without_apply_rng(hk.transform(_behavior_fn))
# Create dummy observations and actions to create network parameters.
dummy_obs = utils.add_batch_dim(utils.zeros_like(observation_spec))
dummy_action = utils.add_batch_dim(utils.zeros_like(action_spec))
dummy_sample_key = jax.random.PRNGKey(42)
return CDICENetworks(
forward=acme_networks.FeedForwardNetwork(
init=lambda key: forward.init(key, dummy_obs),
apply=forward.apply),
policy=acme_networks.FeedForwardNetwork(
init=lambda key: policy.init(key, dummy_obs), apply=policy.apply),
behavior=acme_networks.FeedForwardNetwork(
init=lambda key: behavior.init(key, dummy_sample_key, dummy_obs),
apply=behavior.apply))
def get_program(num_evaluators: int, agent_params: Dict[str, Any],
task_params: Dict[str, Any], seed: int) -> lp.Program:
"""Construct the program."""
if task_params['task_name'].startswith('rwrl:'):
_, domain_name, task_name = task_params['task_name'].split(':')
environment_factory = functools.partial(
rwrl.make_environment,
domain_name=domain_name,
task_name=task_name,
safety_coeff=task_params['safety_coeff'])
dataset_iterator_factory = functools.partial(
data_util.create_data_iterators,
data_path=task_params['data_path'],
init_obs_data_path=task_params['init_obs_data_path'],
batch_size=agent_params['batch_size']
)
else:
raise NotImplementedError('Undefined task', task_params['task_name'])
# Construct the program.
program_builder = DistributedCDICE(
environment_factory=environment_factory,
dataset_iterator_factory=dataset_iterator_factory,
task_name=task_params['task_name'],
make_networks=make_networks,
seed=seed,
agent_params=agent_params,
num_evaluators=num_evaluators)
program = program_builder.build()
return program
def main(unused_argv):
# Get list of hyperparameter setting to run.
agent_params = {
'policy_extraction_mode': FLAGS.policy_extraction_mode,
'max_learner_steps': FLAGS.max_learner_steps,
'gamma': FLAGS.gamma,
'learning_rate': FLAGS.learning_rate,
'batch_size': FLAGS.batch_size,
'alpha': FLAGS.alpha,
'cost_thresholds': FLAGS.cost_thresholds,
'f_type': FLAGS.f_type,
'gradient_penalty': FLAGS.gradient_penalty,
'cost_ub_epsilon': FLAGS.cost_ub_epsilon,
}
task_params = {
'task_name': FLAGS.task_name,
'safety_coeff': FLAGS.safety_coeff,
'data_path': FLAGS.data_path,
'init_obs_data_path': FLAGS.init_obs_data_path,
}
if FLAGS.data_path is None or FLAGS.init_obs_data_path is None:
raise ValueError(
'FLAGS.data_path and FLAGS.init_obs_data_path should be specified.')
# Local launch for debugging.
lp.launch(get_program(num_evaluators=FLAGS.num_evaluators,
agent_params=agent_params,
task_params=task_params,
seed=FLAGS.seed))
if __name__ == '__main__':
app.run(main)
| constrained_optidice-main | neural/run_experiment.py |
import h5py
f = h5py.File("test.h5", 'r')
dset = f['dset']
print(dset[...])
lua = """
require 'hdf5'
hdf5.open("in.h5", 'r')
"""
# TODO
| torch-hdf5-master | tests/python/testPython.py |
# This script is used to generate reference HDF5 files. It uses h5py, so that
# we can compare against that implementation.
import h5py
import argparse
import os
from collections import namedtuple
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("out")
args = parser.parse_args()
Case = namedtuple('Case', ['name', 'data'])
testCases = []
def addTestCase(name, data):
testCases.append(Case(name, data))
class Data(object):
def __init__(self, w, h, x, y):
super(Data, self).__init__()
self.w = w
self.h = h
self.x = x
self.y = y
def asPython(self, h5, name):
h5.create_dataset(name, (self.w, self.h))
h5[name][...] = np.linspace(self.x, self.y, self.w * self.h).reshape(self.w, self.h)
def asLua(self):
out = ""
out += "torch.linspace(%s, %s, %s)" % (self.x, self.y, self.w * self.h)
out += ":resize(%s, %s):float()" % (self.w, self.h)
return out
def luaDefinition(data):
return "return " + luaDefinitionHelper(data, 0)
def luaDefinitionHelper(data, level):
text = ""
indent = " "
if isinstance(data, dict):
text = "{\n"
for k, v in data.iteritems():
text += indent * (level + 1) + k + " = " + luaDefinitionHelper(v, level + 1) + ",\n"
text += indent * level + "}"
else:
text += data.asLua()
return text
def writeH5(h5, data):
for k, v in data.iteritems():
if isinstance(v, dict):
group = h5.create_group(k)
writeH5(group, v)
continue
v.asPython(h5, k)
addTestCase('empty', {})
addTestCase('oneTensor', { 'data' : Data(10, 10, 0, 100) })
addTestCase('twoTensors', { 'data1' : Data(10, 10, 0, 100), 'data2' : Data(10, 10, 0, 10) })
addTestCase('twoTensorsNested', { 'group' : { 'data' : Data(10, 10, 0, 100) } })
for case in testCases:
print("=== Generating %s ===" % (case.name,))
h5file = h5py.File(os.path.join(args.out, case.name + ".h5"), 'w')
writeH5(h5file, case.data)
luaFilePath = os.path.join(args.out, case.name + ".lua")
with open(luaFilePath, 'w') as luaFile:
luaFile.write(luaDefinition(case.data))
| torch-hdf5-master | tests/data/generate.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
import os
from setuptools import find_namespace_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open('rlax/__init__.py') as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `rlax/__init__.py`')
def _parse_requirements(path):
with open(os.path.join(_CURRENT_DIR, path)) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
setup(
name='rlax',
version=_get_version(),
url='https://github.com/deepmind/rlax',
license='Apache 2.0',
author='DeepMind',
description=('A library of reinforcement learning building blocks in JAX.'),
long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
long_description_content_type='text/markdown',
author_email='[email protected]',
keywords='reinforcement-learning python machine learning',
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')),
tests_require=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements-test.txt')),
zip_safe=False, # Required for full installation.
python_requires='>=3.9',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| rlax-master | setup.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RLax: building blocks for RL, in JAX."""
from rlax._src.base import AllSum
from rlax._src.base import batched_index
from rlax._src.base import lhs_broadcast
from rlax._src.base import one_hot
from rlax._src.base import replace_masked
from rlax._src.clipping import clip_gradient
from rlax._src.clipping import huber_loss
from rlax._src.distributions import categorical_cross_entropy
from rlax._src.distributions import categorical_importance_sampling_ratios
from rlax._src.distributions import categorical_kl_divergence
from rlax._src.distributions import categorical_sample
from rlax._src.distributions import clipped_entropy_softmax
from rlax._src.distributions import epsilon_greedy
from rlax._src.distributions import gaussian_diagonal
from rlax._src.distributions import greedy
from rlax._src.distributions import multivariate_normal_kl_divergence
from rlax._src.distributions import softmax
from rlax._src.distributions import squashed_gaussian
from rlax._src.embedding import embed_oar
from rlax._src.episodic_memory import knn_query
from rlax._src.exploration import add_dirichlet_noise
from rlax._src.exploration import add_gaussian_noise
from rlax._src.exploration import add_ornstein_uhlenbeck_noise
from rlax._src.exploration import episodic_memory_intrinsic_rewards
from rlax._src.general_value_functions import feature_control_rewards
from rlax._src.general_value_functions import pixel_control_rewards
from rlax._src.interruptions import fix_step_type_on_interruptions
from rlax._src.losses import expectile_loss
from rlax._src.losses import l2_loss
from rlax._src.losses import likelihood
from rlax._src.losses import log_loss
from rlax._src.losses import pixel_control_loss
from rlax._src.model_learning import extract_subsequences
from rlax._src.model_learning import sample_start_indices
from rlax._src.moving_averages import create_ema
from rlax._src.moving_averages import EmaMoments
from rlax._src.moving_averages import EmaState
from rlax._src.mpo_ops import compute_parametric_kl_penalty_and_dual_loss
from rlax._src.mpo_ops import LagrangePenalty
from rlax._src.mpo_ops import mpo_compute_weights_and_temperature_loss
from rlax._src.mpo_ops import mpo_loss
from rlax._src.mpo_ops import vmpo_compute_weights_and_temperature_loss
from rlax._src.mpo_ops import vmpo_loss
from rlax._src.multistep import discounted_returns
from rlax._src.multistep import general_off_policy_returns_from_action_values
from rlax._src.multistep import general_off_policy_returns_from_q_and_v
from rlax._src.multistep import lambda_returns
from rlax._src.multistep import n_step_bootstrapped_returns
from rlax._src.multistep import truncated_generalized_advantage_estimation
from rlax._src.nested_updates import conditional_update
from rlax._src.nested_updates import periodic_update
from rlax._src.nonlinear_bellman import compose_tx
from rlax._src.nonlinear_bellman import DISCOUNT_TRANSFORM_PAIR
from rlax._src.nonlinear_bellman import HYPERBOLIC_SIN_PAIR
from rlax._src.nonlinear_bellman import IDENTITY_PAIR
from rlax._src.nonlinear_bellman import muzero_pair
from rlax._src.nonlinear_bellman import SIGNED_HYPERBOLIC_PAIR
from rlax._src.nonlinear_bellman import SIGNED_LOGP1_PAIR
from rlax._src.nonlinear_bellman import transformed_general_off_policy_returns_from_action_values
from rlax._src.nonlinear_bellman import transformed_lambda_returns
from rlax._src.nonlinear_bellman import transformed_n_step_q_learning
from rlax._src.nonlinear_bellman import transformed_n_step_returns
from rlax._src.nonlinear_bellman import transformed_q_lambda
from rlax._src.nonlinear_bellman import transformed_retrace
from rlax._src.nonlinear_bellman import twohot_pair
from rlax._src.nonlinear_bellman import TxPair
from rlax._src.nonlinear_bellman import unbiased_transform_pair
from rlax._src.policy_gradients import clipped_surrogate_pg_loss
from rlax._src.policy_gradients import dpg_loss
from rlax._src.policy_gradients import entropy_loss
from rlax._src.policy_gradients import policy_gradient_loss
from rlax._src.policy_gradients import qpg_loss
from rlax._src.policy_gradients import rm_loss
from rlax._src.policy_gradients import rpg_loss
from rlax._src.policy_targets import constant_policy_targets
from rlax._src.policy_targets import PolicyTarget
from rlax._src.policy_targets import sampled_policy_distillation_loss
from rlax._src.policy_targets import zero_policy_targets
from rlax._src.pop_art import art
from rlax._src.pop_art import normalize
from rlax._src.pop_art import pop
from rlax._src.pop_art import popart
from rlax._src.pop_art import PopArtState
from rlax._src.pop_art import unnormalize
from rlax._src.pop_art import unnormalize_linear
from rlax._src.transforms import identity
from rlax._src.transforms import logit
from rlax._src.transforms import power
from rlax._src.transforms import sigmoid
from rlax._src.transforms import signed_expm1
from rlax._src.transforms import signed_hyperbolic
from rlax._src.transforms import signed_logp1
from rlax._src.transforms import signed_parabolic
from rlax._src.transforms import transform_from_2hot
from rlax._src.transforms import transform_to_2hot
from rlax._src.tree_util import transpose_first_axis_to_last
from rlax._src.tree_util import transpose_last_axis_to_first
from rlax._src.tree_util import tree_fn
from rlax._src.tree_util import tree_map_zipped
from rlax._src.tree_util import tree_replace_masked
from rlax._src.tree_util import tree_select
from rlax._src.tree_util import tree_split_key
from rlax._src.tree_util import tree_split_leaves
from rlax._src.value_learning import categorical_double_q_learning
from rlax._src.value_learning import categorical_l2_project
from rlax._src.value_learning import categorical_q_learning
from rlax._src.value_learning import categorical_td_learning
from rlax._src.value_learning import double_q_learning
from rlax._src.value_learning import expected_sarsa
from rlax._src.value_learning import persistent_q_learning
from rlax._src.value_learning import q_lambda
from rlax._src.value_learning import q_learning
from rlax._src.value_learning import quantile_expected_sarsa
from rlax._src.value_learning import quantile_q_learning
from rlax._src.value_learning import quantile_regression_loss
from rlax._src.value_learning import qv_learning
from rlax._src.value_learning import qv_max
from rlax._src.value_learning import retrace
from rlax._src.value_learning import retrace_continuous
from rlax._src.value_learning import sarsa
from rlax._src.value_learning import sarsa_lambda
from rlax._src.value_learning import td_lambda
from rlax._src.value_learning import td_learning
from rlax._src.vtrace import leaky_vtrace
from rlax._src.vtrace import leaky_vtrace_td_error_and_advantage
from rlax._src.vtrace import vtrace
from rlax._src.vtrace import vtrace_td_error_and_advantage
__version__ = "0.1.6"
__all__ = (
"add_gaussian_noise",
"add_ornstein_uhlenbeck_noise",
"add_dirichlet_noise",
"AllSum",
"batched_index",
"categorical_cross_entropy",
"categorical_double_q_learning",
"categorical_importance_sampling_ratios",
"categorical_kl_divergence",
"categorical_l2_project",
"categorical_q_learning",
"categorical_td_learning",
"clip_gradient",
"clipped_surrogate_pg_loss",
"compose_tx",
"conditional_update",
"constant_policy_targets",
"create_ema",
"discounted_returns",
"DISCOUNT_TRANSFORM_PAIR",
"double_q_learning",
"dpg_loss",
"EmaMoments",
"EmaState",
"entropy_loss",
"episodic_memory_intrinsic_rewards",
"epsilon_greedy",
"expected_sarsa",
"expectile_loss",
"extract_subsequences",
"feature_control_rewards",
"fix_step_type_on_interruptions",
"gaussian_diagonal",
"HYPERBOLIC_SIN_PAIR",
"squashed_gaussian",
"clipped_entropy_softmax",
"art",
"compute_parametric_kl_penalty_and_dual_loss",
"general_off_policy_returns_from_action_values",
"general_off_policy_returns_from_q_and_v",
"greedy",
"huber_loss",
"identity",
"IDENTITY_PAIR",
"knn_query",
"l2_loss",
"LagrangePenalty",
"lambda_returns",
"leaky_vtrace",
"leaky_vtrace_td_error_and_advantage",
"lhs_broadcast",
"likelihood",
"logit",
"log_loss",
"mpo_compute_weights_and_temperature_loss",
"mpo_loss",
"multivariate_normal_kl_divergence",
"muzero_pair",
"normalize",
"n_step_bootstrapped_returns",
"one_hot",
"periodic_update",
"persistent_q_learning",
"pixel_control_rewards",
"PolicyTarget",
"policy_gradient_loss",
"pop",
"popart",
"PopArtState",
"power",
"qpg_loss",
"quantile_expected_sarsa",
"quantile_q_learning",
"quantile_regression_loss",
"qv_learning",
"qv_max",
"q_lambda",
"q_learning",
"replace_masked",
"retrace",
"retrace_continuous",
"rm_loss",
"rpg_loss",
"sample_start_indices",
"sampled_policy_distillation_loss",
"sarsa",
"sarsa_lambda",
"sigmoid",
"signed_expm1",
"signed_hyperbolic",
"SIGNED_HYPERBOLIC_PAIR",
"signed_logp1",
"SIGNED_LOGP1_PAIR",
"signed_parabolic",
"softmax",
"td_lambda",
"td_learning",
"transformed_general_off_policy_returns_from_action_values",
"transformed_lambda_returns",
"transformed_n_step_q_learning",
"transformed_n_step_returns",
"transformed_q_lambda",
"transformed_retrace",
"transform_from_2hot",
"transform_to_2hot",
"transpose_last_axis_to_first",
"transpose_first_axis_to_last",
"tree_fn",
"tree_map_zipped",
"tree_replace_masked",
"tree_select",
"tree_split_key",
"tree_split_leaves",
"truncated_generalized_advantage_estimation",
"twohot_pair",
"TxPair",
"unbiased_transform_pair",
"unnormalize",
"unnormalize_linear",
"vmpo_compute_weights_and_temperature_loss",
"vmpo_loss",
"vtrace",
"vtrace_td_error_and_advantage",
"zero_policy_targets",
)
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the RLax public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
| rlax-master | rlax/__init__.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rlax."""
from absl.testing import absltest
import rlax
class RlaxTest(absltest.TestCase):
"""Test rlax can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(rlax, 'batched_index'))
if __name__ == '__main__':
absltest.main()
| rlax-master | rlax/rlax_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions for state value and action-value learning.
Value functions estimate the expected return (discounted sum of rewards) that
can be collected by an agent under a given policy of behaviour. This subpackage
implements a number of functions for value learning in discrete scalar action
spaces. Actions are assumed to be represented as indices in the range `[0, A)`
where `A` is the number of distinct actions.
"""
from typing import Union
import chex
import jax
import jax.numpy as jnp
from rlax._src import base
from rlax._src import clipping
from rlax._src import distributions
from rlax._src import multistep
Array = chex.Array
Numeric = chex.Numeric
def td_learning(
v_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
v_t: Numeric,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the TD-learning temporal difference error.
See "Learning to Predict by the Methods of Temporal Differences" by Sutton.
(https://link.springer.com/article/10.1023/A:1022633531479).
Args:
v_tm1: state values at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
v_t: state values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
TD-learning temporal difference error.
"""
chex.assert_rank([v_tm1, r_t, discount_t, v_t], 0)
chex.assert_type([v_tm1, r_t, discount_t, v_t], float)
target_tm1 = r_t + discount_t * v_t
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - v_tm1
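# A minimal usage sketch (scalar values below are illustrative assumptions):
def _example_td_learning_usage():
  v_tm1 = jnp.array(1.0)
  r_t = jnp.array(0.5)
  discount_t = jnp.array(0.9)
  v_t = jnp.array(2.0)
  # TD error = r_t + discount_t * v_t - v_tm1 = 0.5 + 1.8 - 1.0 = 1.3.
  return td_learning(v_tm1, r_t, discount_t, v_t)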
def td_lambda(
v_tm1: Array,
r_t: Array,
discount_t: Array,
v_t: Array,
lambda_: Numeric,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates the TD(lambda) temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node74.html).
Args:
v_tm1: sequence of state values at time t-1.
r_t: sequence of rewards at time t.
discount_t: sequence of discounts at time t.
v_t: sequence of state values at time t.
lambda_: mixing parameter lambda, either a scalar or a sequence.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
TD(lambda) temporal difference error.
"""
chex.assert_rank([v_tm1, r_t, discount_t, v_t, lambda_], [1, 1, 1, 1, {0, 1}])
chex.assert_type([v_tm1, r_t, discount_t, v_t, lambda_], float)
target_tm1 = multistep.lambda_returns(r_t, discount_t, v_t, lambda_)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - v_tm1
def sarsa(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
a_t: Numeric,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the SARSA temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node64.html.)
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
a_t: action index at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
SARSA temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, a_t],
[1, 0, 0, 0, 1, 0])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, a_t],
[float, int, float, float, float, int])
target_tm1 = r_t + discount_t * q_t[a_t]
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
def expected_sarsa(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
probs_a_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the expected SARSA (SARSE) temporal difference error.
See "A Theoretical and Empirical Analysis of Expected Sarsa" by Seijen,
van Hasselt, Whiteson et al.
(http://www.cs.ox.ac.uk/people/shimon.whiteson/pubs/vanseijenadprl09.pdf).
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
probs_a_t: action probabilities at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Expected SARSA temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, probs_a_t],
[1, 0, 0, 0, 1, 1])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, probs_a_t],
[float, int, float, float, float, float])
target_tm1 = r_t + discount_t * jnp.dot(q_t, probs_a_t)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
def sarsa_lambda(
q_tm1: Array,
a_tm1: Array,
r_t: Array,
discount_t: Array,
q_t: Array,
a_t: Array,
lambda_: Numeric,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates the SARSA(lambda) temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node77.html).
Args:
q_tm1: sequence of Q-values at time t-1.
a_tm1: sequence of action indices at time t-1.
r_t: sequence of rewards at time t.
discount_t: sequence of discounts at time t.
q_t: sequence of Q-values at time t.
a_t: sequence of action indices at time t.
lambda_: mixing parameter lambda, either a scalar or a sequence.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
SARSA(lambda) temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, a_t, lambda_],
[2, 1, 1, 1, 2, 1, {0, 1}])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, a_t, lambda_],
[float, int, float, float, float, int, float])
qa_tm1 = base.batched_index(q_tm1, a_tm1)
qa_t = base.batched_index(q_t, a_t)
target_tm1 = multistep.lambda_returns(r_t, discount_t, qa_t, lambda_)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - qa_tm1
def q_learning(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the Q-learning temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node65.html).
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Q-learning temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t], [1, 0, 0, 0, 1])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t],
[float, int, float, float, float])
target_tm1 = r_t + discount_t * jnp.max(q_t)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
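# Illustrative usage sketch (hypothetical values, not part of the library):
# Q-learning bootstraps from the greedy value max(q_t). With q_tm1 = [1., 2.],
# a_tm1 = 1, r_t = 0., discount_t = 0.9 and q_t = [2., 3.], the target is
# 0. + 0.9 * 3. = 2.7 and the error is 2.7 - 2. = 0.7:
#   q_learning(jnp.array([1., 2.]), 1, 0., 0.9, jnp.array([2., 3.]))  # 0.7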
def double_q_learning(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t_value: Array,
q_t_selector: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the double Q-learning temporal difference error.
See "Double Q-learning" by van Hasselt.
(https://papers.nips.cc/paper/3964-double-q-learning.pdf).
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t_value: Q-values at time t.
q_t_selector: selector Q-values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Double Q-learning temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t_value, q_t_selector],
[1, 0, 0, 0, 1, 1])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t_value, q_t_selector],
[float, int, float, float, float, float])
target_tm1 = r_t + discount_t * q_t_value[q_t_selector.argmax()]
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
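# Illustrative usage sketch (hypothetical values, not part of the library):
# the greedy action is chosen from q_t_selector but evaluated with q_t_value.
# Here q_t_selector = [1., 5.] selects action 1, so the bootstrap value is
# q_t_value[1] = 2., giving target 1. + 1. * 2. = 3. and error 3. - 1. = 2.:
#   double_q_learning(jnp.array([0., 1.]), 1, 1., 1.,
#                     jnp.array([9., 2.]), jnp.array([1., 5.]))  # 2.0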
def persistent_q_learning(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
action_gap_scale: float,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the persistent Q-learning temporal difference error.
See "Increasing the Action Gap: New Operators for Reinforcement Learning"
by Bellemare, Ostrovski, Guez et al. (https://arxiv.org/abs/1512.04860).
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
action_gap_scale: coefficient in [0, 1] for scaling the action gap term.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Persistent Q-learning temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t], [1, 0, 0, 0, 1])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t],
[float, int, float, float, float])
corrected_q_t = (
(1. - action_gap_scale) * jnp.max(q_t)
+ action_gap_scale * q_t[a_tm1]
)
target_tm1 = r_t + discount_t * corrected_q_t
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
def qv_learning(
q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
v_t: Numeric,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the QV-learning temporal difference error.
See "Two Novel On-policy Reinforcement Learning Algorithms based on
TD(lambda)-methods" by Wiering and van Hasselt
(https://ieeexplore.ieee.org/abstract/document/4220845).
Args:
q_tm1: Q-values at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
v_t: state values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
QV-learning temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, v_t], [1, 0, 0, 0, 0])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, v_t],
[float, int, float, float, float])
target_tm1 = r_t + discount_t * v_t
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1[a_tm1]
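# Illustrative usage sketch (hypothetical values, not part of the library):
# QV-learning bootstraps from a separately learned state value v_t rather than
# from q_t. With q_tm1 = [1., 0.], a_tm1 = 0, r_t = 1., discount_t = 0.9 and
# v_t = 2., the error is 1. + 0.9 * 2. - 1. = 1.8:
#   qv_learning(jnp.array([1., 0.]), 0, 1., 0.9, 2.)  # 1.8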
def qv_max(
v_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Calculates the QVMAX temporal difference error.
See "The QV Family Compared to Other Reinforcement Learning Algorithms" by
Wiering and van Hasselt (2009).
(http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.713.1931)
Args:
v_tm1: state values at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_t: Q-values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
QVMAX temporal difference error.
"""
chex.assert_rank([v_tm1, r_t, discount_t, q_t], [0, 0, 0, 1])
chex.assert_type([v_tm1, r_t, discount_t, q_t], float)
target_tm1 = r_t + discount_t * jnp.max(q_t)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - v_tm1
def q_lambda(
q_tm1: Array,
a_tm1: Array,
r_t: Array,
discount_t: Array,
q_t: Array,
lambda_: Numeric,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates Peng's or Watkins' Q(lambda) temporal difference error.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/book/ebook/node78.html).
Args:
q_tm1: sequence of Q-values at time t-1.
a_tm1: sequence of action indices at time t-1.
r_t: sequence of rewards at time t.
discount_t: sequence of discounts at time t.
q_t: sequence of Q-values at time t.
lambda_: mixing parameter lambda, either a scalar (e.g. Peng's Q(lambda)) or
a sequence (e.g. Watkins' Q(lambda)).
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Q(lambda) temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, lambda_],
[2, 1, 1, 1, 2, {0, 1}])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, lambda_],
[float, int, float, float, float, float])
qa_tm1 = base.batched_index(q_tm1, a_tm1)
v_t = jnp.max(q_t, axis=-1)
target_tm1 = multistep.lambda_returns(r_t, discount_t, v_t, lambda_)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - qa_tm1
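# Illustrative usage sketch (hypothetical values, not part of the library):
# inputs are per-timestep sequences for a single trajectory. With lambda_ = 0.
# the lambda-return reduces to a one-step bootstrap at every step, so
#   q_lambda(jnp.array([[1., 2.], [2., 3.]]),  # q_tm1, shape [T, A]
#            jnp.array([0, 1]),                # a_tm1
#            jnp.array([1., 1.]),              # r_t
#            jnp.array([0.9, 0.9]),            # discount_t
#            jnp.array([[2., 3.], [4., 1.]]),  # q_t
#            lambda_=0.)
# returns the per-step errors [1. + 0.9 * 3. - 1., 1. + 0.9 * 4. - 3.].
# Use jax.vmap to add a leading batch dimension, as in the unit tests.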
def retrace(
q_tm1: Array,
q_t: Array,
a_tm1: Array,
a_t: Array,
r_t: Array,
discount_t: Array,
pi_t: Array,
mu_t: Array,
lambda_: float,
eps: float = 1e-8,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates Retrace errors.
See "Safe and Efficient Off-Policy Reinforcement Learning" by Munos et al.
(https://arxiv.org/abs/1606.02647).
Args:
q_tm1: Q-values at time t-1.
q_t: Q-values at time t.
a_tm1: action index at time t-1.
a_t: action index at time t.
r_t: reward at time t.
discount_t: discount at time t.
pi_t: target policy probs at time t.
mu_t: behavior policy probs at time t.
lambda_: scalar mixing parameter lambda.
eps: small value to add to mu_t for numerical stability.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Retrace error.
"""
chex.assert_rank([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],
[2, 2, 1, 1, 1, 1, 2, 1])
chex.assert_type([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],
[float, float, int, int, float, float, float, float])
pi_a_t = base.batched_index(pi_t, a_t)
c_t = jnp.minimum(1.0, pi_a_t / (mu_t + eps)) * lambda_
target_tm1 = multistep.general_off_policy_returns_from_action_values(
q_t, a_t, r_t, discount_t, c_t, pi_t)
q_a_tm1 = base.batched_index(q_tm1, a_tm1)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_a_tm1
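# Illustrative usage sketch (hypothetical shapes, not part of the library):
# given single-trajectory arrays q, target_q of shape [T + 1, A] and actions,
# rewards, discounts and policy probabilities of length T + 1, Retrace is
# typically applied to time-aligned slices, mirroring the unit tests:
#   retrace(q[:-1], target_q[1:], a[:-1], a[1:], r[:-1], discount[:-1],
#           pi[1:], mu[1:], lambda_=0.95)
# and batched over trajectories with jax.vmap.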
def retrace_continuous(q_tm1: Array,
q_t: Array,
v_t: Array,
r_t: Array,
discount_t: Array,
log_rhos: Array,
lambda_: Union[Array, float],
stop_target_gradients: bool = True) -> Array:
"""Retrace continuous.
See "Safe and Efficient Off-Policy Reinforcement Learning" by Munos et al.
(https://arxiv.org/abs/1606.02647).
Args:
q_tm1: Q-values at times [0, ..., K - 1].
q_t: Q-values evaluated at actions collected using behavior
policy at times [1, ..., K - 1].
v_t: Value estimates of the target policy at times [1, ..., K].
r_t: reward at times [1, ..., K].
discount_t: discount at times [1, ..., K].
log_rhos: Log importance weight pi_target/pi_behavior evaluated at actions
collected using behavior policy [1, ..., K - 1].
lambda_: scalar or a vector of mixing parameter lambda.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Retrace error.
"""
chex.assert_rank([q_tm1, q_t, r_t, discount_t, log_rhos, lambda_],
[1, 1, 1, 1, 1, {0, 1}])
chex.assert_type([q_tm1, q_t, r_t, discount_t, log_rhos],
[float, float, float, float, float])
c_t = jnp.minimum(1.0, jnp.exp(log_rhos)) * lambda_
# The generalized returns are independent of Q-values and cs at the final
# state.
target_tm1 = multistep.general_off_policy_returns_from_q_and_v(
q_t, v_t, r_t, discount_t, c_t)
target_tm1 = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(target_tm1), target_tm1)
return target_tm1 - q_tm1
def categorical_l2_project(
z_p: Array,
probs: Array,
z_q: Array
) -> Array:
"""Projects a categorical distribution (z_p, p) onto a different support z_q.
The projection step minimizes an L2-metric over the cumulative distribution
functions (CDFs) of the source and target distributions.
Let kq be len(z_q) and kp be len(z_p). This projection works for any
support z_q, in particular kq need not be equal to kp.
See "A Distributional Perspective on RL" by Bellemare et al.
(https://arxiv.org/abs/1707.06887).
Args:
z_p: support of distribution p.
probs: probability values.
z_q: support to project distribution (z_p, probs) onto.
Returns:
Projection of (z_p, p) onto support z_q under Cramer distance.
"""
chex.assert_rank([z_p, probs, z_q], 1)
chex.assert_type([z_p, probs, z_q], float)
kp = z_p.shape[0]
kq = z_q.shape[0]
# Construct helper arrays from z_q.
d_pos = jnp.roll(z_q, shift=-1)
d_neg = jnp.roll(z_q, shift=1)
# Clip z_p to be in new support range (vmin, vmax).
z_p = jnp.clip(z_p, z_q[0], z_q[-1])[None, :]
assert z_p.shape == (1, kp)
# Get the distance between atom values in support.
d_pos = (d_pos - z_q)[:, None] # z_q[i+1] - z_q[i]
d_neg = (z_q - d_neg)[:, None] # z_q[i] - z_q[i-1]
z_q = z_q[:, None]
assert z_q.shape == (kq, 1)
# Ensure that we do not divide by zero, in case of atoms of identical value.
d_neg = jnp.where(d_neg > 0, 1. / d_neg, jnp.zeros_like(d_neg))
d_pos = jnp.where(d_pos > 0, 1. / d_pos, jnp.zeros_like(d_pos))
delta_qp = z_p - z_q # clip(z_p)[j] - z_q[i]
d_sign = (delta_qp >= 0.).astype(probs.dtype)
assert delta_qp.shape == (kq, kp)
assert d_sign.shape == (kq, kp)
# Matrix of entries sgn(a_ij) * |a_ij|, with a_ij = clip(z_p)[j] - z_q[i].
delta_hat = (d_sign * delta_qp * d_pos) - ((1. - d_sign) * delta_qp * d_neg)
probs = probs[None, :]
assert delta_hat.shape == (kq, kp)
assert probs.shape == (1, kp)
return jnp.sum(jnp.clip(1. - delta_hat, 0., 1.) * probs, axis=-1)
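# Illustrative usage sketch (hypothetical values, not part of the library):
# a point mass at 0.25 projected onto the support [0., 0.5, 1.] is split
# between the two neighbouring atoms in proportion to their distance:
#   categorical_l2_project(jnp.array([0.25]), jnp.array([1.]),
#                          jnp.array([0., 0.5, 1.]))  # [0.5, 0.5, 0.]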
def categorical_td_learning(
v_atoms_tm1: Array,
v_logits_tm1: Array,
r_t: Numeric,
discount_t: Numeric,
v_atoms_t: Array,
v_logits_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements TD-learning for categorical value distributions.
See "A Distributional Perspective on Reinforcement Learning", by
Bellemare, Dabney and Munos (https://arxiv.org/pdf/1707.06887.pdf).
Args:
v_atoms_tm1: atoms of V distribution at time t-1.
v_logits_tm1: logits of V distribution at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
v_atoms_t: atoms of V distribution at time t.
v_logits_t: logits of V distribution at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Categorical TD-learning loss (i.e. temporal difference error).
"""
chex.assert_rank(
[v_atoms_tm1, v_logits_tm1, r_t, discount_t, v_atoms_t, v_logits_t],
[1, 1, 0, 0, 1, 1])
chex.assert_type(
[v_atoms_tm1, v_logits_tm1, r_t, discount_t, v_atoms_t, v_logits_t],
[float, float, float, float, float, float])
# Scale and shift time-t distribution atoms by discount and reward.
target_z = r_t + discount_t * v_atoms_t
# Convert logits to distribution.
v_t_probs = jax.nn.softmax(v_logits_t)
# Project using the Cramer distance and maybe stop gradient flow to targets.
target = categorical_l2_project(target_z, v_t_probs, v_atoms_tm1)
target = jax.lax.select(stop_target_gradients, jax.lax.stop_gradient(target),
target)
# Compute loss (i.e. temporal difference error).
return distributions.categorical_cross_entropy(
labels=target, logits=v_logits_tm1)
def categorical_q_learning(
q_atoms_tm1: Array,
q_logits_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_atoms_t: Array,
q_logits_t: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements Q-learning for categorical Q distributions.
See "A Distributional Perspective on Reinforcement Learning", by
Bellemare, Dabney and Munos (https://arxiv.org/pdf/1707.06887.pdf).
Args:
q_atoms_tm1: atoms of Q distribution at time t-1.
q_logits_tm1: logits of Q distribution at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_atoms_t: atoms of Q distribution at time t.
q_logits_t: logits of Q distribution at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Categorical Q-learning loss (i.e. temporal difference error).
"""
chex.assert_rank([
q_atoms_tm1, q_logits_tm1, a_tm1, r_t, discount_t, q_atoms_t, q_logits_t
], [1, 2, 0, 0, 0, 1, 2])
chex.assert_type([
q_atoms_tm1, q_logits_tm1, a_tm1, r_t, discount_t, q_atoms_t, q_logits_t
], [float, float, int, float, float, float, float])
# Scale and shift time-t distribution atoms by discount and reward.
target_z = r_t + discount_t * q_atoms_t
# Convert logits to distribution, then find greedy action in state s_t.
q_t_probs = jax.nn.softmax(q_logits_t)
q_t_mean = jnp.sum(q_t_probs * q_atoms_t[jnp.newaxis, :], axis=1)
pi_t = jnp.argmax(q_t_mean)
# Compute distribution for greedy action.
p_target_z = q_t_probs[pi_t]
# Project using the Cramer distance and maybe stop gradient flow to targets.
target = categorical_l2_project(target_z, p_target_z, q_atoms_tm1)
target = jax.lax.select(stop_target_gradients, jax.lax.stop_gradient(target),
target)
# Compute loss (i.e. temporal difference error).
logit_qa_tm1 = q_logits_tm1[a_tm1]
return distributions.categorical_cross_entropy(
labels=target, logits=logit_qa_tm1)
def categorical_double_q_learning(
q_atoms_tm1: Array,
q_logits_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
q_atoms_t: Array,
q_logits_t: Array,
q_t_selector: Array,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements double Q-learning for categorical Q distributions.
See "A Distributional Perspective on Reinforcement Learning", by
Bellemare, Dabney and Munos (https://arxiv.org/pdf/1707.06887.pdf)
and "Double Q-learning" by van Hasselt.
(https://papers.nips.cc/paper/3964-double-q-learning.pdf).
Args:
q_atoms_tm1: atoms of Q distribution at time t-1.
q_logits_tm1: logits of Q distribution at time t-1.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
q_atoms_t: atoms of Q distribution at time t.
q_logits_t: logits of Q distribution at time t.
q_t_selector: selector Q-values at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Categorical double Q-learning loss (i.e. temporal difference error).
"""
chex.assert_rank([
q_atoms_tm1, q_logits_tm1, a_tm1, r_t, discount_t, q_atoms_t, q_logits_t,
q_t_selector
], [1, 2, 0, 0, 0, 1, 2, 1])
chex.assert_type([
q_atoms_tm1, q_logits_tm1, a_tm1, r_t, discount_t, q_atoms_t, q_logits_t,
q_t_selector
], [float, float, int, float, float, float, float, float])
# Scale and shift time-t distribution atoms by discount and reward.
target_z = r_t + discount_t * q_atoms_t
# Select logits for greedy action in state s_t and convert to distribution.
p_target_z = jax.nn.softmax(q_logits_t[q_t_selector.argmax()])
# Project using the Cramer distance and maybe stop gradient flow to targets.
target = categorical_l2_project(target_z, p_target_z, q_atoms_tm1)
target = jax.lax.select(stop_target_gradients, jax.lax.stop_gradient(target),
target)
# Compute loss (i.e. temporal difference error).
logit_qa_tm1 = q_logits_tm1[a_tm1]
return distributions.categorical_cross_entropy(
labels=target, logits=logit_qa_tm1)
def quantile_regression_loss(
dist_src: Array,
tau_src: Array,
dist_target: Array,
huber_param: float = 0.,
stop_target_gradients: bool = True,
) -> Numeric:
"""Compute (Huber) QR loss between two discrete quantile-valued distributions.
See "Distributional Reinforcement Learning with Quantile Regression" by
Dabney et al. (https://arxiv.org/abs/1710.10044).
Args:
dist_src: source probability distribution.
tau_src: source distribution probability thresholds.
dist_target: target probability distribution.
huber_param: Huber loss parameter, defaults to 0 (no Huber loss).
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Quantile regression loss.
"""
chex.assert_rank([dist_src, tau_src, dist_target], 1)
chex.assert_type([dist_src, tau_src, dist_target], float)
# Calculate quantile error.
delta = dist_target[None, :] - dist_src[:, None]
delta_neg = (delta < 0.).astype(jnp.float32)
delta_neg = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(delta_neg), delta_neg)
weight = jnp.abs(tau_src[:, None] - delta_neg)
# Calculate Huber loss.
if huber_param > 0.:
loss = clipping.huber_loss(delta, huber_param)
else:
loss = jnp.abs(delta)
loss *= weight
# Average over target-samples dimension, sum over src-samples dimension.
return jnp.sum(jnp.mean(loss, axis=-1))
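# Illustrative usage sketch (hypothetical values, not part of the library):
# a single source quantile at the median (tau = 0.5) regressed towards the
# target samples [1., -1.] weights both absolute errors by 0.5, giving a loss
# of (0.5 * 1. + 0.5 * 1.) / 2 = 0.5:
#   quantile_regression_loss(jnp.array([0.]), jnp.array([0.5]),
#                            jnp.array([1., -1.]))  # 0.5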
def quantile_q_learning(
dist_q_tm1: Array,
tau_q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
dist_q_t_selector: Array,
dist_q_t: Array,
huber_param: float = 0.,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements Q-learning for quantile-valued Q distributions.
See "Distributional Reinforcement Learning with Quantile Regression" by
Dabney et al. (https://arxiv.org/abs/1710.10044).
Args:
dist_q_tm1: Q distribution at time t-1.
tau_q_tm1: Q distribution probability thresholds.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
dist_q_t_selector: Q distribution at time t for selecting greedy action in
target policy. This is separate from dist_q_t as in Double Q-Learning, but
can be computed with the target network and a separate set of samples.
dist_q_t: target Q distribution at time t.
huber_param: Huber loss parameter, defaults to 0 (no Huber loss).
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Quantile regression Q learning loss.
"""
chex.assert_rank([
dist_q_tm1, tau_q_tm1, a_tm1, r_t, discount_t, dist_q_t_selector, dist_q_t
], [2, 1, 0, 0, 0, 2, 2])
chex.assert_type([
dist_q_tm1, tau_q_tm1, a_tm1, r_t, discount_t, dist_q_t_selector, dist_q_t
], [float, float, int, float, float, float, float])
# Only update the taken actions.
dist_qa_tm1 = dist_q_tm1[:, a_tm1]
# Select target action according to greedy policy w.r.t. dist_q_t_selector.
q_t_selector = jnp.mean(dist_q_t_selector, axis=0)
a_t = jnp.argmax(q_t_selector)
dist_qa_t = dist_q_t[:, a_t]
# Compute target, do not backpropagate into it.
dist_target = r_t + discount_t * dist_qa_t
dist_target = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(dist_target), dist_target)
return quantile_regression_loss(
dist_qa_tm1, tau_q_tm1, dist_target, huber_param)
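# Illustrative usage sketch (hypothetical shapes, not part of the library):
# for a single transition, dist_q_tm1, dist_q_t_selector and dist_q_t all have
# shape [num_taus, num_actions]. As in double Q-learning, the greedy action is
# chosen from dist_q_t_selector (averaged over taus) and evaluated under
# dist_q_t; batches of transitions are handled with jax.vmap, as in the tests.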
def quantile_expected_sarsa(
dist_q_tm1: Array,
tau_q_tm1: Array,
a_tm1: Numeric,
r_t: Numeric,
discount_t: Numeric,
dist_q_t: Array,
probs_a_t: Array,
huber_param: float = 0.,
stop_target_gradients: bool = True,
) -> Numeric:
"""Implements Expected SARSA for quantile-valued Q distributions.
Args:
dist_q_tm1: Q distribution at time t-1.
tau_q_tm1: Q distribution probability thresholds.
a_tm1: action index at time t-1.
r_t: reward at time t.
discount_t: discount at time t.
dist_q_t: target Q distribution at time t.
probs_a_t: action probabilities at time t.
huber_param: Huber loss parameter, defaults to 0 (no Huber loss).
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Quantile regression Expected SARSA learning loss.
"""
chex.assert_rank([
dist_q_tm1, tau_q_tm1, a_tm1, r_t, discount_t, dist_q_t, probs_a_t
], [2, 1, 0, 0, 0, 2, 1])
chex.assert_type([
dist_q_tm1, tau_q_tm1, a_tm1, r_t, discount_t, dist_q_t, probs_a_t
], [float, float, int, float, float, float, float])
# Only update the taken actions.
dist_qa_tm1 = dist_q_tm1[:, a_tm1]
# Compute target, do not backpropagate into it.
dist_target = r_t + discount_t * dist_q_t
dist_target = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(dist_target), dist_target)
probs_a_t = jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(probs_a_t), probs_a_t)
per_action_qr = jax.vmap(
quantile_regression_loss, in_axes=(None, None, 1, None))
per_action_loss = per_action_qr(
dist_qa_tm1, tau_q_tm1, dist_target, huber_param)
return jnp.dot(per_action_loss, probs_a_t)
| rlax-master | rlax/_src/value_learning.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `base.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from rlax._src import base
class OneHotTest(parameterized.TestCase):
def test_one_hot(self):
num_classes = 3
indices = jnp.array(
[[[1., 2., 3.], [1., 2., 2.]]])
expected_result = jnp.array([
[[[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]],
[[0., 1., 0.], [0., 0., 1.], [0., 0., 1.]]]])
result = base.one_hot(indices, num_classes)
np.testing.assert_array_almost_equal(result, expected_result)
class BroadcastTest(parameterized.TestCase):
@parameterized.parameters(
([1], [1, 2, 3], [1, 1, 1]),
([1, 2, 1], [1, 2, 3], [1, 2, 1]),
([2, 1, 2], [2, 2, 2, 3], [2, 1, 2, 1]),
([1, 2, 4], [1, 2, 4], [1, 2, 4]),
)
def test_lhs_broadcasting(
self, source_shape, target_shape, expected_result_shape):
source = jnp.ones(shape=source_shape, dtype=jnp.float32)
target = jnp.ones(shape=target_shape, dtype=jnp.float32)
expected_result = jnp.ones(shape=expected_result_shape, dtype=jnp.float32)
result = base.lhs_broadcast(source, target)
np.testing.assert_array_almost_equal(result, expected_result)
def test_lhs_broadcast_raises(self):
source = jnp.ones(shape=(1, 2), dtype=jnp.float32)
target = jnp.ones(shape=(1, 3, 1, 1), dtype=jnp.float32)
with self.assertRaisesRegex(ValueError, 'source shape'):
base.lhs_broadcast(source, target)
class ReplaceTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.data = jnp.array([
[1, 2, 3, 4, 5, 6],
[-1, -2, -3, -4, -5, -6]
])
self.replacement = self.data * 10
self.mask = jnp.array([0, 1])
def test_replace_masked(self):
output = base.replace_masked(self.data, self.replacement, self.mask)
expected_output = jnp.array([
[1, 2, 3, 4, 5, 6],
[-10, -20, -30, -40, -50, -60],
])
# Test output.
np.testing.assert_allclose(output, expected_output)
def test_replace_masked_zeros(self):
output = base.replace_masked(self.data, None, self.mask)
expected_output = jnp.array([
[1, 2, 3, 4, 5, 6],
[0, 0, 0, 0, 0, 0],
])
# Test output.
np.testing.assert_allclose(output, expected_output)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/base_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `policy_gradients.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import numpy as np
from rlax._src import distributions
from rlax._src import policy_gradients
class DpgLossTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.s_t = np.array([[0, 1, 0], [1, 1, 2]], dtype=np.float32)  # [B, state_dim]
self.w_s = np.ones([3, 2], dtype=np.float32)
b_s = np.zeros([2], dtype=np.float32)
# Add batch dimension to satisfy shape assertions.
self.b_s = jnp.expand_dims(b_s, 0)
self.w = np.ones([2, 1], dtype=np.float32)
self.b = np.zeros([1], dtype=np.float32)
self.expected = np.array([0.5, 0.5], dtype=np.float32)
@chex.all_variants()
def test_dpg_loss_batch(self):
"""Tests for a full batch."""
dpg = self.variant(jax.vmap(functools.partial(
policy_gradients.dpg_loss, dqda_clipping=1.)))
# Actor and critic function approximators.
actor = lambda s_t: jnp.matmul(s_t, self.w_s) + self.b_s
critic = lambda a_t: jnp.squeeze(jnp.matmul(a_t, self.w) + self.b)
# Compute loss.
a_t = actor(self.s_t)
dqda = jax.vmap(jax.grad(critic))(a_t)
# Test outputs.
actual = np.sum(dpg(a_t, dqda), axis=1)
np.testing.assert_allclose(actual, self.expected, atol=1e-4)
class PolicyGradientLossTest(parameterized.TestCase):
def setUp(self):
super().setUp()
logits = np.array(
[[1., 1., 1.], [2., 0., 0.], [-1., -2., -3.]], dtype=np.float32)
self.logits = np.stack([logits, logits + 1.])
weights = np.array([-2., 2., 0], dtype=np.float32)
self.weights = np.stack([weights, weights - 1.])
advantages = np.array([0.3, 0.2, 0.1], dtype=np.float32)
self.advantages = np.stack([advantages, -advantages])
self.actions = np.array([[0, 1, 2], [0, 0, 0]], dtype=np.int32)
self.expected = np.array([0.0788835088, 0.327200909], dtype=np.float32)
@chex.all_variants()
def test_policy_gradient_loss_batch(self):
"""Tests for a full batch."""
policy_gradient_loss = self.variant(jax.vmap(
policy_gradients.policy_gradient_loss))
# Test outputs.
actual = policy_gradient_loss(self.logits, self.actions, self.advantages,
self.weights)
np.testing.assert_allclose(self.expected, actual, atol=1e-4)
class EntropyLossTest(parameterized.TestCase):
def setUp(self):
super().setUp()
logits = np.array(
[[1., 1., 1.], [2., 0., 0.], [-1., -2., -3.]], dtype=np.float32)
self.logits = np.stack([logits, logits + 1.])
weights = np.array([-2., 2., 0], dtype=np.float32)
self.weights = np.stack([weights, weights - 1.])
self.expected = np.array([0.288693, 1.15422], dtype=np.float32)
@chex.all_variants()
def test_entropy_loss_batch(self):
"""Tests for a full batch."""
entropy_loss = self.variant(jax.vmap(policy_gradients.entropy_loss))
# Test outputs.
actual = entropy_loss(self.logits, self.weights)
np.testing.assert_allclose(self.expected, actual, atol=1e-4)
class QPGLossTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.q_values = jnp.array([[0., -1., 1.], [1., -1., 0]])
self.policy_logits = jnp.array([[1., 1., 1.], [1., 1., 4.]])
# baseline = \sum_a pi_a * Q_a = 0.
# loss = -\sum_a pi_a * (Q_a - baseline)
# negative sign as it's a loss term and loss needs to be minimized.
self.expected_policy_loss = (0.0 + 0.0) / 2
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_qpg_loss_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
# Vmap and optionally compile.
qpg_loss = compile_fn(policy_gradients.qpg_loss)
# Optionally convert to device array.
policy_logits, q_values = jax.tree_map(place_fn,
(self.policy_logits, self.q_values))
# Test outputs.
actual = qpg_loss(policy_logits, q_values)
np.testing.assert_allclose(self.expected_policy_loss, actual, atol=1e-4)
class RMLossTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.q_values = jnp.array([[0., -1., 1.], [1., -1., 0]])
self.policy_logits = jnp.array([[1., 1., 1.], [1., 1., 4.]])
# baseline = \sum_a pi_a * Q_a = 0.
# -\sum_a pi_a * relu(Q_a - baseline)
# negative sign as it's a loss term and loss needs to be minimized.
self.expected_policy_loss = -(.3333 + .0452) / 2
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_rm_loss_batch(self, compile_fn, place_fn):
"""Tests for a full batch."""
# Vmap and optionally compile.
rm_loss = compile_fn(policy_gradients.rm_loss)
# Optionally convert to device array.
policy_logits, q_values = jax.tree_map(place_fn,
(self.policy_logits, self.q_values))
# Test outputs.
actual = rm_loss(policy_logits, q_values)
np.testing.assert_allclose(self.expected_policy_loss, actual, atol=1e-4)
class RPGLossTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.q_values = jnp.array([[0., -1., 1.], [1., -1., 0]])
self.policy_logits = jnp.array([[1., 1., 1.], [1., 1., 4.]])
# baseline = \sum_a pi_a * Q_a = 0.
# loss = \sum_a relu(Q_a - baseline), i.e. the summed positive regrets;
# this is already non-negative, so no sign flip is needed for minimization.
self.expected_policy_loss = (1.0 + 1.0) / 2
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_rpg_loss(self, compile_fn, place_fn):
"""Tests for a full batch."""
# Vmap and optionally compile.
rpg_loss = compile_fn(policy_gradients.rpg_loss)
# Optionally convert to device array.
policy_logits, q_values = jax.tree_map(place_fn,
(self.policy_logits, self.q_values))
# Test outputs.
actual = rpg_loss(policy_logits, q_values)
np.testing.assert_allclose(self.expected_policy_loss, actual, atol=1e-4)
class ClippedSurrogatePGLossTest(parameterized.TestCase):
def setUp(self):
super().setUp()
logits = np.array(
[[1., 1., 1.], [2., 0., 0.], [-1., -2., -3.]], dtype=np.float32)
old_logits = np.array(
[[1., 1., 1.], [2., 0., 0.], [-3., -2., -1.]], dtype=np.float32)
self.logits = np.stack([logits, logits])
self.old_logits = np.stack([old_logits, old_logits])
advantages = np.array([0.3, 0.2, 0.1], dtype=np.float32)
self.advantages = np.stack([advantages, -advantages])
self.actions = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int32)
self.epsilon = 0.2
self.expected = np.array([-0.17117467, 0.19333333])
@chex.all_variants()
def test_clipped_surrogate_pg_loss_batch(self):
"""Tests for a full batch."""
get_ratios = jax.vmap(distributions.categorical_importance_sampling_ratios)
prob_ratios = get_ratios(self.logits, self.old_logits, self.actions)
batched_fn_variant = self.variant(jax.vmap(functools.partial(
policy_gradients.clipped_surrogate_pg_loss, epsilon=self.epsilon)))
actual = batched_fn_variant(prob_ratios, self.advantages)
np.testing.assert_allclose(actual, self.expected, atol=1e-4)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/policy_gradients_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `value_learning.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import numpy as np
from rlax._src import distributions
from rlax._src import value_learning
class TDLearningTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.v_tm1 = np.array(
[1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float32)
self.r_t = np.array(
[-1, -1, -1, -1, -1, -1, -1, -1, -1], dtype=np.float32)
self.discount_t = np.array(
[0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1], dtype=np.float32)
self.v_t = np.array(
[0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.float32)
self.expected_td = np.array(
[-2., -2., -2., -2., -1.5, -1., -2., -1., 0.], dtype=np.float32)
@chex.all_variants()
def test_td_learning_batch(self):
"""Tests for a full batch."""
td_learning = self.variant(jax.vmap(value_learning.td_learning))
# Compute errors in batch.
actual_td = td_learning(self.v_tm1, self.r_t, self.discount_t, self.v_t)
# Test output.
np.testing.assert_allclose(self.expected_td, actual_td)
class TDLambdaTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.lambda_ = 0.75
self.v_tm1 = np.array(
[[1.1, -1.1, 3.1], [2.1, -1.1, -2.1]], dtype=np.float32)
self.discount_t = np.array(
[[0., 0.89, 0.85], [0.88, 1., 0.83]], dtype=np.float32)
self.r_t = np.array(
[[-1.3, -1.3, 2.3], [1.3, 5.3, -3.3]], dtype=np.float32)
self.bootstrap_v = np.array([2.2, -1.2], np.float32)
self.expected = np.array(
[[-2.4, 3.2732253, 1.0700002],
[-0.01701999, 2.6529999, -2.196]],
dtype=np.float32)
@chex.all_variants()
def test_batch_compatibility(self):
"""Tests for a full batch."""
td_lambda = self.variant(jax.vmap(functools.partial(
value_learning.td_lambda, lambda_=self.lambda_)))
# Get arguments.
v_t = np.concatenate([self.v_tm1[:, 1:], self.bootstrap_v[:, None]], axis=1)
# Test output
actual = td_lambda(self.v_tm1, self.r_t, self.discount_t, v_t)
np.testing.assert_allclose(self.expected, actual, rtol=1e-4)
class SarsaTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.q_tm1 = np.array([[1, 1, 0], [1, 1, 0]], dtype=np.float32)
self.a_tm1 = np.array([0, 1], dtype=np.int32)
self.r_t = np.array([1, 1], dtype=np.float32)
self.discount_t = np.array([0, 1], dtype=np.float32)
self.q_t = np.array([[0, 1, 0], [3, 2, 0]], dtype=np.float32)
self.a_t = np.array([1, 0], dtype=np.int32)
self.expected = np.array([0., 3.], dtype=np.float32)
@chex.all_variants()
def test_sarsa_batch(self):
"""Tests for a full batch."""
batch_sarsa = self.variant(jax.vmap(value_learning.sarsa))
# Test outputs.
actual = batch_sarsa(self.q_tm1, self.a_tm1, self.r_t, self.discount_t,
self.q_t, self.a_t)
np.testing.assert_allclose(self.expected, actual)
class ExpectedSarsaTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.q_tm1 = np.array(
[[1, 1, 0.5], [1, 1, 3]], dtype=np.float32)
self.a_tm1 = np.array(
[0, 1], dtype=np.int32)
self.r_t = np.array(
[4, 1], dtype=np.float32)
self.discount_t = np.array(
[1, 1], dtype=np.float32)
self.q_t = np.array(
[[1.5, 1, 2], [3, 2, 1]], dtype=np.float32)
self.probs_a_t = np.array(
[[0.2, 0.5, 0.3], [0.3, 0.4, 0.3]], dtype=np.float32)
self.expected = np.array(
[4.4, 2.], dtype=np.float32)
@chex.all_variants()
def test_expected_sarsa_batch(self):
"""Tests for a full batch."""
expected_sarsa = self.variant(jax.vmap(value_learning.expected_sarsa))
# Test outputs.
actual = expected_sarsa(self.q_tm1, self.a_tm1, self.r_t, self.discount_t,
self.q_t, self.probs_a_t)
np.testing.assert_allclose(self.expected, actual)
class SarsaLambdaTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.lambda_ = 0.75
self.q_tm1 = np.array(
[[[1.1, 2.1], [-1.1, 1.1], [3.1, -3.1]],
[[2.1, 3.1], [-1.1, 0.1], [-2.1, -1.1]]],
dtype=np.float32)
self.a_tm1 = np.array(
[[0, 1, 0],
[1, 0, 0]],
dtype=np.int32)
self.discount_t = np.array(
[[0., 0.89, 0.85],
[0.88, 1., 0.83]],
dtype=np.float32)
self.r_t = np.array(
[[-1.3, -1.3, 2.3],
[1.3, 5.3, -3.3]],
dtype=np.float32)
self.q_t = np.array(
[[[1.2, 2.2], [-1.2, 0.2], [2.2, -1.2]],
[[4.2, 2.2], [1.2, 1.2], [-1.2, -2.2]]],
dtype=np.float32)
self.a_t = np.array(
[[1, 0, 1],
[1, 1, 0]],
dtype=np.int32)
self.expected = np.array(
[[-2.4, -1.8126001, -1.8200002], [0.25347996, 3.4780002, -2.196]],
dtype=np.float32)
@chex.all_variants()
def test_sarsa_lambda_batch(self):
"""Tests for a full batch."""
sarsa_lambda = self.variant(jax.vmap(functools.partial(
value_learning.sarsa_lambda, lambda_=self.lambda_)))
# Test outputs.
actual = sarsa_lambda(self.q_tm1, self.a_tm1, self.r_t, self.discount_t,
self.q_t, self.a_t)
np.testing.assert_allclose(self.expected, actual, rtol=1e-4)
class QLearningTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.q_tm1 = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)
self.a_tm1 = np.array([0, 1], dtype=np.int32)
self.r_t = np.array([1, 1], dtype=np.float32)
self.discount_t = np.array([0, 1], dtype=np.float32)
self.q_t = np.array([[0, 1, 0], [1, 2, 0]], dtype=np.float32)
self.expected = np.array([0., 1.], dtype=np.float32)
@chex.all_variants()
def test_q_learning_batch(self):
"""Tests for a full batch."""
q_learning = self.variant(jax.vmap(value_learning.q_learning))
# Test outputs.
actual = q_learning(self.q_tm1, self.a_tm1, self.r_t, self.discount_t,
self.q_t)
np.testing.assert_allclose(self.expected, actual)
class DoubleQLearningTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.q_tm1 = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)
self.a_tm1 = np.array([0, 1], dtype=np.int32)
self.r_t = np.array([1, 1], dtype=np.float32)
self.discount_t = np.array([0, 1], dtype=np.float32)
self.q_t_value = np.array([[99, 1, 98], [91, 2, 66]], dtype=np.float32)
self.q_t_selector = np.array([[2, 10, 1], [11, 20, 1]], dtype=np.float32)
self.expected = np.array([0., 1.], dtype=np.float32)
@chex.all_variants()
def test_double_q_learning_batch(self):
"""Tests for a full batch."""
double_q_learning = self.variant(jax.vmap(value_learning.double_q_learning))
# Test outputs.
actual = double_q_learning(self.q_tm1, self.a_tm1, self.r_t,
self.discount_t, self.q_t_value,
self.q_t_selector)
np.testing.assert_allclose(self.expected, actual)
class PersistentQLearningTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.action_gap_scale = 0.25
self.q_tm1 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
self.a_tm1 = np.array([0, 1, 1], dtype=np.int32)
self.r_t = np.array([3, 2, 7], dtype=np.float32)
self.discount_t = np.array([0, 1, 0.5], dtype=np.float32)
self.q_t = np.array([[11, 12], [20, 16], [-8, -4]], dtype=np.float32)
self.expected = np.array([2., 17., -1.], dtype=np.float32)
@chex.all_variants()
def test_persistent_q_learning_batch(self):
"""Tests for a full batch."""
# Vmap and optionally compile.
persistent_q_learning = self.variant(jax.vmap(functools.partial(
value_learning.persistent_q_learning,
action_gap_scale=self.action_gap_scale)))
# Test outputs.
actual = persistent_q_learning(self.q_tm1, self.a_tm1, self.r_t,
self.discount_t, self.q_t)
np.testing.assert_allclose(self.expected, actual)
class QVLearningTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.q_tm1 = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)
self.a_tm1 = np.array([0, 1], dtype=np.int32)
self.r_t = np.array([1, 1], dtype=np.float32)
self.discount_t = np.array([0, 1], dtype=np.float32)
self.v_t = np.array([1, 3], dtype=np.float32)
self.expected = np.array([0., 2.], dtype=np.float32)
@chex.all_variants()
def test_qv_learning_batch(self):
"""Tests for a full batch."""
batch_qv_learning = self.variant(jax.vmap(value_learning.qv_learning))
# Test outputs.
actual = batch_qv_learning(self.q_tm1, self.a_tm1, self.r_t,
self.discount_t, self.v_t)
np.testing.assert_allclose(self.expected, actual)
class QVMaxTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.v_tm1 = np.array(
[1, 1, 1, 1, 1, 1, 1, 1, 1],
dtype=np.float32)
self.r_t = np.array(
[-1, -1, -1, -1, -1, -1, -1, -1, -1],
dtype=np.float32)
self.discount_t = np.array(
[0, 0.5, 1, 0, 0.5, 1, 0, 0.5, 1],
dtype=np.float32)
self.q_t = np.array(
[[0, -1], [-2, 0], [0, -3], [1, 0], [1, 1], [0, 1],
[1, 2], [2, -2], [2, 2]],
dtype=np.float32)
self.expected = np.array(
[-2., -2., -2., -2., -1.5, -1., -2., -1., 0.],
dtype=np.float32)
@chex.all_variants()
def test_qv_max_batch(self):
"""Tests for a full batch."""
qv_max = self.variant(jax.vmap(value_learning.qv_max))
# Test outputs.
actual = qv_max(self.v_tm1, self.r_t, self.discount_t, self.q_t)
np.testing.assert_allclose(self.expected, actual)
class QLambdaTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.lambda_ = 0.75
self.q_tm1 = np.array(
[[[1.1, 2.1], [-1.1, 1.1], [3.1, -3.1]],
[[2.1, 3.1], [-1.1, 0.1], [-2.1, -1.1]]],
dtype=np.float32)
self.a_tm1 = np.array(
[[0, 1, 0],
[1, 0, 0]],
dtype=np.int32)
self.discount_t = np.array(
[[0., 0.89, 0.85],
[0.88, 1., 0.83]],
dtype=np.float32)
self.r_t = np.array(
[[-1.3, -1.3, 2.3],
[1.3, 5.3, -3.3]],
dtype=np.float32)
self.q_t = np.array(
[[[1.2, 2.2], [-1.2, 0.2], [2.2, -1.2]],
[[4.2, 2.2], [1.2, 1.2], [-1.2, -2.2]]],
dtype=np.float32)
self.expected = np.array(
[[-2.4, 0.427975, 1.07],
[0.69348, 3.478, -2.196]],
dtype=np.float32)
@chex.all_variants()
def test_q_lambda_batch(self):
"""Tests for a full batch."""
q_lambda = self.variant(jax.vmap(functools.partial(
value_learning.q_lambda, lambda_=self.lambda_)))
# Test outputs.
actual = q_lambda(self.q_tm1, self.a_tm1, self.r_t, self.discount_t,
self.q_t)
np.testing.assert_allclose(self.expected, actual, rtol=1e-5)
class RetraceTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._lambda = 0.9
self._qs = np.array(
[[[1.1, 2.1], [-1.1, 1.1], [3.1, -3.1], [-1.2, 0.0]],
[[2.1, 3.1], [9.5, 0.1], [-2.1, -1.1], [0.1, 7.4]]],
dtype=np.float32)
self._targnet_qs = np.array(
[[[1.2, 2.2], [-1.2, 0.2], [2.2, -1.2], [-2.25, -6.0]],
[[4.2, 2.2], [1.2, 1.2], [-1.2, -2.2], [1.5, 1.0]]],
dtype=np.float32)
self._actions = np.array(
[[0, 1, 0, 0], [1, 0, 0, 1]],
dtype=np.int32)
self._rewards = np.array(
[[-1.3, -1.3, 2.3, 42.0],
[1.3, 5.3, -3.3, -5.0]],
dtype=np.float32)
self._pcontinues = np.array(
[[0., 0.89, 0.85, 0.99],
[0.88, 1., 0.83, 0.95]],
dtype=np.float32)
self._target_policy_probs = np.array(
[[[0.5, 0.5], [0.2, 0.8], [0.6, 0.4], [0.9, 0.1]],
[[0.1, 0.9], [1.0, 0.0], [0.3, 0.7], [0.7, 0.3]]],
dtype=np.float32)
self._behavior_policy_probs = np.array(
[[0.5, 0.1, 0.9, 0.3], [0.4, 0.6, 1.0, 0.9]],
dtype=np.float32)
self._inputs = [
self._qs, self._targnet_qs, self._actions,
self._rewards, self._pcontinues,
self._target_policy_probs, self._behavior_policy_probs]
self.expected = np.array(
[[2.8800001, 3.8934109, 4.5942383],
[3.1121615e-1, 2.0253206e1, 3.1601219e-3]],
dtype=np.float32)
@chex.all_variants()
def test_retrace_batch(self):
"""Tests for a full batch."""
retrace = self.variant(jax.vmap(functools.partial(
value_learning.retrace, lambda_=self._lambda)))
# Test outputs.
actual_td = retrace(self._qs[:, :-1], self._targnet_qs[:, 1:],
self._actions[:, :-1], self._actions[:, 1:],
self._rewards[:, :-1], self._pcontinues[:, :-1],
self._target_policy_probs[:, 1:],
self._behavior_policy_probs[:, 1:])
actual_loss = 0.5 * np.square(actual_td)
np.testing.assert_allclose(self.expected, actual_loss, rtol=1e-5)
def _generate_sorted_support(size):
"""Generate a random support vector."""
support = np.random.normal(-1.0, 1.0, size=size).astype(np.float32)
return np.sort(support, axis=-1)
def _generate_weights(size):
"""Generates a weight distribution where half of entries are zero."""
normal = np.random.normal(-1.0, 1.0, size=size).astype(np.float32)
mask = (np.random.random(size=size) > 0.5).astype(np.float32)
return normal * mask
class RetraceContinuousTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._lambda = 0.9 * np.ones((2, 2), dtype=np.float32)
self._qs = np.array([[1.1, 1.1, 3.1, -1.2], [3.1, 9.5, -2.1, 7.4]],
dtype=np.float32)
self._targnet_qs = np.array([[1.2, 0.2, 2.2, -2.25], [2.2, 1.2, -1.2, 1.0]],
dtype=np.float32)
self._exp_q_t = np.array(
[[-0.08, 0.84000003, -2.625], [1.2, -1.9, 1.3499999]], dtype=np.float32)
self._rewards = np.array([[-1.3, -1.3, 2.3, 42.0], [1.3, 5.3, -3.3, -5.0]],
dtype=np.float32)
self._pcontinues = np.array(
[[0., 0.89, 0.85, 0.99], [0.88, 1., 0.83, 0.95]], dtype=np.float32)
self._log_rhos = np.array([[2.0794415, -0.4054651, 1.0986122],
[0.51082563, -1.2039728, -1.0986123]],
dtype=np.float32)
self._inputs = [
self._qs, self._targnet_qs, self._exp_q_t, self._rewards,
self._pcontinues, self._log_rhos
]
self.expected = np.array([[2.880000e+00, 5.643226e+00, 4.594238e+00],
[4.236919e-02, 2.044066e+01, 3.160141e-03]],
dtype=np.float32)
@chex.all_variants()
def test_retrace_batch(self):
"""Tests for a full batch."""
retrace = self.variant(jax.vmap(value_learning.retrace_continuous))
# Test outputs.
actual_td = retrace(self._qs[:, :-1], self._targnet_qs[:,
1:-1], self._exp_q_t,
self._rewards[:, :-1], self._pcontinues[:, :-1],
self._log_rhos[:, 1:], self._lambda)
actual_loss = 0.5 * np.square(actual_td)
np.testing.assert_allclose(self.expected, actual_loss, rtol=1e-5)
@chex.all_variants()
def test_retrace_terminal_batch(self):
"""Tests for a full batch with terminal state."""
is_terminal = np.array([[0., 1.], [0., 1.]], dtype=np.float32)
lambda_ = (1. - is_terminal) * self._lambda
expected = np.array([[2.880000e+00, 1.365213e+00, 4.594238e+00],
[2.448239e-02, 1.860500e+01, 3.160141e-03]],
dtype=np.float32)
retrace = self.variant(jax.vmap(value_learning.retrace_continuous))
# Test outputs.
actual_td = retrace(self._qs[:, :-1], self._targnet_qs[:, 1:-1],
self._exp_q_t, self._rewards[:, :-1],
self._pcontinues[:, :-1], self._log_rhos[:,
1:], lambda_)
actual_loss = 0.5 * np.square(actual_td)
np.testing.assert_allclose(expected, actual_loss, rtol=1e-5)
class L2ProjectTest(parameterized.TestCase):
def setUp(self):
super().setUp()
old_supports = np.arange(-1, 1., 0.25)
self.old_supports = np.stack([old_supports, old_supports + 1.])
weights = self.old_supports.copy()
weights[0, ::2] = 0.
weights[1, 1::2] = 0.
self.weights = weights
new_supports = np.arange(-1, 1., 0.5)
self.new_supports = np.stack([new_supports, new_supports + 1.])
self.expected = np.array([[-0.375, -0.5, 0., 0.875], [0., 0.5, 1., 1.5]],
dtype=np.float32)
@chex.all_variants()
def test_categorical_l2_project_batch(self):
"""Testsfor a full batch."""
l2_project = self.variant(jax.vmap(functools.partial(
value_learning.categorical_l2_project)))
# Compute projection in batch.
actual = l2_project(self.old_supports, self.weights, self.new_supports)
# Test outputs.
np.testing.assert_allclose(actual, self.expected)
class CategoricalTDLearningTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.atoms = np.array([.5, 1., 1.5], dtype=np.float32)
self.logits_tm1 = np.array(
[[0, 9, 0], [9, 0, 9], [0, 9, 0], [9, 9, 0], [9, 0, 9]],
dtype=np.float32)
self.r_t = np.array(
[0.5, 0., 0.5, 0.8, -0.1],
dtype=np.float32)
self.discount_t = np.array(
[0.8, 1., 0.8, 0., 1.],
dtype=np.float32)
self.logits_t = np.array(
[[0, 0, 9], [1, 1, 1], [0, 0, 9], [1, 1, 1], [0, 9, 9]],
dtype=np.float32)
self.expected = np.array(
[8.998915, 3.6932087, 8.998915, 0.69320893, 5.1929307],
dtype=np.float32)
@chex.all_variants()
def test_categorical_td_learning_batch(self):
"""Tests for a full batch."""
# Not using vmap for atoms.
def fn(v_logits_tm1, r_t, discount_t, v_logits_t):
return value_learning.categorical_td_learning(
v_atoms_tm1=self.atoms,
v_logits_tm1=v_logits_tm1,
r_t=r_t,
discount_t=discount_t,
v_atoms_t=self.atoms,
v_logits_t=v_logits_t)
categorical_td_learning = self.variant(jax.vmap(fn))
# Test outputs.
actual = categorical_td_learning(
self.logits_tm1, self.r_t, self.discount_t, self.logits_t)
np.testing.assert_allclose(self.expected, actual, rtol=1e-4)
class CategoricalQLearningTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.atoms = np.array([.5, 1., 1.5], dtype=np.float32)
self.q_logits_tm1 = np.array(
[[[1, 1, 1], [0, 9, 9], [0, 9, 0], [0, 0, 0]],
[[9, 9, 0], [9, 0, 9], [0, 0, 0], [9, -9, 0]],
[[1, 1, 1], [0, 9, 9], [0, 0, 0], [0, 9, 0]],
[[9, 9, 0], [9, 0, 9], [0, 0, 0], [9, -9, 0]],
[[9, 9, 0], [9, 0, 9], [0, 0, 0], [9, -9, 0]]],
dtype=np.float32)
self.q_logits_t = np.array(
[[[1, 1, 1], [9, 0, 9], [1, 0, 0], [0, 0, 9]],
[[9, 9, 0], [9, 0, 0], [1, 1, 1], [9, -9, 0]],
[[1, 1, 1], [9, 0, 9], [0, 0, 9], [1, 0, 0]],
[[9, 9, 0], [9, 0, 0], [1, 1, 1], [9, -9, 0]],
[[9, 9, 0], [9, 0, 0], [0, 9, 9], [9, -9, 0]]],
dtype=np.float32)
self.a_tm1 = np.array(
[2, 1, 3, 0, 1],
dtype=np.int32)
self.r_t = np.array(
[0.5, 0., 0.5, 0.8, -0.1],
dtype=np.float32)
self.discount_t = np.array(
[0.8, 1., 0.8, 0., 1.],
dtype=np.float32)
self.inputs = (
self.q_logits_tm1, self.a_tm1, self.r_t,
self.discount_t, self.q_logits_t)
self.expected = np.array(
[8.998915, 3.6932087, 8.998915, 0.69320893, 5.1929307],
dtype=np.float32)
@chex.all_variants()
def test_categorical_q_learning_batch(self):
"""Tests for a full batch."""
# Not using vmap for atoms.
def fn(q_logits_tm1, a_tm1, r_t, discount_t, q_logits_t):
return value_learning.categorical_q_learning(
q_atoms_tm1=self.atoms,
q_logits_tm1=q_logits_tm1,
a_tm1=a_tm1,
r_t=r_t,
discount_t=discount_t,
q_atoms_t=self.atoms,
q_logits_t=q_logits_t)
categorical_q_learning = self.variant(jax.vmap(fn))
# Test outputs.
actual = categorical_q_learning(*self.inputs)
np.testing.assert_allclose(self.expected, actual, rtol=1e-4)
class CategoricalDoubleQLearningTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.atoms = np.array([.5, 1., 1.5], dtype=np.float32)
self.q_logits_tm1 = np.array(
[[[1, 1, 1], [0, 9, 9], [0, 9, 0], [0, 0, 0]],
[[9, 9, 0], [9, 0, 9], [0, 0, 0], [9, -9, 0]],
[[1, 1, 1], [0, 9, 9], [0, 0, 0], [0, 9, 0]],
[[9, 9, 0], [9, 0, 9], [0, 0, 0], [9, -9, 0]],
[[9, 9, 0], [9, 0, 9], [0, 0, 0], [9, -9, 0]]],
dtype=np.float32)
self.q_logits_t = np.array(
[[[1, 1, 1], [9, 0, 9], [1, 0, 0], [0, 0, 9]],
[[9, 9, 0], [9, 0, 0], [1, 1, 1], [9, -9, 0]],
[[1, 1, 1], [9, 0, 9], [0, 0, 9], [1, 0, 0]],
[[9, 9, 0], [9, 0, 0], [1, 1, 1], [9, -9, 0]],
[[9, 9, 0], [9, 0, 0], [0, 9, 9], [9, -9, 0]]],
dtype=np.float32)
self.q_t_selector = np.array(
[[1, 0, 0, 9], [9, 0, 1, 1], [1, 9, 1, 1], [0, 1, 0, 9], [1, 1, 1, 9]],
dtype=np.float32)
self.a_tm1 = np.array(
[2, 1, 3, 0, 1],
dtype=np.int32)
self.r_t = np.array(
[0.5, 0., 0.5, 0.8, -0.1],
dtype=np.float32)
self.discount_t = np.array(
[0.8, 1., 0.8, 0., 1.],
dtype=np.float32)
self.inputs = (
self.q_logits_tm1, self.a_tm1, self.r_t,
self.discount_t, self.q_logits_t, self.q_t_selector)
self.expected = np.array(
[8.998915, 5.192931, 5.400247, 0.693209, 0.693431],
dtype=np.float32)
@chex.all_variants()
def test_categorical_double_q_learning_batch(self):
"""Tests for a full batch."""
# Not using vmap for atoms.
def fn(q_logits_tm1, a_tm1, r_t, discount_t, q_logits_t, q_t_selector):
return value_learning.categorical_double_q_learning(
q_atoms_tm1=self.atoms,
q_logits_tm1=q_logits_tm1,
a_tm1=a_tm1,
r_t=r_t,
discount_t=discount_t,
q_atoms_t=self.atoms,
q_logits_t=q_logits_t,
q_t_selector=q_t_selector)
categorical_double_q_learning = self.variant(jax.vmap(fn))
# Test outputs.
actual = categorical_double_q_learning(*self.inputs)
np.testing.assert_allclose(self.expected, actual, rtol=1e-4)
@chex.all_variants()
def test_single_double_q_learning_eq_batch(self):
"""Tests equivalence to categorical_q_learning when q_t_selector == q_t."""
# Not using vmap for atoms.
@self.variant
@jax.vmap
def batch_categorical_double_q_learning(
q_logits_tm1, a_tm1, r_t, discount_t, q_logits_t, q_t_selector):
return value_learning.categorical_double_q_learning(
q_atoms_tm1=self.atoms,
q_logits_tm1=q_logits_tm1,
a_tm1=a_tm1,
r_t=r_t,
discount_t=discount_t,
q_atoms_t=self.atoms,
q_logits_t=q_logits_t,
q_t_selector=q_t_selector)
@self.variant
@jax.vmap
def batch_categorical_q_learning(
q_logits_tm1, a_tm1, r_t, discount_t, q_logits_t):
return value_learning.categorical_q_learning(
q_atoms_tm1=self.atoms,
q_logits_tm1=q_logits_tm1,
a_tm1=a_tm1,
r_t=r_t,
discount_t=discount_t,
q_atoms_t=self.atoms,
q_logits_t=q_logits_t)
# Double Q-learning estimate with q_t_selector=q_t
distrib = distributions.softmax()
# Add batch and time dimension to atoms.
atoms = jnp.expand_dims(jnp.expand_dims(self.atoms, 0), 0)
q_t_selector = jnp.sum(distrib.probs(self.q_logits_t) * atoms, axis=-1)
actual = batch_categorical_double_q_learning(
self.q_logits_tm1, self.a_tm1, self.r_t, self.discount_t,
self.q_logits_t, q_t_selector)
# Q-learning estimate.
expected = batch_categorical_q_learning(
self.q_logits_tm1, self.a_tm1, self.r_t, self.discount_t,
self.q_logits_t)
# Test equivalence.
np.testing.assert_allclose(expected, actual)
class QuantileRegressionLossTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.dist_src = np.array([[-1., 3.], [-1., 3.]])
self.tau_src = np.array([[0.2, 0.7], [0., 0.4]])
self.dist_target = np.array([[-3., 4., 2.], [-3., 4., 2.]])
# delta = [[ -2 5 3 ], [ -6 1 -1 ]]
# Huber(2.)-delta = [[ 2 8 4 ], [ 10 .5 .5 ]]
#
# First batch element:
# |tau - Id_{d<0}| = [[ .8 .2 .2 ], [ .3 .7 .3 ]]
# Loss = 1/3 sum( |delta| . |tau - Id_{d<0}| ) = 2.0
# Huber(2.)-loss = 2.5
#
# Second batch element:
# |tau - Id_{d<0}| = [[ 1. 0. 0. ], [ .6 .4 .6 ]]
# Loss = 2.2
# Huber(2.)-loss = 8.5 / 3
self.expected_loss = {
0.: np.array([2.0, 2.2]),
2.: np.array([2.5, 8.5 / 3.])
}
@chex.all_variants()
@parameterized.named_parameters(
('nohuber', 0.),
('huber', 2.))
def test_quantile_regression_loss_batch(self, huber_param):
"""Tests for a full batch."""
loss_fn = value_learning.quantile_regression_loss
loss_fn = self.variant(jax.vmap(functools.partial(
loss_fn, huber_param=huber_param)))
# Compute quantile regression loss.
actual = loss_fn(self.dist_src, self.tau_src, self.dist_target)
# Test outputs in batch.
np.testing.assert_allclose(actual, self.expected_loss[huber_param],
rtol=3e-7)
class QuantileLearningTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.dist_q_tm1 = np.array( # n_batch = 3, n_taus = 2, n_actions = 4
[[[0, 1, -5, 6], [-1, 3, 0, -2]],
[[-5, 9, -5, 6], [2, 3, 1, -4]],
[[5, 1, -5, 7], [-1, 3, 0, -2]]],
dtype=np.float32)
self.tau_q_tm1 = np.array(
[[0.2, 0.7],
[0.1, 0.5],
[0.3, 0.4]],
dtype=np.float32)
self.a_tm1 = np.array(
[1, 2, 0],
dtype=np.int32)
self.r_t = np.array(
[0.5, -1., 0.],
dtype=np.float32)
self.discount_t = np.array(
[0.5, 0., 1],
dtype=np.float32)
self.dist_q_t = np.array(
[[[0, 5, 2, 2], [0, -3, 2, 2]],
[[-3, -1, 4, -3], [1, 3, 1, -4]],
[[-2, 2, -5, -7], [1, 3, 2, -2]]],
dtype=np.float32)
self.dist_q_t_selector = np.array(
[[[0, 7, 2, -2], [0, 4, 2, 2]],
[[-3, -1, 4, 3], [1, 3, 1, 4]],
[[-1, -2, -5, -6], [-1, -5, 2, -2]]],
dtype=np.float32)
# Scenario 1: these match the dist_q_t_selector above
self.greedy_probs_a_t = np.array(
[[0, 1, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0]],
dtype=np.float32)
dist_qa_tm1 = np.array(
[[1, 3], [-5, 1], [5, -1]],
dtype=np.float32)
# dist_qa_tm1 [ 1, 3]
# (batch x n_tau) = [-5, 1]
# [ 5, -1]
# dist_q_t_selector[mean] [ 0.0, 5.5, 2.0, 0.0]
# (batch x n_actions) = [-1.0, 1.0, 2.5, 3.5]
# [-1.0, -3.5, -1.5, -4.0]
# a_t = argmax_a dist_q_t_selector [1]
# (batch) = [3]
# [0]
# dist_qa_t [ 5, -3]
# (batch x n_taus) = [-3, -4]
# [-2, 1]
# target = r + gamma * dist_qa_t [ 3, -1]
# (batch x n_taus) = [-1, -1]
# [-2, 1]
dist_target = np.array(
[[3, -1], [-1, -1], [-2, 1]],
dtype=np.float32)
# Use qr loss to compute expected results (itself tested explicitly in
# distributions_test.py).
self.expected = {}
for huber_param in [0.0, 1.0]:
self.expected[huber_param] = np.array( # loop over batch
[value_learning.quantile_regression_loss(dqa, tau, dt, huber_param)
for (dqa, tau, dt) in zip(dist_qa_tm1, self.tau_q_tm1, dist_target)],
dtype=np.float32)
# Scenario 2:
# bootstrap targets are not an argmax, but averaging across actions
self.uniform_probs_a_t = np.ones((3, 4), dtype=np.float32) / 4.
# all_action_targets = r + gamma * dist_q_t (batch x n_taus x n_actions)
dts = np.array(
[[[0.5, 3, 1.5, 1.5], [0.5, -1, 1.5, 1.5]],
[[-1, -1, -1, -1], [-1, -1, -1, -1]],
[[-2, 2, -5, -7], [1, 3, 2, -2]]],
dtype=np.float32)
self.uniform_expected = {}
for huber_param in [0.0, 1.0]:
all_action_losses = [ # loop over actions and batch
[value_learning.quantile_regression_loss(dqa, tau, dt, huber_param)
for (dqa, tau, dt) in zip(dist_qa_tm1, self.tau_q_tm1, dts[:, :, a])]
for a in range(4)]
# uniform probabilities means the loss is averaged over actions
self.uniform_expected[huber_param] = np.array(
all_action_losses, dtype=np.float32).mean(axis=0)
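    # Editorial sketch (hedged) of how the quantities above are expected to
    # combine inside `quantile_q_learning` for a single unbatched example:
    #   dist_qa_tm1 = dist_q_tm1[:, a_tm1]                 # taus of taken action
    #   a_t = argmax_a mean_tau dist_q_t_selector[:, a]    # greedy bootstrap action
    #   dist_target = r_t + discount_t * dist_q_t[:, a_t]  # per-tau targets
    #   loss = quantile_regression_loss(dist_qa_tm1, tau_q_tm1, dist_target)
    # With `greedy_probs_a_t` the expected-sarsa variant reduces to this argmax
    # case, while uniform probabilities average the per-action losses as above.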
@chex.all_variants()
@parameterized.named_parameters(
('nohuber', 0.0),
('huber', 1.0))
def test_quantile_q_learning_batch(self, huber_param):
"""Tests for a full batch."""
quantile_q_learning = self.variant(jax.vmap(functools.partial(
value_learning.quantile_q_learning, huber_param=huber_param)))
# Test outputs.
actual = quantile_q_learning(
self.dist_q_tm1, self.tau_q_tm1, self.a_tm1, self.r_t, self.discount_t,
self.dist_q_t_selector, self.dist_q_t)
np.testing.assert_allclose(self.expected[huber_param], actual, rtol=1e-5)
@chex.all_variants()
@parameterized.named_parameters(
('nohuber', 0.0),
('huber', 1.0))
def test_quantile_expected_sarsa_batch_greedy(self, huber_param):
"""Tests for a full batch."""
quantile_expected_sarsa = self.variant(jax.vmap(functools.partial(
value_learning.quantile_expected_sarsa, huber_param=huber_param)))
# Test outputs.
actual = quantile_expected_sarsa(
self.dist_q_tm1, self.tau_q_tm1, self.a_tm1, self.r_t, self.discount_t,
self.dist_q_t, self.greedy_probs_a_t)
np.testing.assert_allclose(self.expected[huber_param], actual, rtol=1e-5)
@chex.all_variants()
@parameterized.named_parameters(
('nohuber', 0.0),
('huber', 1.0))
def test_quantile_expected_sarsa_batch_uniform(self, huber_param):
"""Tests for a full batch."""
quantile_expected_sarsa = self.variant(jax.vmap(functools.partial(
value_learning.quantile_expected_sarsa, huber_param=huber_param)))
# Test outputs.
actual = quantile_expected_sarsa(
self.dist_q_tm1, self.tau_q_tm1, self.a_tm1, self.r_t, self.discount_t,
self.dist_q_t, self.uniform_probs_a_t)
np.testing.assert_allclose(
self.uniform_expected[huber_param], actual, rtol=1e-5)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/value_learning_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for creating and managing moving averages."""
from typing import Union
import chex
import jax
import jax.numpy as jnp
@chex.dataclass(frozen=True)
class EmaMoments:
"""data-class holding the latest mean and variance estimates."""
# The tree of means.
mean: chex.ArrayTree
# The tree of variances.
variance: chex.ArrayTree
@chex.dataclass(frozen=True)
class EmaState:
"""data-class holding the exponential moving average state."""
# The tree of exponential moving averages of the values
mu: chex.ArrayTree
# The tree of exponential moving averages of the squared values
nu: chex.ArrayTree
  # The product of all the decays since the start of accumulation.
decay_product: Union[float, jax.Array]
def debiased_moments(self):
"""Returns debiased moments as in Adam."""
tiny = jnp.finfo(self.decay_product).tiny
debias = 1.0 / jnp.maximum(1 - self.decay_product, tiny)
mean = jax.tree_map(lambda m1: m1 * debias, self.mu)
    # This computation of the variance may lose some numerical precision if the
    # mean is not approximately zero.
variance = jax.tree_map(
lambda m2, m: jnp.maximum(0.0, m2 * debias - jnp.square(m)),
self.nu, mean)
return EmaMoments(mean=mean, variance=variance)
def create_ema(decay=0.999, pmean_axis_name=None):
"""An updater of moments.
Given a `tree` it will track first and second moments of the leaves.
Args:
decay: The decay of the moments. I.e., the learning rate is `1 - decay`.
pmean_axis_name: If not None, use lax.pmean to average the moment updates.
Returns:
Two functions: `(init_state, update_moments)`.
"""
def init_state(template_tree):
zeros = jax.tree_map(lambda x: jnp.zeros_like(jnp.mean(x)), template_tree)
    # The running product of decays starts at one (the empty product).
    scalar_one = jnp.ones([], dtype=jnp.float32)
    return EmaState(mu=zeros, nu=zeros, decay_product=scalar_one)
def _update(moment, value):
mean = jnp.mean(value)
# Compute the mean across all learner devices involved in the `pmap`.
if pmean_axis_name is not None:
mean = jax.lax.pmean(mean, axis_name=pmean_axis_name)
return decay * moment + (1 - decay) * mean
def update_moments(tree, state):
squared_tree = jax.tree_map(jnp.square, tree)
mu = jax.tree_map(_update, state.mu, tree)
nu = jax.tree_map(_update, state.nu, squared_tree)
state = EmaState(
mu=mu, nu=nu, decay_product=state.decay_product * decay)
return state.debiased_moments(), state
return init_state, update_moments
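# Editorial usage sketch (hedged, not part of the library API): the two functions
# returned by `create_ema` are threaded through successive updates; the scalar
# stream below is illustrative.
def _example_create_ema_usage():
  init_state, update_moments = create_ema(decay=0.9)
  state = init_state(jnp.zeros([]))  # the template fixes the tracked shape/dtype
  moments = None
  for x in (5.0, 7.0):
    moments, state = update_moments(jnp.asarray(x), state)
  return moments  # debiased EmaMoments(mean=..., variance=...)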
| rlax-master | rlax/_src/moving_averages.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions implementing custom non-linear transformations.
This is a collection of element-wise non-linear transformations that may be
used to transform losses, value estimates, or other multidimensional data.
"""
import chex
import jax
import jax.numpy as jnp
from rlax._src import base
Array = chex.Array
Numeric = chex.Numeric
def identity(x: Array) -> Array:
"""Identity transform."""
chex.assert_type(x, float)
return x
def sigmoid(x: Numeric) -> Array:
"""Sigmoid transform."""
chex.assert_type(x, float)
return jax.nn.sigmoid(x)
def logit(x: Array) -> Array:
"""Logit transform, inverse of sigmoid."""
chex.assert_type(x, float)
return -jnp.log(1. / x - 1.)
def signed_logp1(x: Array) -> Array:
"""Signed logarithm of x + 1."""
chex.assert_type(x, float)
return jnp.sign(x) * jnp.log1p(jnp.abs(x))
def signed_expm1(x: Array) -> Array:
"""Signed exponential of x - 1, inverse of signed_logp1."""
chex.assert_type(x, float)
return jnp.sign(x) * jnp.expm1(jnp.abs(x))
def signed_hyperbolic(x: Array, eps: float = 1e-3) -> Array:
"""Signed hyperbolic transform, inverse of signed_parabolic."""
chex.assert_type(x, float)
return jnp.sign(x) * (jnp.sqrt(jnp.abs(x) + 1) - 1) + eps * x
def hyperbolic_sin(x: Array) -> Array:
"""Hyperbolic sinus transform."""
chex.assert_type(x, float)
return jnp.sinh(x)
def hyperbolic_arcsin(x: Array) -> Array:
"""Hyperbolic arcsinus transform."""
chex.assert_type(x, float)
return jnp.arcsinh(x)
def signed_parabolic(x: Array, eps: float = 1e-3) -> Array:
"""Signed parabolic transform, inverse of signed_hyperbolic."""
chex.assert_type(x, float)
z = jnp.sqrt(1 + 4 * eps * (eps + 1 + jnp.abs(x))) / 2 / eps - 1 / 2 / eps
return jnp.sign(x) * (jnp.square(z) - 1)
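# Editorial sketch (hedged): `signed_hyperbolic` and `signed_parabolic` invert
# each other for matching `eps`; this is the pairing used by
# SIGNED_HYPERBOLIC_PAIR in nonlinear_bellman.py.
def _example_signed_pair_round_trip():
  x = jnp.array([-3.0, 0.5, 10.0])
  return signed_parabolic(signed_hyperbolic(x))  # approximately equal to x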
def power(x: Array, p: float) -> Array:
"""Power transform; `power_tx(_, 1/p)` is the inverse of `power_tx(_, p)`."""
chex.assert_type(x, float)
q = jnp.sqrt(p)
return jnp.sign(x) * (jnp.power(jnp.abs(x) / q + 1., p) - 1) / q
def transform_to_2hot(
scalar: Array,
min_value: float,
max_value: float,
num_bins: int) -> Array:
"""Transforms a scalar tensor to a 2 hot representation."""
scalar = jnp.clip(scalar, min_value, max_value)
scalar_bin = (scalar - min_value) / (max_value - min_value) * (num_bins - 1)
lower, upper = jnp.floor(scalar_bin), jnp.ceil(scalar_bin)
lower_value = (lower / (num_bins - 1.0)) * (max_value - min_value) + min_value
upper_value = (upper / (num_bins - 1.0)) * (max_value - min_value) + min_value
p_lower = (upper_value - scalar) / (upper_value - lower_value + 1e-5)
p_upper = 1 - p_lower
lower_one_hot = base.one_hot(
lower, num_bins, dtype=scalar.dtype) * jnp.expand_dims(p_lower, -1)
upper_one_hot = base.one_hot(
upper, num_bins, dtype=scalar.dtype) * jnp.expand_dims(p_upper, -1)
return lower_one_hot + upper_one_hot
def transform_from_2hot(
probs: Array,
min_value: float,
max_value: float,
num_bins: int) -> Array:
"""Transforms from a categorical distribution to a scalar."""
support_space = jnp.linspace(
min_value, max_value, num_bins).astype(probs.dtype)
scalar = jnp.sum(probs * jnp.expand_dims(support_space, 0), -1)
return scalar
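# Editorial sketch (hedged): the two functions above are approximate inverses; a
# scalar is split over its two nearest bins and recovered as their weighted mean.
def _example_two_hot_round_trip():
  x = jnp.array([0.3, -0.7])
  probs = transform_to_2hot(x, min_value=-1., max_value=1., num_bins=5)  # [2, 5]
  return transform_from_2hot(probs, min_value=-1., max_value=1., num_bins=5)  # ~x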
def transform_to_2hot_nonlinear(scalar: Array, bins: Array) -> Array:
"""Transforms a scalar tensor to a 2 hot representation defined using bins."""
min_value, max_value = bins[0], bins[-1]
num_bins = len(bins)
scalar = jnp.clip(scalar, min_value, max_value)
upper_index = jnp.argmax(scalar[..., None] <= bins.reshape(1, -1), axis=-1)
upper_value = bins[upper_index]
lower_value = bins[upper_index - 1]
p_lower = (upper_value - scalar) / (upper_value - lower_value)
p_upper = 1 - p_lower
lower_one_hot = jax.nn.one_hot(upper_index - 1, num_bins) * p_lower[..., None]
upper_one_hot = jax.nn.one_hot(upper_index, num_bins) * p_upper[..., None]
return lower_one_hot + upper_one_hot
def transform_from_2hot_nonlinear(
probs: Array,
bins: Array) -> Array:
"""Transforms from a categorical distribution to a scalar."""
scalar = jnp.sum(probs * jnp.expand_dims(bins, 0), -1)
return scalar
| rlax-master | rlax/_src/transforms.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities for RLax."""
import inspect
import types
from typing import Sequence, Tuple
def find_internal_python_modules(
root_module: types.ModuleType,
) -> Sequence[Tuple[str, types.ModuleType]]:
"""Returns `(name, module)` for all RLax submodules under `root_module`."""
modules = set([(root_module.__name__, root_module)])
visited = set()
to_visit = [root_module]
while to_visit:
mod = to_visit.pop()
visited.add(mod)
for name in dir(mod):
obj = getattr(mod, name)
if inspect.ismodule(obj) and obj not in visited:
if obj.__name__.startswith('rlax'):
if '_src' not in obj.__name__:
to_visit.append(obj)
modules.add((obj.__name__, obj))
return sorted(modules)
| rlax-master | rlax/_src/test_utils.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `distributions.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import numpy as np
from rlax._src import distributions
@chex.dataclass(frozen=True)
class _MockActionSpec:
minimum: chex.Array
maximum: chex.Array
class CategoricalSampleTest(parameterized.TestCase):
@chex.all_variants()
def test_categorical_sample(self):
key = np.array([1, 2], dtype=np.uint32)
probs = np.array([0.2, 0.3, 0.5])
sample = self.variant(distributions.categorical_sample)(key, probs)
self.assertEqual(sample, 0)
@chex.all_variants()
@parameterized.parameters(
((-1., 10., -1.),),
((0., 0., 0.),),
((1., np.inf, 3.),),
((1., 2., -np.inf),),
((1., 2., np.nan),),
)
def test_categorical_sample_on_invalid_distributions(self, probs):
key = np.array([1, 2], dtype=np.uint32)
probs = np.asarray(probs)
sample = self.variant(distributions.categorical_sample)(key, probs)
self.assertEqual(sample, -1)
class SoftmaxTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.logits = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)
self.samples = np.array([0, 1], dtype=np.int32)
self.expected_probs = np.array( # softmax with temperature=10
[[0.34425336, 0.34425336, 0.31149334],
[0.332225, 0.3671654, 0.3006096]],
dtype=np.float32)
probs = np.array( # softmax with temperature=1
[[0.42231882, 0.42231882, 0.15536241],
[0.24472848, 0.66524094, 0.09003057]],
dtype=np.float32)
logprobs = np.log(probs)
self.expected_logprobs = np.array(
[logprobs[0][self.samples[0]], logprobs[1][self.samples[1]]])
self.expected_entropy = -np.sum(probs * logprobs, axis=-1)
self.expected_clipped_entropy = {0.5: 0.549306, 0.9: 0.988751}
@chex.all_variants()
@parameterized.named_parameters(
('softmax', distributions.softmax),
('clipped_entropy_softmax', distributions.clipped_entropy_softmax))
def test_softmax_probs(self, softmax_dist):
"""Tests for a single element."""
distrib = softmax_dist(temperature=10.)
softmax = self.variant(distrib.probs)
# For each element in the batch.
for logits, expected in zip(self.logits, self.expected_probs):
# Test outputs.
actual = softmax(logits)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
@parameterized.named_parameters(
('softmax', distributions.softmax),
('clipped_entropy_softmax', distributions.clipped_entropy_softmax))
def test_softmax_probs_batch(self, softmax_dist):
"""Tests for a full batch."""
distrib = softmax_dist(temperature=10.)
softmax = self.variant(distrib.probs)
# Test softmax output in batch.
actual = softmax(self.logits)
np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)
@chex.all_variants()
@parameterized.named_parameters(
('softmax', distributions.softmax),
('clipped_entropy_softmax', distributions.clipped_entropy_softmax))
def test_softmax_logprob(self, softmax_dist):
"""Tests for a single element."""
distrib = softmax_dist()
logprob_fn = self.variant(distrib.logprob)
# For each element in the batch.
for logits, samples, expected in zip(
self.logits, self.samples, self.expected_logprobs):
# Test output.
actual = logprob_fn(samples, logits)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
@parameterized.named_parameters(
('softmax', distributions.softmax),
('clipped_entropy_softmax', distributions.clipped_entropy_softmax))
def test_softmax_logprob_batch(self, softmax_dist):
"""Tests for a full batch."""
distrib = softmax_dist()
logprob_fn = self.variant(distrib.logprob)
# Test softmax output in batch.
actual = logprob_fn(self.samples, self.logits)
np.testing.assert_allclose(self.expected_logprobs, actual, atol=1e-4)
@chex.all_variants()
@parameterized.named_parameters(
('softmax', distributions.softmax),
('clipped_entropy_softmax', distributions.clipped_entropy_softmax))
def test_softmax_entropy(self, softmax_dist):
"""Tests for a single element."""
distrib = softmax_dist()
entropy_fn = self.variant(distrib.entropy)
# For each element in the batch.
for logits, expected in zip(self.logits, self.expected_entropy):
# Test outputs.
actual = entropy_fn(logits)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
@parameterized.parameters((0.9, [0.988751, 0.832396]),
(0.5, [0.549306, 0.549306]))
def test_softmax_clipped_entropy_batch(self, entropy_clip, expected_clipped):
"""Tests for a single element."""
distrib = distributions.clipped_entropy_softmax(entropy_clip=entropy_clip)
entropy_fn = self.variant(distrib.entropy)
# Test softmax output in batch.
actual = entropy_fn(self.logits)
np.testing.assert_allclose(expected_clipped, actual, atol=1e-4)
@chex.all_variants()
@parameterized.named_parameters(
('softmax', distributions.softmax),
('clipped_entropy_softmax', distributions.clipped_entropy_softmax))
def test_softmax_entropy_batch(self, softmax_dist):
"""Tests for a full batch."""
distrib = softmax_dist()
entropy_fn = self.variant(distrib.entropy)
# Test softmax output in batch.
actual = entropy_fn(self.logits)
np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)
class GreedyTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.preferences = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)
self.samples = np.array([0, 1], dtype=np.int32)
self.expected_probs = np.array(
[[0.5, 0.5, 0.], [0., 1., 0.]], dtype=np.float32)
self.expected_logprob = np.array(
[-0.6931472, 0.], dtype=np.float32)
self.expected_entropy = np.array(
[0.6931472, 0.], dtype=np.float32)
@chex.all_variants()
def test_greedy_probs(self):
"""Tests for a single element."""
distrib = distributions.greedy()
greedy = self.variant(distrib.probs)
# For each element in the batch.
for preferences, expected in zip(self.preferences, self.expected_probs):
# Test outputs.
actual = greedy(preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_probs_batch(self):
"""Tests for a full batch."""
distrib = distributions.greedy()
greedy = self.variant(distrib.probs)
# Test greedy output in batch.
actual = greedy(self.preferences)
np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_logprob(self):
"""Tests for a single element."""
distrib = distributions.greedy()
logprob_fn = self.variant(distrib.logprob)
# For each element in the batch.
for preferences, samples, expected in zip(
self.preferences, self.samples, self.expected_logprob):
# Test output.
actual = logprob_fn(samples, preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_logprob_batch(self):
"""Tests for a full batch."""
distrib = distributions.greedy()
logprob_fn = self.variant(distrib.logprob)
# Test greedy output in batch.
actual = logprob_fn(self.samples, self.preferences)
np.testing.assert_allclose(self.expected_logprob, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_entropy(self):
"""Tests for a single element."""
distrib = distributions.greedy()
entropy_fn = self.variant(distrib.entropy)
# For each element in the batch.
for preferences, expected in zip(self.preferences, self.expected_entropy):
# Test outputs.
actual = entropy_fn(preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_entropy_batch(self):
"""Tests for a full batch."""
distrib = distributions.greedy()
entropy_fn = self.variant(distrib.entropy)
# Test greedy output in batch.
actual = entropy_fn(self.preferences)
np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)
class EpsilonGreedyTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.epsilon = 0.2
self.preferences = np.array([[1, 1, 0, 0], [1, 2, 0, 0]], dtype=np.float32)
self.samples = np.array([0, 1], dtype=np.int32)
self.expected_probs = np.array(
[[0.45, 0.45, 0.05, 0.05], [0.05, 0.85, 0.05, 0.05]], dtype=np.float32)
self.expected_logprob = np.array(
[-0.7985077, -0.1625189], dtype=np.float32)
self.expected_entropy = np.array(
[1.01823008, 0.58750093], dtype=np.float32)
@chex.all_variants()
def test_greedy_probs(self):
"""Tests for a single element."""
distrib = distributions.epsilon_greedy(self.epsilon)
probs_fn = self.variant(distrib.probs)
# For each element in the batch.
for preferences, expected in zip(self.preferences, self.expected_probs):
# Test outputs.
actual = probs_fn(preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_probs_batch(self):
"""Tests for a full batch."""
distrib = distributions.epsilon_greedy(self.epsilon)
probs_fn = self.variant(distrib.probs)
# Test greedy output in batch.
actual = probs_fn(self.preferences)
np.testing.assert_allclose(self.expected_probs, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_logprob(self):
"""Tests for a single element."""
distrib = distributions.epsilon_greedy(self.epsilon)
logprob_fn = self.variant(distrib.logprob)
# For each element in the batch.
for preferences, samples, expected in zip(
self.preferences, self.samples, self.expected_logprob):
# Test output.
actual = logprob_fn(samples, preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_logprob_batch(self):
"""Tests for a full batch."""
distrib = distributions.epsilon_greedy(self.epsilon)
logprob_fn = self.variant(distrib.logprob)
# Test greedy output in batch.
actual = logprob_fn(self.samples, self.preferences)
np.testing.assert_allclose(self.expected_logprob, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_entropy(self):
"""Tests for a single element."""
distrib = distributions.epsilon_greedy(self.epsilon)
entropy_fn = self.variant(distrib.entropy)
# For each element in the batch.
for preferences, expected in zip(self.preferences, self.expected_entropy):
# Test outputs.
actual = entropy_fn(preferences)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
def test_greedy_entropy_batch(self):
"""Tests for a full batch."""
distrib = distributions.epsilon_greedy(self.epsilon)
entropy_fn = self.variant(distrib.entropy)
# Test greedy output in batch.
actual = entropy_fn(self.preferences)
np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)
class GaussianDiagonalTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.mu = np.array([[1., -1], [0.1, -0.1]], dtype=np.float32)
self.sigma = np.array([[0.1, 0.1], [0.2, 0.3]], dtype=np.float32)
self.sample = np.array([[1.2, -1.1], [-0.1, 0.]], dtype=np.float32)
self.other_mu = np.array([[1., -10.], [0.3, -0.2]], dtype=np.float32)
self.other_sigma = np.array([[0.1, 0.1], [0.8, 0.3]], dtype=np.float32)
# Expected values for the distribution's function were computed using
# tfd.MultivariateNormalDiag (from the tensorflow_probability package).
self.expected_prob_a = np.array(
[1.3064219, 1.5219283], dtype=np.float32)
self.expected_logprob_a = np.array(
[0.26729202, 0.41997814], dtype=np.float32)
self.expected_entropy = np.array(
[-1.7672932, 0.02446628], dtype=np.float32)
self.expected_kl = np.array(
[4050.00, 1.00435], dtype=np.float32)
self.expected_kl_to_std_normal = np.array(
[4.6151705, 1.8884108], dtype=np.float32)
@chex.all_variants()
def test_gaussian_prob(self):
"""Tests for a single element."""
distrib = distributions.gaussian_diagonal()
prob_fn = self.variant(distrib.prob)
# For each element in the batch.
for mu, sigma, sample, expected in zip(
self.mu, self.sigma, self.sample, self.expected_prob_a):
# Test outputs.
actual = prob_fn(sample, mu, sigma)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
def test_gaussian_prob_batch(self):
"""Tests for a full batch."""
distrib = distributions.gaussian_diagonal()
prob_fn = self.variant(distrib.prob)
# Test greedy output in batch.
actual = prob_fn(self.sample, self.mu, self.sigma)
np.testing.assert_allclose(self.expected_prob_a, actual, atol=1e-4)
@chex.all_variants()
def test_gaussian_logprob(self):
"""Tests for a single element."""
distrib = distributions.gaussian_diagonal()
logprob_fn = self.variant(distrib.logprob)
# For each element in the batch.
for mu, sigma, sample, expected in zip(
self.mu, self.sigma, self.sample, self.expected_logprob_a):
# Test output.
actual = logprob_fn(sample, mu, sigma)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
def test_gaussian_logprob_batch(self):
"""Tests for a full batch."""
distrib = distributions.gaussian_diagonal()
logprob_fn = self.variant(distrib.logprob)
# Test greedy output in batch.
actual = logprob_fn(self.sample, self.mu, self.sigma)
np.testing.assert_allclose(self.expected_logprob_a, actual, atol=1e-4)
@chex.all_variants()
def test_gaussian_entropy(self):
"""Tests for a single element."""
distrib = distributions.gaussian_diagonal()
entropy_fn = self.variant(distrib.entropy)
# For each element in the batch.
for mu, sigma, expected in zip(
self.mu, self.sigma, self.expected_entropy):
# Test outputs.
actual = entropy_fn(mu, sigma)
np.testing.assert_allclose(expected, actual, atol=1e-4)
@chex.all_variants()
def test_gaussian_entropy_batch(self):
"""Tests for a full batch."""
distrib = distributions.gaussian_diagonal()
entropy_fn = self.variant(distrib.entropy)
# Test greedy output in batch.
actual = entropy_fn(self.mu, self.sigma)
np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-4)
@chex.all_variants()
def test_gaussian_kl_batch(self):
"""Tests for a full batch."""
distrib = distributions.gaussian_diagonal()
kl_fn = self.variant(distrib.kl)
# Test greedy output in batch.
actual = kl_fn(self.mu, self.sigma, self.other_mu, self.other_sigma)
np.testing.assert_allclose(self.expected_kl, actual, atol=1e-3, rtol=1e-6)
@chex.all_variants()
def test_gaussian_kl_to_std_normal_batch(self):
"""Tests for a full batch."""
distrib = distributions.gaussian_diagonal()
kl_fn = self.variant(distrib.kl_to_standard_normal)
# Test greedy output in batch.
actual = kl_fn(self.mu, self.sigma)
np.testing.assert_allclose(self.expected_kl_to_std_normal, actual,
atol=1e-4)
class SquashedGaussianTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.mu = np.array([[1., -1], [0.1, -0.1]], dtype=np.float32)
self.sigma = np.array([[0.1, 0.1], [0.2, 0.3]], dtype=np.float32)
self.sample = np.array([[0.5, -0.6], [-.4, -.2]], dtype=np.float32)
self.other_mu = np.array([[1., -10.], [0.3, -0.2]], dtype=np.float32)
self.other_sigma = np.array([[0.1, 0.1], [0.8, 0.3]], dtype=np.float32)
self.action_spec = _MockActionSpec(minimum=np.array([-1.0]),
maximum=np.array([2.0]))
self.sigma_min = 0.0
self.sigma_max = 2.0
# Expected values for the distribution's function were computed using
# tfd.MultivariateNormalDiag (from the tensorflow_probability package).
self.expected_prob_a = np.array(
[0.016403, 0.011328], dtype=np.float32)
self.expected_logprob_a = np.array(
[-4.110274, -4.480485], dtype=np.float32)
self.expected_entropy = np.array(
[5.037213, 5.326565], dtype=np.float32)
self.expected_kl = np.array(
[0.003151, 0.164303], dtype=np.float32)
self.expected_kl_to_std_normal = np.array(
[6.399713, 8.61989], dtype=np.float32)
@chex.all_variants()
def test_squashed_gaussian_prob(self):
"""Tests for a full batch."""
distrib = distributions.squashed_gaussian(sigma_min=self.sigma_min,
sigma_max=self.sigma_max)
prob_fn = self.variant(distrib.prob)
# Test greedy output in batch.
actual = prob_fn(self.sample, self.mu, self.sigma, self.action_spec)
np.testing.assert_allclose(self.expected_prob_a, actual, atol=1e-4)
@chex.all_variants()
def test_squashed_gaussian_logprob(self):
"""Tests for a full batch."""
distrib = distributions.squashed_gaussian(sigma_min=self.sigma_min,
sigma_max=self.sigma_max)
logprob_fn = self.variant(distrib.logprob)
# Test greedy output in batch.
actual = logprob_fn(self.sample, self.mu, self.sigma, self.action_spec)
np.testing.assert_allclose(self.expected_logprob_a, actual, atol=1e-3,
rtol=1e-6)
@chex.all_variants()
def test_squashed_gaussian_entropy(self):
"""Tests for a full batch."""
distrib = distributions.squashed_gaussian(sigma_min=self.sigma_min,
sigma_max=self.sigma_max)
entropy_fn = self.variant(distrib.entropy)
# Test greedy output in batch.
actual = entropy_fn(self.mu, self.sigma)
np.testing.assert_allclose(self.expected_entropy, actual, atol=1e-3,
rtol=1e-6)
@chex.all_variants()
def test_squashed_gaussian_kl(self):
"""Tests for a full batch."""
distrib = distributions.squashed_gaussian(sigma_min=self.sigma_min,
sigma_max=self.sigma_max)
kl_fn = self.variant(distrib.kl)
# Test greedy output in batch.
actual = kl_fn(self.mu, self.sigma, self.other_mu, self.other_sigma)
np.testing.assert_allclose(self.expected_kl, actual, atol=1e-3, rtol=1e-6)
@chex.all_variants()
def test_squashed_gaussian_kl_to_std_normal(self):
"""Tests for a full batch."""
distrib = distributions.squashed_gaussian(sigma_min=self.sigma_min,
sigma_max=self.sigma_max)
kl_fn = self.variant(distrib.kl_to_standard_normal)
# Test greedy output in batch.
actual = kl_fn(self.mu, self.sigma)
np.testing.assert_allclose(self.expected_kl_to_std_normal, actual,
atol=1e-3, rtol=1e-5)
class ImportanceSamplingTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.pi_logits = np.array([[0.2, 0.8], [0.6, 0.4]], dtype=np.float32)
self.mu_logits = np.array([[0.8, 0.2], [0.6, 0.4]], dtype=np.float32)
self.actions = np.array([1, 0], dtype=np.int32)
pi = jax.nn.softmax(self.pi_logits)
mu = jax.nn.softmax(self.mu_logits)
self.expected_rhos = np.array(
[pi[0][1] / mu[0][1], pi[1][0] / mu[1][0]], dtype=np.float32)
@chex.all_variants()
def test_importance_sampling_ratios_batch(self):
"""Tests for a full batch."""
ratios_fn = self.variant(
distributions.categorical_importance_sampling_ratios)
# Test softmax output in batch.
actual = ratios_fn(self.pi_logits, self.mu_logits, self.actions)
np.testing.assert_allclose(self.expected_rhos, actual, atol=1e-4)
class CategoricalKLTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.p_logits = np.array([[1, 1, 0], [1, 2, 0]], dtype=np.float32)
p_probs = np.array([[0.42231882, 0.42231882, 0.15536241],
[0.24472848, 0.66524094, 0.09003057]],
dtype=np.float32)
p_logprobs = np.log(p_probs)
self.q_logits = np.array([[1, 2, 0], [1, 1, 0]], dtype=np.float32)
q_probs = np.array([[0.24472848, 0.66524094, 0.09003057],
[0.42231882, 0.42231882, 0.15536241]],
dtype=np.float32)
q_logprobs = np.log(q_probs)
self.expected_kl = np.sum(p_probs * (p_logprobs - q_logprobs), axis=-1)
@chex.all_variants()
def test_categorical_kl_divergence_batch(self):
"""Tests for a full batch."""
kl_fn = self.variant(distributions.categorical_kl_divergence)
# Test softmax output in batch.
actual = kl_fn(self.p_logits, self.q_logits)
np.testing.assert_allclose(self.expected_kl, actual, atol=1e-4)
class CategoricalCrossEntropyTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.labels = np.array([[0., 1., 0.], [1., 0., 0.]], dtype=np.float32)
self.logits = np.array([[10., 1., -2.], [1., 4., 0.2]], dtype=np.float32)
self.expected = np.array([9.00013, 3.0696733], dtype=np.float32)
@chex.all_variants()
def test_categorical_cross_entropy_batch(self):
"""Tests for a full batch."""
cross_entropy = self.variant(jax.vmap(
distributions.categorical_cross_entropy))
# Test outputs.
actual = cross_entropy(self.labels, self.logits)
np.testing.assert_allclose(self.expected, actual, atol=1e-4)
class MultivariateNormalKLTest(parameterized.TestCase):
def setUp(self):
super().setUp()
# Test numbers taken from tfd.MultivariateNormalDiag
self.mu0 = np.array([[5., -1], [0.1, -0.1]], dtype=np.float32)
self.sigma0 = np.array([[0.3, 0.1], [0.2, 0.3]], dtype=np.float32)
self.mu1 = np.array([[0.005, -11.], [-0.25, -0.2]], dtype=np.float32)
self.sigma1 = np.array([[0.1, 0.1], [0.6, 0.3]], dtype=np.float32)
self.expected_kl = np.array([6.2504023e+03, 8.7986231e-01],
dtype=np.float32)
@chex.all_variants()
def test_multivariate_normal_kl_divergence_batch(self):
kl_fn = self.variant(distributions.multivariate_normal_kl_divergence)
actual = kl_fn(self.mu0, self.sigma0, self.mu1, self.sigma1)
np.testing.assert_allclose(self.expected_kl, actual, atol=1e-3, rtol=1e-6)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/distributions_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for moving_averages.py."""
from absl.testing import absltest
import numpy as np
from rlax._src import moving_averages
class EmaTest(absltest.TestCase):
def test_moments(self):
values = [5.0, 7.0]
decay = 0.9
d = decay
init_state, update = moving_averages.create_ema(decay=decay)
state = init_state(values[0])
moments, state = update(values[0], state)
np.testing.assert_allclose(moments.mean, values[0], atol=1e-5)
np.testing.assert_allclose(moments.variance, 0.0, atol=1e-5)
moments, state = update(values[1], state)
np.testing.assert_allclose(
moments.mean,
(d * (1 - d) * values[0] + (1 - d) * values[1]) / (1 - d**2), atol=1e-5)
np.testing.assert_allclose(
moments.variance,
(d * (1 - d) * values[0]**2 + (1 - d) * values[1]**2) / (1 - d**2) -
moments.mean**2, atol=1e-4)
if __name__ == '__main__':
absltest.main()
| rlax-master | rlax/_src/moving_averages_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses.py."""
from absl.testing import absltest
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
from rlax._src import interruptions
class InterruptionsTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.correct_trajectory = jnp.array([
[dm_env.StepType.MID],
[dm_env.StepType.MID],
[dm_env.StepType.MID],
[dm_env.StepType.LAST],
[dm_env.StepType.FIRST],
[dm_env.StepType.MID],
])
self.broken_trajectory = jnp.array([
[dm_env.StepType.MID],
[dm_env.StepType.MID],
[dm_env.StepType.MID],
[dm_env.StepType.MID],
[dm_env.StepType.FIRST],
[dm_env.StepType.MID],
])
def test_fix_step_type_on_interruptions(self):
output1 = interruptions.fix_step_type_on_interruptions(
self.correct_trajectory)
output2 = interruptions.fix_step_type_on_interruptions(
self.broken_trajectory)
# Test output.
np.testing.assert_allclose(output1, self.correct_trajectory)
np.testing.assert_allclose(output2, self.correct_trajectory)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/interruptions_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformed value functions.
Canonical value functions map states onto the expected discounted sum of rewards
that may be collected by an agent from any starting state. Value functions may
also be defined as the fixed points of certain linear recursive relations known
as Bellman equations. It is sometimes useful to consider transformed values that
are the solutions to a non-linear generalization of the traditional Bellman
equations.
In this subpackage we provide a general utility for wrapping bootstrapped return
calculations to construct regression targets for these transformed values.
We also use this to implement different learning algorithms from the literature.
"""
import collections
import functools
import chex
import jax.numpy as jnp
from rlax._src import base
from rlax._src import multistep
from rlax._src import transforms
Array = chex.Array
TxPair = collections.namedtuple('TxPair', ['apply', 'apply_inv'])
# Example transform pairs; these typically consist of a monotonically increasing
# squashing fn `apply` and its inverse `apply_inv`. Other choices are possible.
IDENTITY_PAIR = TxPair(
transforms.identity, transforms.identity)
SIGNED_LOGP1_PAIR = TxPair(
transforms.signed_logp1, transforms.signed_expm1)
SIGNED_HYPERBOLIC_PAIR = TxPair(
transforms.signed_hyperbolic, transforms.signed_parabolic)
HYPERBOLIC_SIN_PAIR = TxPair(
transforms.hyperbolic_arcsin, transforms.hyperbolic_sin)
DISCOUNT_TRANSFORM_PAIR = TxPair(
lambda x: -jnp.log(1 - x),
lambda x: 1 - jnp.exp(-x))
def twohot_pair(
min_value: float,
max_value: float,
num_bins: int) -> TxPair:
"""Construct a TxPair matching a 2-hot reparametrisation and its inverse."""
apply_fn = functools.partial(
transforms.transform_to_2hot,
min_value=min_value,
max_value=max_value,
num_bins=num_bins)
apply_inv_fn = functools.partial(
transforms.transform_from_2hot,
min_value=min_value,
max_value=max_value,
num_bins=num_bins)
return TxPair(apply_fn, apply_inv_fn)
def compose_tx(*tx_list):
"""Utility to compose a sequence of TxPairs.
The transformations are applied in order during the `apply` method:
e.g. [f, g] --> y = g(f(x))
and in reverse order during the `apply_inv` method:
e.g. [f, g] --> x = f^-1(g^-1(x))
Args:
*tx_list: a sequence of TxPairs as positional arguments.
Returns:
a new TxPair.
"""
def apply_fn(x: chex.Array):
for tx in tx_list:
x = tx.apply(x)
return x
def apply_inv_fn(x: chex.Array):
for tx in tx_list[::-1]:
x = tx.apply_inv(x)
return x
return TxPair(apply_fn, apply_inv_fn)
def muzero_pair(
min_value: float,
max_value: float,
num_bins: int,
tx: TxPair) -> TxPair:
"""Create the transformation pair introduced in MuZero.
  This more complex pair of transformations combines a monotonic squashing
function with a reparametrisation of the value over a fixed support.
See "Mastering Atari, Go, Chess and Shogi by Planning with a Learned Model"
by Schrittwieser et al. (https://arxiv.org/abs/1911.08265).
Args:
min_value: minimum value of the discrete support.
max_value: maximum value of the discrete support.
num_bins: number of discrete bins used by the fixed support.
tx: non-linear transformation to be applied before reparametrisation.
Returns:
a transformation pair.
"""
return compose_tx(tx, twohot_pair(min_value, max_value, num_bins))
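# Editorial usage sketch (hedged): a MuZero-style pair combining the signed
# hyperbolic squashing with a two-hot support; the numbers are illustrative.
def _example_muzero_pair_usage():
  tx = muzero_pair(min_value=-20., max_value=20., num_bins=41,
                   tx=SIGNED_HYPERBOLIC_PAIR)
  probs = tx.apply(jnp.array([3.7]))  # shape [1, 41], two-hot in squashed space
  return tx.apply_inv(probs)          # approximately recovers 3.7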
def unbiased_transform_pair(
min_value: float,
max_value: float,
num_bins: int,
tx: TxPair) -> TxPair:
"""Create an unbiased value transformation pair.
A pair of transformations, similar to 'muzero_pair' but where the
non-linearity is applied to the bin locations rather than the scalar value.
Args:
min_value: minimum value of the discrete support in the transformed space.
max_value: maximum value of the discrete support in the transformed space.
num_bins: number of discrete bins used by the fixed support.
tx: non-linear transformation to be applied to the bin locations.
Returns:
a transformation pair.
"""
bins = tx.apply_inv(jnp.linspace(min_value, max_value, num_bins))
apply_fn = functools.partial(
transforms.transform_to_2hot_nonlinear, bins=bins)
apply_inv_fn = functools.partial(
transforms.transform_from_2hot_nonlinear, bins=bins)
nonlinear_twohot_pair = TxPair(apply_fn, apply_inv_fn)
return compose_tx(IDENTITY_PAIR, nonlinear_twohot_pair)
def transform_values(build_targets, *value_argnums):
"""Decorator to convert targets to use transformed value function."""
@functools.wraps(build_targets)
def wrapped_build_targets(tx_pair, *args, **kwargs):
tx_args = list(args)
for index in value_argnums:
tx_args[index] = tx_pair.apply_inv(tx_args[index])
targets = build_targets(*tx_args, **kwargs)
return tx_pair.apply(targets)
return wrapped_build_targets
transformed_lambda_returns = transform_values(multistep.lambda_returns, 2)
transformed_general_off_policy_returns_from_action_values = transform_values(
multistep.general_off_policy_returns_from_action_values, 0)
transformed_n_step_returns = transform_values(
multistep.n_step_bootstrapped_returns, 2)
def transformed_q_lambda(
q_tm1: Array,
a_tm1: Array,
r_t: Array,
discount_t: Array,
q_t: Array,
lambda_: Array,
stop_target_gradients: bool = True,
tx_pair: TxPair = IDENTITY_PAIR,
) -> Array:
"""Calculates Peng's or Watkins' Q(lambda) temporal difference error.
See "General non-linear Bellman equations" by van Hasselt et al.
(https://arxiv.org/abs/1907.03687).
Args:
q_tm1: sequence of Q-values at time t-1.
a_tm1: sequence of action indices at time t-1.
r_t: sequence of rewards at time t.
discount_t: sequence of discounts at time t.
q_t: sequence of Q-values at time t.
lambda_: mixing parameter lambda, either a scalar (e.g. Peng's Q(lambda)) or
      a sequence (e.g. Watkins' Q(lambda)).
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
tx_pair: TxPair of value function transformation and its inverse.
Returns:
Q(lambda) temporal difference error.
"""
chex.assert_rank([q_tm1, a_tm1, r_t, discount_t, q_t, lambda_],
[2, 1, 1, 1, 2, {0, 1}])
chex.assert_type([q_tm1, a_tm1, r_t, discount_t, q_t, lambda_],
[float, int, float, float, float, float])
qa_tm1 = base.batched_index(q_tm1, a_tm1)
v_t = jnp.max(q_t, axis=-1)
target_tm1 = transformed_lambda_returns(
tx_pair, r_t, discount_t, v_t, lambda_, stop_target_gradients)
return target_tm1 - qa_tm1
def transformed_retrace(
q_tm1: Array,
q_t: Array,
a_tm1: Array,
a_t: Array,
r_t: Array,
discount_t: Array,
pi_t: Array,
mu_t: Array,
lambda_: float,
eps: float = 1e-8,
stop_target_gradients: bool = True,
tx_pair: TxPair = IDENTITY_PAIR,
) -> Array:
"""Calculates transformed Retrace errors.
See "Recurrent Experience Replay in Distributed Reinforcement Learning" by
Kapturowski et al. (https://openreview.net/pdf?id=r1lyTjAqYX).
Args:
q_tm1: Q-values at time t-1.
q_t: Q-values at time t.
a_tm1: action index at time t-1.
a_t: action index at time t.
r_t: reward at time t.
discount_t: discount at time t.
pi_t: target policy probs at time t.
mu_t: behavior policy probs at time t.
lambda_: scalar mixing parameter lambda.
eps: small value to add to mu_t for numerical stability.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
tx_pair: TxPair of value function transformation and its inverse.
Returns:
Transformed Retrace error.
"""
chex.assert_rank([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],
[2, 2, 1, 1, 1, 1, 2, 1])
chex.assert_type([q_tm1, q_t, a_tm1, a_t, r_t, discount_t, pi_t, mu_t],
[float, float, int, int, float, float, float, float])
pi_a_t = base.batched_index(pi_t, a_t)
c_t = jnp.minimum(1.0, pi_a_t / (mu_t + eps)) * lambda_
target_tm1 = transformed_general_off_policy_returns_from_action_values(
tx_pair, q_t, a_t, r_t, discount_t, c_t, pi_t, stop_target_gradients)
q_a_tm1 = base.batched_index(q_tm1, a_tm1)
return target_tm1 - q_a_tm1
def transformed_n_step_q_learning(
q_tm1: Array,
a_tm1: Array,
target_q_t: Array,
a_t: Array,
r_t: Array,
discount_t: Array,
n: int,
stop_target_gradients: bool = True,
tx_pair: TxPair = IDENTITY_PAIR,
) -> Array:
"""Calculates transformed n-step TD errors.
See "Recurrent Experience Replay in Distributed Reinforcement Learning" by
Kapturowski et al. (https://openreview.net/pdf?id=r1lyTjAqYX).
Args:
q_tm1: Q-values at times [0, ..., T - 1].
a_tm1: action index at times [0, ..., T - 1].
target_q_t: target Q-values at time [1, ..., T].
    a_t: action index at times [1, ..., T] used to select the target q-values to
      bootstrap from; argmax(target_q_t) for normal Q-learning, argmax(q_t) for
      double Q-learning.
r_t: reward at times [1, ..., T].
discount_t: discount at times [1, ..., T].
n: number of steps over which to accumulate reward before bootstrapping.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
tx_pair: TxPair of value function transformation and its inverse.
Returns:
Transformed N-step TD error.
"""
chex.assert_rank([q_tm1, target_q_t, a_tm1, a_t, r_t, discount_t],
[2, 2, 1, 1, 1, 1])
chex.assert_type([q_tm1, target_q_t, a_tm1, a_t, r_t, discount_t],
[float, float, int, int, float, float])
v_t = base.batched_index(target_q_t, a_t)
target_tm1 = transformed_n_step_returns(
tx_pair, r_t, discount_t, v_t, n,
stop_target_gradients=stop_target_gradients)
q_a_tm1 = base.batched_index(q_tm1, a_tm1)
return target_tm1 - q_a_tm1
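# Editorial usage sketch (hedged): transformed n-step Q-learning errors on a toy
# length-3 sequence; all values below are illustrative.
def _example_transformed_n_step_q_learning():
  q_tm1 = jnp.array([[1.0, 2.0], [0.5, -1.0], [2.0, 0.0]])
  target_q_t = jnp.array([[1.5, 0.0], [2.0, 1.0], [0.0, 0.5]])
  a_tm1 = jnp.array([0, 1, 0])
  a_t = jnp.array([1, 0, 1])
  r_t = jnp.array([1.0, 0.0, -1.0])
  discount_t = jnp.array([0.9, 0.9, 0.0])
  return transformed_n_step_q_learning(
      q_tm1, a_tm1, target_q_t, a_t, r_t, discount_t, n=2,
      tx_pair=SIGNED_HYPERBOLIC_PAIR)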
| rlax-master | rlax/_src/nonlinear_bellman.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_learning.py."""
from absl.testing import absltest
import jax.numpy as jnp
import numpy as np
from rlax._src import model_learning
class ModelLearningTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.trajectories = jnp.array([ # [T, B]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
]).transpose()
self.start_indices = jnp.array([ # [B, num_starts]
[0, 1, 4],
[1, 2, 5]
])
self.invalid_start_indices = jnp.array([ # [B, num_starts]
[0, 1, 25], # 25 is out of bound
[1, 2, 5]])
def test_extract_subsequences(self):
output = model_learning.extract_subsequences(
self.trajectories, self.start_indices, 3)
expected_output = jnp.array([
[[0, 1, 4],
[10, 20, 50]],
[[1, 2, 5],
[20, 30, 60]],
[[2, 3, 6],
[30, 40, 70]]])
# Test output.
np.testing.assert_allclose(output, expected_output)
def test_extract_subsequences_with_validation_bounds(self):
with self.assertRaisesRegex(AssertionError, 'Expected len >='):
model_learning.extract_subsequences(
self.trajectories, self.invalid_start_indices, 1,
max_valid_start_idx=24)
if __name__ == '__main__':
absltest.main()
| rlax-master | rlax/_src/model_learning_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for target network switching."""
from typing import Any
import warnings
import chex
import jax
Numeric = chex.Numeric
def conditional_update(new_tensors: Any, old_tensors: Any, is_time: Numeric):
"""Checks whether to update the params and returns the correct params."""
warnings.warn(
"Rlax conditional_update will be deprecated. Please use optax instead.",
PendingDeprecationWarning, stacklevel=2
)
return jax.tree_map(
lambda new, old: jax.lax.select(is_time, new, old),
new_tensors, old_tensors)
def periodic_update(
new_tensors: Any, old_tensors: Any,
steps: chex.Array, update_period: int):
"""Periodically switch all elements from a nested struct with new elements."""
warnings.warn(
"Rlax periodic_update will be deprecated. Please use optax instead.",
PendingDeprecationWarning, stacklevel=2
)
return conditional_update(
new_tensors, old_tensors, is_time=steps % update_period == 0)
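# Editorial usage sketch (hedged): refresh target parameters every
# `update_period` steps, otherwise keep the previous ones; the parameter trees
# below are illustrative.
def _example_periodic_update():
  online_params = {'w': jax.numpy.ones([3])}
  target_params = {'w': jax.numpy.zeros([3])}
  step = jax.numpy.asarray(200)
  return periodic_update(online_params, target_params, step, update_period=100)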
| rlax-master | rlax/_src/nested_updates.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions implementing custom embeddings."""
import chex
import jax
import jax.numpy as jnp
Array = chex.Array
def embed_oar(features: Array, action: Array, reward: Array,
num_actions: int) -> Array:
"""Embed each of the (observation, action, reward) inputs & concatenate."""
chex.assert_rank([features, action, reward], [2, 1, 1])
action = jax.nn.one_hot(action, num_classes=num_actions) # [B, A]
reward = jnp.tanh(reward)
while reward.ndim < action.ndim:
reward = jnp.expand_dims(reward, axis=-1)
embedding = jnp.concatenate([features, action, reward], axis=-1) # [B, D+A+1]
return embedding
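# Editorial usage sketch (hedged): embed a batch of two observations with four
# possible actions; the output width is feature_dim + num_actions + 1.
def _example_embed_oar():
  features = jnp.ones([2, 5])
  action = jnp.array([0, 3])
  reward = jnp.array([1.0, -0.5])
  return embed_oar(features, action, reward, num_actions=4)  # shape [2, 10]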
| rlax-master | rlax/_src/embedding.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `general_value_functions.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import numpy as np
from rlax._src import general_value_functions
class PixelControlTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.cell_size = 2
time, self.batch_size = 3, 2
height, width, channels = 4, 4, 3
shape = (time, self.batch_size, height, width, channels)
hw = np.matmul(
np.arange(0, 1, 0.25)[:, None],
np.arange(0, 1, 0.25)[None, :])
hwc = np.stack([hw, hw + 0.1, hw + 0.2], axis=-1)
bhwc = np.stack([hwc, hwc + 0.1], axis=0)
tbhwc = np.stack([bhwc, bhwc + 0.05, bhwc + 0.1], axis=0)
assert tbhwc.shape == shape
self.obs = tbhwc
self.expected = 0.05 * np.ones((2, 2, 2, 2), dtype=np.float32)
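    # Editorial note: consecutive observations differ by exactly 0.05 everywhere,
    # so the average absolute change in each 2x2 cell (the pseudo reward) is 0.05;
    # the expected shape is [T-1, B, H // cell_size, W // cell_size].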
@chex.all_variants()
def test_pixel_control_rewards(self):
"""Tests for a single element."""
pixel_control_rewards = self.variant(
functools.partial(
general_value_functions.pixel_control_rewards,
cell_size=self.cell_size))
# Test pseudo rewards.
for i in range(self.batch_size):
rs = pixel_control_rewards(self.obs[:, i])
np.testing.assert_allclose(self.expected[:, i], rs, rtol=1e-5)
@chex.all_variants()
def test_pixel_control_rewards_batch(self):
"""Tests for a batch."""
pixel_control_rewards = functools.partial(
general_value_functions.pixel_control_rewards, cell_size=self.cell_size)
pixel_control_rewards = self.variant(jax.vmap(
pixel_control_rewards, in_axes=(1,), out_axes=1))
# Test pseudo rewards.
rs = pixel_control_rewards(self.obs)
np.testing.assert_allclose(self.expected, rs, rtol=1e-5)
class FeatureControlTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.potential_discount = .99
self.features = np.array([
[[1., 2.], [2., 4.], [3., 6.]],
[[1., 2.], [0., 0.], [-1., -2.]]])
self.exp_feature = np.array([
[[2., 4.], [3., 6.]],
[[0., 0.], [-1., -2.]]])
self.exp_abs_change = np.array([
[[1., 2.], [1., 2.]],
[[1., 2.], [1., 2.]]])
self.exp_increase = np.array([
[[1., 2.], [1., 2.]],
[[-1., -2.], [-1., -2.]]])
self.exp_decrease = np.array([
[[-1., -2.], [-1., -2.]],
[[1., 2.], [1., 2.]]])
g = self.potential_discount
self.exp_potential = np.array([
[[g*2.-1., g*4.-2.], [g*3.-2., g*6.-4.]],
[[-1., -2.], [-g, -2.*g]]])
@chex.all_variants()
def test_feature_control_rewards_feature_batch(self):
"""Tests for a batch, cumulant_type='feature'."""
feature_control_rewards = self.variant(jax.vmap(functools.partial(
general_value_functions.feature_control_rewards,
cumulant_type='feature')))
# Test pseudo rewards.
rs = feature_control_rewards(self.features)
np.testing.assert_allclose(self.exp_feature, rs, rtol=1e-5)
@chex.all_variants()
def test_feature_control_rewards_abs_change_batch(self):
"""Tests for a batch, cumulant_type='absolute_change'."""
feature_control_rewards = self.variant(jax.vmap(functools.partial(
general_value_functions.feature_control_rewards,
cumulant_type='absolute_change')))
# Test pseudo rewards.
rs = feature_control_rewards(self.features)
np.testing.assert_allclose(self.exp_abs_change, rs, rtol=1e-5)
@chex.all_variants()
def test_feature_control_rewards_increase_batch(self):
"""Tests for a batch, cumulant_type='increase'."""
feature_control_rewards = self.variant(jax.vmap(functools.partial(
general_value_functions.feature_control_rewards,
cumulant_type='increase')))
# Test pseudo rewards.
rs = feature_control_rewards(self.features)
np.testing.assert_allclose(self.exp_increase, rs, rtol=1e-5)
@chex.all_variants()
def test_feature_control_rewards_decrease_batch(self):
"""Tests for a batch, cumulant_type='decrease'."""
feature_control_rewards = self.variant(jax.vmap(functools.partial(
general_value_functions.feature_control_rewards,
cumulant_type='decrease')))
# Test pseudo rewards.
rs = feature_control_rewards(self.features)
np.testing.assert_allclose(self.exp_decrease, rs, rtol=1e-5)
@chex.all_variants()
def test_feature_control_rewards_potential_batch(self):
"""Tests for a batch, cumulant_type='potential'."""
feature_control_rewards = self.variant(jax.vmap(functools.partial(
general_value_functions.feature_control_rewards,
cumulant_type='potential',
discount=self.potential_discount)))
# Test pseudo rewards.
rs = feature_control_rewards(self.features)
np.testing.assert_allclose(self.exp_potential, rs, rtol=1e-5)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/general_value_functions_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions implementing policy gradient losses.
Policy gradient algorithms directly update the policy of an agent based on
a stochastic estimate of the direction of steepest ascent in a score function
representing the expected return of that policy. This subpackage provides a
number of utility functions for implementing policy gradient algorithms for
discrete and continuous policies.
"""
from typing import Optional, Tuple
import chex
import jax
import jax.numpy as jnp
from rlax._src import distributions
from rlax._src import losses
Array = chex.Array
Scalar = chex.Scalar
def _clip_by_l2_norm(x: Array, max_norm: float) -> Array:
"""Clip gradients to maximum l2 norm `max_norm`."""
  # Compute the sum of squares and check whether it is nonzero.
sum_sq = jnp.sum(jnp.vdot(x, x))
nonzero = sum_sq > 0
# Compute the norm wherever sum_sq > 0 and leave it <= 0 otherwise. This makes
  # use of the "double where" trick; see
  # https://jax.readthedocs.io/en/latest/faq.html#gradients-contain-nan-where-using-where
  # for more info. In short this is necessary because, although the norm is
  # computed correctly where nonzero is true, ignoring this would leave nans on
  # the off-branches, which would leak through when computing gradients in the
  # backward pass.
sum_sq_ones = jnp.where(nonzero, sum_sq, jnp.ones_like(sum_sq))
norm = jnp.where(nonzero, jnp.sqrt(sum_sq_ones), sum_sq)
# Normalize by max_norm. Whenever norm < max_norm we're left with x (this
# happens trivially for indices where nonzero is false). Otherwise we're left
# with the desired x * max_norm / norm.
return (x * max_norm) / jnp.maximum(norm, max_norm)
def dpg_loss(
a_t: Array,
dqda_t: Array,
dqda_clipping: Optional[Scalar] = None,
use_stop_gradient: bool = True,
) -> Array:
"""Calculates the deterministic policy gradient (DPG) loss.
See "Deterministic Policy Gradient Algorithms" by Silver, Lever, Heess,
Degris, Wierstra, Riedmiller (http://proceedings.mlr.press/v32/silver14.pdf).
Args:
a_t: continuous-valued action at time t.
dqda_t: gradient of Q(s,a) wrt. a, evaluated at time t.
dqda_clipping: clips the gradient to have norm <= `dqda_clipping`.
use_stop_gradient: bool indicating whether or not to apply stop gradient
to targets.
Returns:
DPG loss.
"""
chex.assert_rank([a_t, dqda_t], 1)
chex.assert_type([a_t, dqda_t], float)
if dqda_clipping is not None:
dqda_t = _clip_by_l2_norm(dqda_t, dqda_clipping)
target_tm1 = dqda_t + a_t
target_tm1 = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(target_tm1), target_tm1)
return losses.l2_loss(target_tm1 - a_t)
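# Illustrative usage sketch, not part of the rlax API: shows the rank-1 shapes
# dpg_loss expects for a single continuous action. All values are made up.
def _example_dpg_loss_usage():
  a_t = jnp.array([0.1, -0.3, 0.5])     # continuous action taken at time t.
  dqda_t = jnp.array([0.2, 0.0, -0.1])  # gradient of Q(s, a) wrt. a at time t.
  # Returns a per-dimension l2 loss; its gradient wrt. a_t pushes the action
  # in the direction that increases the critic's Q estimate.
  return dpg_loss(a_t, dqda_t, dqda_clipping=1.0)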
def policy_gradient_loss(
logits_t: Array,
a_t: Array,
adv_t: Array,
w_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Calculates the policy gradient loss.
See "Simple Gradient-Following Algorithms for Connectionist RL" by Williams.
(http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf)
Args:
logits_t: a sequence of unnormalized action preferences.
a_t: a sequence of actions sampled from the preferences `logits_t`.
adv_t: the observed or estimated advantages from executing actions `a_t`.
w_t: a per timestep weighting for the loss.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
Loss whose gradient corresponds to a policy gradient update.
"""
chex.assert_rank([logits_t, a_t, adv_t, w_t], [2, 1, 1, 1])
chex.assert_type([logits_t, a_t, adv_t, w_t], [float, int, float, float])
log_pi_a_t = distributions.softmax().logprob(a_t, logits_t)
adv_t = jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(adv_t), adv_t)
loss_per_timestep = -log_pi_a_t * adv_t
return jnp.mean(loss_per_timestep * w_t)
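# Illustrative usage sketch, not part of the rlax API: a REINFORCE-style loss
# for a toy sequence of length 3 with 2 discrete actions; all values are made
# up and the per-timestep weights w_t are simply ones.
def _example_policy_gradient_loss_usage():
  logits_t = jnp.array([[1.0, 0.5], [0.2, 0.8], [0.0, 0.0]])  # [T, num_actions]
  a_t = jnp.array([0, 1, 1])            # sampled actions, integer dtype.
  adv_t = jnp.array([1.0, -0.5, 0.2])   # estimated advantages.
  w_t = jnp.ones_like(adv_t)            # per-timestep loss weights.
  return policy_gradient_loss(logits_t, a_t, adv_t, w_t)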
def entropy_loss(
logits_t: Array,
w_t: Array,
) -> Array:
"""Calculates the entropy regularization loss.
See "Function Optimization using Connectionist RL Algorithms" by Williams.
(https://www.tandfonline.com/doi/abs/10.1080/09540099108946587)
Args:
logits_t: a sequence of unnormalized action preferences.
w_t: a per timestep weighting for the loss.
Returns:
Entropy loss.
"""
chex.assert_rank([logits_t, w_t], [2, 1])
chex.assert_type([logits_t, w_t], float)
entropy_per_timestep = distributions.softmax().entropy(logits_t)
return -jnp.mean(entropy_per_timestep * w_t)
def _compute_advantages(logits_t: Array,
q_t: Array,
use_stop_gradient=True) -> Tuple[Array, Array]:
"""Computes summed advantage using logits and action values."""
policy_t = jax.nn.softmax(logits_t, axis=1)
# Avoid computing gradients for action_values.
q_t = jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(q_t), q_t)
baseline_t = jnp.sum(policy_t * q_t, axis=1)
adv_t = q_t - jnp.expand_dims(baseline_t, 1)
return policy_t, adv_t
def qpg_loss(
logits_t: Array,
q_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Computes the QPG (Q-based Policy Gradient) loss.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot (https://arxiv.org/abs/1810.09026).
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
QPG Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
policy_t, advantage_t = _compute_advantages(logits_t, q_t)
advantage_t = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(advantage_t), advantage_t)
policy_advantages = -policy_t * advantage_t
loss = jnp.mean(jnp.sum(policy_advantages, axis=1), axis=0)
return loss
def rm_loss(
logits_t: Array,
q_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Computes the RMPG (Regret Matching Policy Gradient) loss.
The gradient of this loss adapts the Regret Matching rule by weighting the
standard PG update with thresholded regret.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot (https://arxiv.org/abs/1810.09026).
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
RM Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
policy_t, advantage_t = _compute_advantages(logits_t, q_t)
action_regret_t = jax.nn.relu(advantage_t)
action_regret_t = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(action_regret_t),
action_regret_t)
policy_regret = -policy_t * action_regret_t
loss = jnp.mean(jnp.sum(policy_regret, axis=1), axis=0)
return loss
def rpg_loss(
logits_t: Array,
q_t: Array,
use_stop_gradient: bool = True,
) -> Array:
"""Computes the RPG (Regret Policy Gradient) loss.
The gradient of this loss adapts the Regret Matching rule by weighting the
standard PG update with regret.
See "Actor-Critic Policy Optimization in Partially Observable Multiagent
Environments" by Srinivasan, Lanctot (https://arxiv.org/abs/1810.09026).
Args:
logits_t: a sequence of unnormalized action preferences.
q_t: the observed or estimated action value from executing actions `a_t` at
time t.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
RPG Loss.
"""
chex.assert_rank([logits_t, q_t], 2)
chex.assert_type([logits_t, q_t], float)
_, adv_t = _compute_advantages(logits_t, q_t, use_stop_gradient)
regrets_t = jnp.sum(jax.nn.relu(adv_t), axis=1)
total_regret_t = jnp.mean(regrets_t, axis=0)
return total_regret_t
def clipped_surrogate_pg_loss(
prob_ratios_t: Array,
adv_t: Array,
epsilon: Scalar,
use_stop_gradient=True) -> Array:
"""Computes the clipped surrogate policy gradient loss.
L_clipₜ(θ) = - min(rₜ(θ)Âₜ, clip(rₜ(θ), 1-ε, 1+ε)Âₜ)
Where rₜ(θ) = π_θ(aₜ| sₜ) / π_θ_old(aₜ| sₜ) and Âₜ are the advantages.
See Proximal Policy Optimization Algorithms, Schulman et al.:
https://arxiv.org/abs/1707.06347
Args:
prob_ratios_t: Ratio of action probabilities for actions a_t:
rₜ(θ) = π_θ(aₜ| sₜ) / π_θ_old(aₜ| sₜ)
adv_t: the observed or estimated advantages from executing actions a_t.
    epsilon: Scalar value corresponding to how much to clip the objective.
use_stop_gradient: bool indicating whether or not to apply stop gradient to
advantages.
Returns:
Loss whose gradient corresponds to a clipped surrogate policy gradient
update.
"""
chex.assert_rank([prob_ratios_t, adv_t], [1, 1])
chex.assert_type([prob_ratios_t, adv_t], [float, float])
adv_t = jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(adv_t), adv_t)
clipped_ratios_t = jnp.clip(prob_ratios_t, 1. - epsilon, 1. + epsilon)
clipped_objective = jnp.fmin(prob_ratios_t * adv_t, clipped_ratios_t * adv_t)
return -jnp.mean(clipped_objective)
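# Illustrative usage sketch, not part of the rlax API: a PPO-style clipped
# surrogate loss on three made-up probability ratios and advantages.
def _example_clipped_surrogate_pg_loss_usage():
  prob_ratios_t = jnp.array([1.1, 0.7, 1.4])  # pi_new(a|s) / pi_old(a|s).
  adv_t = jnp.array([0.5, -0.2, 1.0])         # estimated advantages.
  return clipped_surrogate_pg_loss(prob_ratios_t, adv_t, epsilon=0.2)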
| rlax-master | rlax/_src/policy_gradients.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions implementing different exploration methods.
This file contains a (growing) list of exploration methods used by RL agents.
Currently, we support noise-based exploration methods, such as adding Gaussian
noise or temporally correlated noise drawn from an OU process.
We also support the computation of intrinsic rewards a la Agent57 / NGU style
exploration (see docstring), which is to be used as part of a recurrent cell to
process states and a growing memory of previously visited states.
"""
from typing import Optional, Union
import chex
import jax
import jax.numpy as jnp
from rlax._src import episodic_memory
Array = chex.Array
Scalar = chex.Scalar
def add_gaussian_noise(
key: Array,
action: Array,
stddev: float
) -> Array:
"""Returns continuous action with noise drawn from a Gaussian distribution.
Args:
key: a key from `jax.random`.
action: continuous action scalar or vector.
stddev: standard deviation of noise distribution.
Returns:
noisy action, of the same shape as input action.
"""
chex.assert_type(action, float)
noise = jax.random.normal(key, shape=action.shape) * stddev
return action + noise
def add_ornstein_uhlenbeck_noise(
key: Array,
action: Array,
noise_tm1: Array,
damping: float,
stddev: float
) -> Array:
"""Returns continuous action with noise from Ornstein-Uhlenbeck process.
See "On the theory of Brownian Motion" by Uhlenbeck and Ornstein.
(https://journals.aps.org/pr/abstract/10.1103/PhysRev.36.823).
Args:
key: a key from `jax.random`.
action: continuous action scalar or vector.
noise_tm1: noise sampled from OU process in previous timestep.
damping: parameter for controlling autocorrelation of OU process.
stddev: standard deviation of noise distribution.
Returns:
noisy action, of the same shape as input action.
"""
chex.assert_rank([action, noise_tm1], 1)
chex.assert_type([action, noise_tm1], float)
noise_t = (1. - damping) * noise_tm1 + jax.random.normal(
key, shape=action.shape) * stddev
return action + noise_t
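# Illustrative usage sketch, not part of the rlax API: adding exploration noise
# to a continuous action; keys, shapes and noise parameters are arbitrary.
def _example_noisy_action_usage():
  key_gauss, key_ou = jax.random.split(jax.random.PRNGKey(0))
  action = jnp.array([0.2, -0.4])
  noisy = add_gaussian_noise(key_gauss, action, stddev=0.1)
  # The OU variant additionally threads the previous noise sample through time
  # to produce temporally correlated exploration.
  noise_tm1 = jnp.zeros_like(action)
  noisy_ou = add_ornstein_uhlenbeck_noise(
      key_ou, action, noise_tm1, damping=0.15, stddev=0.2)
  return noisy, noisy_ou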
def add_dirichlet_noise(
key: Array,
prior: Array,
dirichlet_alpha: float,
dirichlet_fraction: float
) -> Array:
"""Returns discrete actions with noise drawn from a Dirichlet distribution.
See "Mastering the Game of Go without Human Knowledge" by Silver et. al. 2017
(https://discovery.ucl.ac.uk/id/eprint/10045895/1/agz_unformatted_nature.pdf),
"A General Reinforcement Learning Algorithm that Masters Chess, Shogi and
Go Through Self-Play" by Silver et. al. 2018
(http://airesearch.com/wp-content/uploads/2016/01/deepmind-mastering-go.pdf),
and "Mastering Atari, Go, Chess and Shogi by Planning with a Learned Model"
by Schrittwieser et. al., 2019 (https://arxiv.org/abs/1911.08265).
The AlphaZero family of algorithms adds noise sampled from a symmetric
Dirichlet distribution to the prior policy generated by MCTS. Because the
agent then samples from this new, noisy prior over actions, this encourages
better exploration of the root node's children.
Specifically, this computes:
noise ~ Dirichlet(alpha)
noisy_prior = (1 - fraction) * prior + fraction * noise
Note that alpha is a single float to draw from a symmetric Dirichlet.
For reference values, AlphaZero uses 0.3, 0.15, 0.03 for Chess, Shogi, and
Go respectively, and MuZero uses 0.25 for Atari.
Args:
key: a key from `jax.random`.
prior: 2-dim continuous prior policy vector of shapes [B, N], for B batch
size and N num_actions.
dirichlet_alpha: concentration parameter to parametrize Dirichlet
distribution.
dirichlet_fraction: float from 0 to 1 interpolating between using only the
prior policy or just the noise.
Returns:
    noisy prior policy, of the same shape as the input prior.
"""
chex.assert_rank(prior, 2)
chex.assert_type([dirichlet_alpha, dirichlet_fraction], float)
batch_size, num_actions = prior.shape
noise = jax.random.dirichlet(
key=key,
alpha=jnp.full(shape=(num_actions,), fill_value=dirichlet_alpha),
shape=(batch_size,))
noisy_prior = (1 - dirichlet_fraction) * prior + dirichlet_fraction * noise
return noisy_prior
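# Illustrative usage sketch, not part of the rlax API: AlphaZero-style root
# noise for a batch of two priors over three actions; the alpha and fraction
# values follow the reference values quoted in the docstring above.
def _example_dirichlet_noise_usage():
  key = jax.random.PRNGKey(0)
  prior = jnp.array([[0.5, 0.3, 0.2],
                     [0.2, 0.2, 0.6]])  # [B, num_actions] prior policies.
  return add_dirichlet_noise(
      key, prior, dirichlet_alpha=0.3, dirichlet_fraction=0.25)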
@chex.dataclass
class IntrinsicRewardState():
memory: jnp.ndarray
next_memory_index: Scalar = 0
distance_sum: Union[Array, Scalar] = 0
distance_count: Scalar = 0
def episodic_memory_intrinsic_rewards(
embeddings: Array,
num_neighbors: int,
reward_scale: float,
intrinsic_reward_state: Optional[IntrinsicRewardState] = None,
constant: float = 1e-3,
epsilon: float = 1e-4,
cluster_distance: float = 8e-3,
max_similarity: float = 8.,
max_memory_size: int = 30_000):
"""Compute intrinsic rewards for exploration via episodic memory.
This method is adopted from the intrinsic reward computation used in "Never
Give Up: Learning Directed Exploration Strategies" by Puigdomènech Badia et
al., (2020) (https://arxiv.org/abs/2003.13350) and "Agent57: Outperforming the
Atari Human Benchmark" by Puigdomènech Badia et al., (2020)
(https://arxiv.org/abs/2002.06038).
From an embedding, we compute the intra-episode intrinsic reward with respect
to a pre-existing set of embeddings.
NOTE: For this function to be jittable, static_argnums=[1,] must be passed, as
the internal jax.lax.top_k(neg_distances, num_neighbors) computation in
knn_query cannot be jitted with a dynamic num_neighbors that is passed as an
argument.
Args:
embeddings: Array, shaped [M, D] for number of new state embeddings M and
feature dim D.
num_neighbors: int for K neighbors used in kNN query
reward_scale: The β term used in the Agent57 paper to scale the reward.
intrinsic_reward_state: An IntrinsicRewardState namedtuple, containing
memory, next_memory_index, distance_sum, and distance_count.
      NOTE: On (only) the first call to episodic_memory_intrinsic_rewards, the
intrinsic_reward_state is optional, if None is given, an
IntrinsicRewardState will be initialized with default parameters,
specifically, the memory will be initialized to an array of jnp.inf of
shape [max_memory_size x feature dim D], and default values of 0 will be
provided for next_memory_index, distance_sum, and distance_count.
constant: float; small constant used for numerical stability used during
normalizing distances.
epsilon: float; small constant used for numerical stability when computing
kernel output.
cluster_distance: float; the ξ term used in the Agent57 paper to bound the
distance rate used in the kernel computation.
max_similarity: float; max limit of similarity; used to zero rewards when
similarity between memories is too high to be considered 'useful' for an
agent.
max_memory_size: int; the maximum number of memories to store. Note that
performance will be marginally faster if max_memory_size is an exact
multiple of M (the number of embeddings to add to memory per call to
episodic_memory_intrinsic_reward).
Returns:
Intrinsic reward for each embedding computed by using similarity measure to
memories and next IntrinsicRewardState.
"""
# Initialize IntrinsicRewardState if not provided to default values.
if not intrinsic_reward_state:
intrinsic_reward_state = IntrinsicRewardState(
memory=jnp.inf * jnp.ones(shape=(max_memory_size,
embeddings.shape[-1])))
# Pad the first num_neighbors entries with zeros.
padding = jnp.zeros((num_neighbors, embeddings.shape[-1]))
intrinsic_reward_state.memory = (
intrinsic_reward_state.memory.at[:num_neighbors, :].set(padding))
else:
chex.assert_shape(intrinsic_reward_state.memory,
(max_memory_size, embeddings.shape[-1]))
  # Compute the k nearest neighbours of the embeddings in memory, together with
  # their squared distances d²(xₖ, x). Results are not guaranteed to be ordered.
jit_knn_query = jax.jit(episodic_memory.knn_query, static_argnums=[2,])
knn_query_result = jit_knn_query(intrinsic_reward_state.memory, embeddings,
num_neighbors)
# Insert embeddings into memory in a ring buffer fashion.
memory = intrinsic_reward_state.memory
start_index = intrinsic_reward_state.next_memory_index % memory.shape[0]
indices = (jnp.arange(embeddings.shape[0]) + start_index) % memory.shape[0]
memory = jnp.asarray(memory).at[indices].set(embeddings)
nn_distances_sq = knn_query_result.neighbor_neg_distances
# Unpack running distance statistics, and update the running mean dₘ²
distance_sum = intrinsic_reward_state.distance_sum
distance_sum += jnp.sum(nn_distances_sq)
distance_counts = intrinsic_reward_state.distance_count
distance_counts += nn_distances_sq.size
# We compute the sum of a kernel similarity with the KNN and set to zero
# the reward when this similarity exceeds a given value (max_similarity)
# Compute rate = d(xₖ, x)² / dₘ²
mean_distance = distance_sum / distance_counts
distance_rate = nn_distances_sq / (mean_distance + constant)
# The distance rate becomes 0 if already small: r <- max(r-ξ, 0).
distance_rate = jnp.maximum(distance_rate - cluster_distance,
jnp.zeros_like(distance_rate))
# Compute the Kernel value K(xₖ, x) = ε/(rate + ε).
kernel_output = epsilon / (distance_rate + epsilon)
# Compute the similarity for the embedding x:
# s = √(Σ_{xₖ ∈ Nₖ} K(xₖ, x)) + c
similarity = jnp.sqrt(jnp.sum(kernel_output, axis=-1)) + constant
# Compute the intrinsic reward:
# r = 1 / s.
reward_new = jnp.ones_like(embeddings[..., 0]) / similarity
# Zero the reward if similarity is greater than max_similarity
# r <- 0 if s > sₘₐₓ otherwise r.
max_similarity_reached = similarity > max_similarity
reward = jnp.where(max_similarity_reached, 0, reward_new)
# r <- β * r
reward *= reward_scale
return reward, IntrinsicRewardState(
memory=memory,
next_memory_index=start_index + embeddings.shape[0] % max_memory_size,
distance_sum=distance_sum,
distance_count=distance_counts)
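# Illustrative usage sketch, not part of the rlax API: computing NGU-style
# intrinsic rewards for a few made-up embeddings. The first call initializes
# the IntrinsicRewardState; later calls thread it back in to grow the memory.
def _example_episodic_memory_intrinsic_rewards_usage():
  embeddings = jnp.arange(32.0).reshape(4, 8)  # [M, D] state embeddings.
  reward, state = episodic_memory_intrinsic_rewards(
      embeddings, num_neighbors=2, reward_scale=0.3,
      intrinsic_reward_state=None, max_memory_size=100)
  reward_next, state = episodic_memory_intrinsic_rewards(
      embeddings + 1.0, num_neighbors=2, reward_scale=0.3,
      intrinsic_reward_state=state, max_memory_size=100)
  return reward, reward_next, state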
| rlax-master | rlax/_src/exploration.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for episodic_memory.py."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import numpy as np
from rlax._src import episodic_memory
class KNNQueryTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.data = np.array([[0., 0.], [7.5, 1.], [40., 40.]])
self.query_points = np.array([[2.0, 1.3], [7.5, 0.0]])
@chex.all_variants()
def test_small_k_query(self):
num_neighbors = 2
expected_neighbors = np.array([[[0., 0.], [7.5, 1.]],
[[7.5, 1.], [0., 0.]]])
expected_neg_distances = np.array([[-5.69, -30.34], [-1., -56.25]])
expected_neighbor_indices = np.array([[0., 1.], [1., 0.]])
@self.variant
def query_variant(data, points):
return episodic_memory.knn_query(data, points, num_neighbors)
actual = query_variant(self.data, self.query_points)
np.testing.assert_allclose(actual.neighbors,
expected_neighbors,
atol=1e-6)
np.testing.assert_allclose(actual.neighbor_indices,
expected_neighbor_indices,
atol=1e-6)
np.testing.assert_allclose(actual.neighbor_neg_distances,
expected_neg_distances,
atol=1e-6)
@chex.all_variants()
@parameterized.named_parameters(('3neighbors', 3),
('5neighbors', 5))
def test_big_k_query(self, num_neighbors):
expected_neighbors = np.array([[[0., 0.], [7.5, 1.], [40., 40.]],
[[7.5, 1.], [0., 0.], [40., 40.]]])
expected_neg_distances = np.array([[-5.69, -30.34, -2941.69],
[-1., -56.25, -2656.25]])
expected_neighbor_indices = np.array([[0, 1, 2], [1, 0, 2],])
@self.variant
def query_variant(data, points):
return episodic_memory.knn_query(data, points, num_neighbors)
actual = query_variant(self.data, self.query_points)
np.testing.assert_allclose(actual.neighbors,
expected_neighbors,
atol=1e-6)
np.testing.assert_allclose(actual.neighbor_indices,
expected_neighbor_indices,
atol=1e-6)
np.testing.assert_allclose(actual.neighbor_neg_distances,
expected_neg_distances,
atol=1e-6)
def test_empty(self):
data = np.array([])
self.query_points = np.array([])
with self.assertRaises(AssertionError):
episodic_memory.knn_query(data, self.query_points, num_neighbors=2)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/episodic_memory_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `value_learning.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import numpy as np
from rlax._src import distributions
from rlax._src import vtrace
class VTraceTest(parameterized.TestCase):
def setUp(self):
super().setUp()
behavior_policy_logits = np.array(
[[[8.9, 0.7], [5.0, 1.0], [0.6, 0.1], [-0.9, -0.1]],
[[0.3, -5.0], [1.0, -8.0], [0.3, 1.7], [4.7, 3.3]]],
dtype=np.float32)
target_policy_logits = np.array(
[[[0.4, 0.5], [9.2, 8.8], [0.7, 4.4], [7.9, 1.4]],
[[1.0, 0.9], [1.0, -1.0], [-4.3, 8.7], [0.8, 0.3]]],
dtype=np.float32)
actions = np.array([[0, 1, 0, 0], [1, 0, 0, 1]], dtype=np.int32)
self._rho_tm1 = distributions.categorical_importance_sampling_ratios(
target_policy_logits, behavior_policy_logits, actions)
self._rewards = np.array(
[[-1.3, -1.3, 2.3, 42.0],
[1.3, 5.3, -3.3, -5.0]],
dtype=np.float32)
self._discounts = np.array(
[[0., 0.89, 0.85, 0.99],
[0.88, 1., 0.83, 0.95]],
dtype=np.float32)
self._values = np.array(
[[2.1, 1.1, -3.1, 0.0],
[3.1, 0.1, -1.1, 7.4]],
dtype=np.float32)
self._bootstrap_value = np.array([8.4, -1.2], dtype=np.float32)
self._inputs = [
self._rewards, self._discounts, self._rho_tm1,
self._values, self._bootstrap_value]
self._clip_rho_threshold = 1.0
self._clip_pg_rho_threshold = 5.0
self._lambda = 1.0
self._expected_td = np.array(
[[-1.6155143, -3.4973226, 1.8670533, 5.0316002e1],
[1.4662437, 3.6116405, -8.3327293e-5, -1.3540000e1]],
dtype=np.float32)
self._expected_pg = np.array(
[[-1.6155143, -3.4973226, 1.8670534, 5.0316002e1],
[1.4662433, 3.6116405, -8.3369283e-05, -1.3540000e+1]],
dtype=np.float32)
@chex.all_variants()
def test_vtrace_td_error_and_advantage(self):
"""Tests for a full batch."""
vtrace_td_error_and_advantage = self.variant(jax.vmap(functools.partial(
vtrace.vtrace_td_error_and_advantage,
clip_rho_threshold=self._clip_rho_threshold, lambda_=self._lambda)))
# Get function arguments.
r_t, discount_t, rho_tm1, v_tm1, bootstrap_value = self._inputs
v_t = np.concatenate([v_tm1[:, 1:], bootstrap_value[:, None]], axis=1)
# Compute vtrace output.
vtrace_output = vtrace_td_error_and_advantage(
v_tm1, v_t, r_t, discount_t, rho_tm1)
# Test output.
np.testing.assert_allclose(
self._expected_td, vtrace_output.errors, rtol=1e-3)
np.testing.assert_allclose(
self._expected_pg, vtrace_output.pg_advantage, rtol=1e-3)
@chex.all_variants()
@parameterized.named_parameters(
('scalar_lambda',
np.array([[0., 1., 1., 0., 0., 1., 1., 1.]], dtype=np.float32),
np.array([1.], dtype=np.float32)),
('vector_lambda',
np.array([[0., 1., 1., 0., 0., 1., 1., 1.]], dtype=np.float32),
np.array([[1., 1., 1., 1., 1., 1., 1., 1.]], dtype=np.float32)),
('vector_lambda_truncation',
np.array([[0., 1., 1., 1., 0., 1., 1., 1.]], dtype=np.float32),
np.array([[1., 1., 1., 0., 1., 1., 1., 1.]], dtype=np.float32)),
)
def test_vtrace_lambda_multiple_episodes_per_trace(self, discount_t, lambda_):
"""Tests for a full batch."""
vtrace_ = self.variant(
jax.vmap(
functools.partial(
vtrace.vtrace, clip_rho_threshold=self._clip_rho_threshold)))
# Get function arguments.
r_t, rho_tm1, v_tm1 = np.random.random((3, 1, 8))
bootstrap_value = np.array([10.], dtype=np.float32)
v_t = np.concatenate([v_tm1[:, 1:], bootstrap_value[:, None]], axis=1)
# Full trace.
vtrace_output = vtrace_(v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_)
# First episode trace.
vtrace_output_ep1 = vtrace_(v_tm1[:4], v_t[:4], r_t[:4], discount_t[:4],
rho_tm1[:4], lambda_[:4])
# Second episode trace.
vtrace_output_ep2 = vtrace_(v_tm1[4:], v_t[4:], r_t[4:], discount_t[4:],
rho_tm1[4:], lambda_[4:])
# Test output.
np.testing.assert_allclose(vtrace_output[:4], vtrace_output_ep1, rtol=1e-3)
np.testing.assert_allclose(vtrace_output[4:], vtrace_output_ep2, rtol=1e-3)
@chex.all_variants()
def test_lambda_q_estimate(self):
"""Tests for a full batch."""
lambda_ = 0.8
vtrace_td_error_and_advantage = self.variant(jax.vmap(functools.partial(
vtrace.vtrace_td_error_and_advantage,
clip_rho_threshold=self._clip_rho_threshold, lambda_=lambda_)))
# Get function arguments.
r_t, discount_t, rho_tm1, v_tm1, bootstrap_value = self._inputs
v_t = np.concatenate([v_tm1[:, 1:], bootstrap_value[:, None]], axis=1)
# Compute vtrace output.
vtrace_output = vtrace_td_error_and_advantage(
v_tm1, v_t, r_t, discount_t, rho_tm1)
expected_vs = vtrace_output.errors + v_tm1
clipped_rho_tm1 = np.minimum(self._clip_rho_threshold, rho_tm1)
vs_from_q = v_tm1 + clipped_rho_tm1 * (vtrace_output.q_estimate - v_tm1)
# Test output.
np.testing.assert_allclose(expected_vs, vs_from_q, rtol=1e-3)
@chex.all_variants()
def test_leaky_and_non_leaky_vtrace(self):
"""Tests for a full batch."""
vtrace_fn = self.variant(jax.vmap(functools.partial(
vtrace.vtrace, lambda_=self._lambda)))
leaky_vtrace_fn = self.variant(jax.vmap(functools.partial(
vtrace.leaky_vtrace, alpha_=1., lambda_=self._lambda)))
# Get function arguments.
r_t, discount_t, rho_tm1, v_tm1, bootstrap_value = self._inputs
v_t = np.concatenate([v_tm1[:, 1:], bootstrap_value[:, None]], axis=1)
# Compute vtrace and leaky vtrace output.
vtrace_output = vtrace_fn(v_tm1, v_t, r_t, discount_t, rho_tm1)
leaky_vtrace_output = leaky_vtrace_fn(v_tm1, v_t, r_t, discount_t, rho_tm1)
# Test output.
np.testing.assert_allclose(vtrace_output, leaky_vtrace_output, rtol=1e-3)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/vtrace_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions for calculating multistep returns.
In this subpackage we expose a number of functions that may be used to compute
multistep truncated bootstrapped estimates of the return (the discounted sum of
rewards collected by an agent). These estimate compute returns from trajectories
of experience; trajectories are not assumed to align with episode boundaries,
and bootstrapping is used to estimate returns beyond the end of a trajectory.
"""
from typing import Union
import chex
import jax
import jax.numpy as jnp
from rlax._src import base
Array = chex.Array
Scalar = chex.Scalar
Numeric = chex.Numeric
def lambda_returns(
r_t: Array,
discount_t: Array,
v_t: Array,
lambda_: Numeric = 1.,
stop_target_gradients: bool = False,
) -> Array:
"""Estimates a multistep truncated lambda return from a trajectory.
  Given a trajectory of length `T+1`, generated under some policy π, for each
time-step `t` we can estimate a target return `G_t`, by combining rewards,
discounts, and state values, according to a mixing parameter `lambda`.
The parameter `lambda_` mixes the different multi-step bootstrapped returns,
corresponding to accumulating `k` rewards and then bootstrapping using `v_t`.
rₜ₊₁ + γₜ₊₁ vₜ₊₁
rₜ₊₁ + γₜ₊₁ rₜ₊₂ + γₜ₊₁ γₜ₊₂ vₜ₊₂
     rₜ₊₁ + γₜ₊₁ rₜ₊₂ + γₜ₊₁ γₜ₊₂ rₜ₊₃ + γₜ₊₁ γₜ₊₂ γₜ₊₃ vₜ₊₃
The returns are computed recursively, from `G_{T-1}` to `G_0`, according to:
Gₜ = rₜ₊₁ + γₜ₊₁ [(1 - λₜ₊₁) vₜ₊₁ + λₜ₊₁ Gₜ₊₁].
In the `on-policy` case, we estimate a return target `G_t` for the same
policy π that was used to generate the trajectory. In this setting the
parameter `lambda_` is typically a fixed scalar factor. Depending
on how values `v_t` are computed, this function can be used to construct
targets for different multistep reinforcement learning updates:
TD(λ): `v_t` contains the state value estimates for each state under π.
Q(λ): `v_t = max(q_t, axis=-1)`, where `q_t` estimates the action values.
Sarsa(λ): `v_t = q_t[..., a_t]`, where `q_t` estimates the action values.
In the `off-policy` case, the mixing factor is a function of state, and
different definitions of `lambda` implement different off-policy corrections:
Per-decision importance sampling: λₜ = λ ρₜ = λ [π(aₜ|sₜ) / μ(aₜ|sₜ)]
V-trace, as instantiated in IMPALA: λₜ = min(1, ρₜ)
Note that the second option is equivalent to applying per-decision importance
sampling, but using an adaptive λ(ρₜ) = min(1/ρₜ, 1), such that the effective
bootstrap parameter at time t becomes λₜ = λ(ρₜ) * ρₜ = min(1, ρₜ).
This is the interpretation used in the ABQ(ζ) algorithm (Mahmood 2017).
Of course this can be augmented to include an additional factor λ. For
instance we could use V-trace with a fixed additional parameter λ = 0.9, by
setting λₜ = 0.9 * min(1, ρₜ) or, alternatively (but not equivalently),
λₜ = min(0.9, ρₜ).
  Estimated returns are then often used to define a td error, e.g.: ρₜ(Gₜ - vₜ).
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/sutton/book/ebook/node74.html).
Args:
r_t: sequence of rewards rₜ for timesteps t in [1, T].
discount_t: sequence of discounts γₜ for timesteps t in [1, T].
v_t: sequence of state values estimates under π for timesteps t in [1, T].
lambda_: mixing parameter; a scalar or a vector for timesteps t in [1, T].
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Multistep lambda returns.
"""
chex.assert_rank([r_t, discount_t, v_t, lambda_], [1, 1, 1, {0, 1}])
chex.assert_type([r_t, discount_t, v_t, lambda_], float)
chex.assert_equal_shape([r_t, discount_t, v_t])
# If scalar make into vector.
lambda_ = jnp.ones_like(discount_t) * lambda_
# Work backwards to compute `G_{T-1}`, ..., `G_0`.
def _body(acc, xs):
returns, discounts, values, lambda_ = xs
acc = returns + discounts * ((1-lambda_) * values + lambda_ * acc)
return acc, acc
_, returns = jax.lax.scan(
_body, v_t[-1], (r_t, discount_t, v_t, lambda_), reverse=True)
return jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(returns),
returns)
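# Illustrative usage sketch, not part of the rlax API: TD(lambda) targets for a
# trajectory of length 3. A zero discount marks the end of an episode; all
# numbers are arbitrary.
def _example_lambda_returns_usage():
  r_t = jnp.array([1.0, 0.0, 0.5])           # rewards r_1 ... r_T.
  discount_t = jnp.array([0.99, 0.99, 0.0])  # discounts gamma_1 ... gamma_T.
  v_t = jnp.array([2.0, 1.5, 0.7])           # bootstrap values v_1 ... v_T.
  return lambda_returns(r_t, discount_t, v_t, lambda_=0.9)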
def n_step_bootstrapped_returns(
r_t: Array,
discount_t: Array,
v_t: Array,
n: int,
lambda_t: Numeric = 1.,
stop_target_gradients: bool = False,
) -> Array:
"""Computes strided n-step bootstrapped return targets over a sequence.
The returns are computed according to the below equation iterated `n` times:
Gₜ = rₜ₊₁ + γₜ₊₁ [(1 - λₜ₊₁) vₜ₊₁ + λₜ₊₁ Gₜ₊₁].
When lambda_t == 1. (default), this reduces to
Gₜ = rₜ₊₁ + γₜ₊₁ * (rₜ₊₂ + γₜ₊₂ * (... * (rₜ₊ₙ + γₜ₊ₙ * vₜ₊ₙ ))).
Args:
r_t: rewards at times [1, ..., T].
discount_t: discounts at times [1, ..., T].
v_t: state or state-action values to bootstrap from at time [1, ...., T].
n: number of steps over which to accumulate reward before bootstrapping.
lambda_t: lambdas at times [1, ..., T]. Shape is [], or [T-1].
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
estimated bootstrapped returns at times [0, ...., T-1]
"""
chex.assert_rank([r_t, discount_t, v_t, lambda_t], [1, 1, 1, {0, 1}])
chex.assert_type([r_t, discount_t, v_t, lambda_t], float)
chex.assert_equal_shape([r_t, discount_t, v_t])
seq_len = r_t.shape[0]
# Maybe change scalar lambda to an array.
lambda_t = jnp.ones_like(discount_t) * lambda_t
# Shift bootstrap values by n and pad end of sequence with last value v_t[-1].
pad_size = min(n - 1, seq_len)
targets = jnp.concatenate([v_t[n - 1:], jnp.array([v_t[-1]] * pad_size)])
# Pad sequences. Shape is now (T + n - 1,).
r_t = jnp.concatenate([r_t, jnp.zeros(n - 1)])
discount_t = jnp.concatenate([discount_t, jnp.ones(n - 1)])
lambda_t = jnp.concatenate([lambda_t, jnp.ones(n - 1)])
v_t = jnp.concatenate([v_t, jnp.array([v_t[-1]] * (n - 1))])
# Work backwards to compute n-step returns.
for i in reversed(range(n)):
r_ = r_t[i:i + seq_len]
discount_ = discount_t[i:i + seq_len]
lambda_ = lambda_t[i:i + seq_len]
v_ = v_t[i:i + seq_len]
targets = r_ + discount_ * ((1. - lambda_) * v_ + lambda_ * targets)
return jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(targets), targets)
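# Illustrative usage sketch, not part of the rlax API: 2-step bootstrapped
# return targets for the same style of length-3 trajectory; values are made up.
def _example_n_step_bootstrapped_returns_usage():
  r_t = jnp.array([1.0, 0.0, 0.5])
  discount_t = jnp.array([0.99, 0.99, 0.99])
  v_t = jnp.array([2.0, 1.5, 0.7])
  return n_step_bootstrapped_returns(r_t, discount_t, v_t, n=2)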
def discounted_returns(
r_t: Array,
discount_t: Array,
v_t: Array,
stop_target_gradients: bool = False,
) -> Array:
"""Calculates a discounted return from a trajectory.
The returns are computed recursively, from `G_{T-1}` to `G_0`, according to:
Gₜ = rₜ₊₁ + γₜ₊₁ Gₜ₊₁.
See "Reinforcement Learning: An Introduction" by Sutton and Barto.
(http://incompleteideas.net/sutton/book/ebook/node61.html).
Args:
r_t: reward sequence at time t.
discount_t: discount sequence at time t.
v_t: value sequence or scalar at time t.
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Discounted returns.
"""
chex.assert_rank([r_t, discount_t, v_t], [1, 1, {0, 1}])
chex.assert_type([r_t, discount_t, v_t], float)
# If scalar make into vector.
bootstrapped_v = jnp.ones_like(discount_t) * v_t
return lambda_returns(r_t, discount_t, bootstrapped_v, lambda_=1.,
stop_target_gradients=stop_target_gradients)
def importance_corrected_td_errors(
r_t: Array,
discount_t: Array,
rho_tm1: Array,
lambda_: Array,
values: Array,
stop_target_gradients: bool = False,
) -> Array:
"""Computes the multistep td errors with per decision importance sampling.
Given a trajectory of length `T+1`, generated under some policy π, for each
time-step `t` we can estimate a multistep temporal difference error δₜ(ρ,λ),
by combining rewards, discounts, and state values, according to a mixing
parameter `λ` and importance sampling ratios ρₜ = π(aₜ|sₜ) / μ(aₜ|sₜ):
td-errorₜ = ρₜ δₜ(ρ,λ)
δₜ(ρ,λ) = δₜ + ρₜ₊₁ λₜ₊₁ γₜ₊₁ δₜ₊₁(ρ,λ),
where δₜ = rₜ₊₁ + γₜ₊₁ vₜ₊₁ - vₜ is the one step, temporal difference error
for the agent's state value estimates. This is equivalent to computing
the λ-return with λₜ = ρₜ (e.g. using the `lambda_returns` function from
above), and then computing errors as td-errorₜ = ρₜ(Gₜ - vₜ).
See "A new Q(λ) with interim forward view and Monte Carlo equivalence"
by Sutton et al. (http://proceedings.mlr.press/v32/sutton14.html).
Args:
r_t: sequence of rewards rₜ for timesteps t in [1, T].
discount_t: sequence of discounts γₜ for timesteps t in [1, T].
rho_tm1: sequence of importance ratios for all timesteps t in [0, T-1].
lambda_: mixing parameter; scalar or have per timestep values in [1, T].
values: sequence of state values under π for all timesteps t in [0, T].
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Off-policy estimates of the multistep td errors.
"""
chex.assert_rank([r_t, discount_t, rho_tm1, values], [1, 1, 1, 1])
chex.assert_type([r_t, discount_t, rho_tm1, values], float)
chex.assert_equal_shape([r_t, discount_t, rho_tm1, values[1:]])
v_tm1 = values[:-1] # Predictions to compute errors for.
v_t = values[1:] # Values for bootstrapping.
rho_t = jnp.concatenate((rho_tm1[1:], jnp.array([1.]))) # Unused dummy value.
lambda_ = jnp.ones_like(discount_t) * lambda_ # If scalar, make into vector.
# Compute the one step temporal difference errors.
one_step_delta = r_t + discount_t * v_t - v_tm1
# Work backwards to compute `delta_{T-1}`, ..., `delta_0`.
def _body(acc, xs):
deltas, discounts, rho_t, lambda_ = xs
acc = deltas + discounts * rho_t * lambda_ * acc
return acc, acc
_, errors = jax.lax.scan(
_body, 0.0, (one_step_delta, discount_t, rho_t, lambda_), reverse=True)
errors = rho_tm1 * errors
return jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(errors + v_tm1) - v_tm1, errors)
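# Illustrative usage sketch, not part of the rlax API: importance-corrected TD
# errors for a length-3 trajectory; the importance ratios rho are made up and
# `values` holds one more entry than the rewards (v(s_0) ... v(s_3)).
def _example_importance_corrected_td_errors_usage():
  r_t = jnp.array([1.0, 0.0, 0.5])
  discount_t = jnp.array([0.99, 0.99, 0.0])
  rho_tm1 = jnp.array([1.2, 0.8, 1.0])
  values = jnp.array([1.2, 2.0, 1.5, 0.7])
  return importance_corrected_td_errors(
      r_t, discount_t, rho_tm1, lambda_=0.9, values=values)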
def truncated_generalized_advantage_estimation(
r_t: Array,
discount_t: Array,
lambda_: Union[Array, Scalar],
values: Array,
stop_target_gradients: bool = False,
) -> Array:
"""Computes truncated generalized advantage estimates for a sequence length k.
The advantages are computed in a backwards fashion according to the equation:
  Âₜ = δₜ + (γλ) * δₜ₊₁ + ... + (γλ)ᵏ⁻ᵗ⁻¹ * δₖ₋₁
where δₜ = rₜ₊₁ + γₜ₊₁ * v(sₜ₊₁) - v(sₜ).
See Proximal Policy Optimization Algorithms, Schulman et al.:
https://arxiv.org/abs/1707.06347
Note: This paper uses a different notation than the RLax standard
convention that follows Sutton & Barto. We use rₜ₊₁ to denote the reward
received after acting in state sₜ, while the PPO paper uses rₜ.
Args:
r_t: Sequence of rewards at times [1, k]
discount_t: Sequence of discounts at times [1, k]
lambda_: Mixing parameter; a scalar or sequence of lambda_t at times [1, k]
values: Sequence of values under π at times [0, k]
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Multistep truncated generalized advantage estimation at times [0, k-1].
"""
chex.assert_rank([r_t, values, discount_t], 1)
chex.assert_type([r_t, values, discount_t], float)
lambda_ = jnp.ones_like(discount_t) * lambda_ # If scalar, make into vector.
delta_t = r_t + discount_t * values[1:] - values[:-1]
# Iterate backwards to calculate advantages.
def _body(acc, xs):
deltas, discounts, lambda_ = xs
acc = deltas + discounts * lambda_ * acc
return acc, acc
_, advantage_t = jax.lax.scan(
_body, 0.0, (delta_t, discount_t, lambda_), reverse=True)
return jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(advantage_t),
advantage_t)
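# Illustrative usage sketch, not part of the rlax API: GAE(lambda) advantages
# for a length-3 trajectory; note that `values` has one more entry than the
# rewards, and all numbers are arbitrary.
def _example_gae_usage():
  r_t = jnp.array([1.0, 0.0, 0.5])
  discount_t = jnp.array([0.99, 0.99, 0.0])
  values = jnp.array([1.2, 2.0, 1.5, 0.7])
  return truncated_generalized_advantage_estimation(
      r_t, discount_t, 0.95, values)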
def general_off_policy_returns_from_action_values(
q_t: Array,
a_t: Array,
r_t: Array,
discount_t: Array,
c_t: Array,
pi_t: Array,
stop_target_gradients: bool = False,
) -> Array:
"""Calculates targets for various off-policy correction algorithms.
Given a window of experience of length `K`, generated by a behaviour policy μ,
for each time-step `t` we can estimate the return `G_t` from that step
onwards, under some target policy π, using the rewards in the trajectory, the
actions selected by μ and the action-values under π, according to equation:
Gₜ = rₜ₊₁ + γₜ₊₁ * (E[q(aₜ₊₁)] - cₜ * q(aₜ₊₁) + cₜ * Gₜ₊₁),
where, depending on the choice of `c_t`, the algorithm implements:
    Importance Sampling:            c_t = π(x_t, a_t) / μ(x_t, a_t),
    Harutyunyan et al.'s Q(lambda): c_t = λ,
    Precup et al.'s Tree-Backup:    c_t = π(x_t, a_t),
    Munos et al.'s Retrace:         c_t = λ min(1, π(x_t, a_t) / μ(x_t, a_t)).
See "Safe and Efficient Off-Policy Reinforcement Learning" by Munos et al.
(https://arxiv.org/abs/1606.02647).
Args:
q_t: Q-values at times [1, ..., K - 1].
a_t: action index at times [1, ..., K - 1].
r_t: reward at times [1, ..., K - 1].
discount_t: discount at times [1, ..., K - 1].
c_t: importance weights at times [1, ..., K - 1].
pi_t: target policy probs at times [1, ..., K - 1].
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Off-policy estimates of the generalized returns from states visited at times
[0, ..., K - 1].
"""
chex.assert_rank([q_t, a_t, r_t, discount_t, c_t, pi_t], [2, 1, 1, 1, 1, 2])
chex.assert_type([q_t, a_t, r_t, discount_t, c_t, pi_t],
[float, int, float, float, float, float])
chex.assert_equal_shape(
[q_t[..., 0], a_t, r_t, discount_t, c_t, pi_t[..., 0]])
# Get the expected values and the values of actually selected actions.
exp_q_t = (pi_t * q_t).sum(axis=-1)
# The generalized returns are independent of Q-values and cs at the final
# state.
q_a_t = base.batched_index(q_t, a_t)[:-1]
c_t = c_t[:-1]
return general_off_policy_returns_from_q_and_v(
q_a_t, exp_q_t, r_t, discount_t, c_t, stop_target_gradients)
def general_off_policy_returns_from_q_and_v(
q_t: Array,
v_t: Array,
r_t: Array,
discount_t: Array,
c_t: Array,
stop_target_gradients: bool = False,
) -> Array:
"""Calculates targets for various off-policy evaluation algorithms.
Given a window of experience of length `K+1`, generated by a behaviour policy
μ, for each time-step `t` we can estimate the return `G_t` from that step
onwards, under some target policy π, using the rewards in the trajectory, the
values under π of states and actions selected by μ, according to equation:
    Gₜ = rₜ₊₁ + γₜ₊₁ * (vₜ₊₁ - cₜ₊₁ * q(aₜ₊₁) + cₜ₊₁ * Gₜ₊₁),
where, depending on the choice of `c_t`, the algorithm implements:
    Importance Sampling:            c_t = π(x_t, a_t) / μ(x_t, a_t),
    Harutyunyan et al.'s Q(lambda): c_t = λ,
    Precup et al.'s Tree-Backup:    c_t = π(x_t, a_t),
    Munos et al.'s Retrace:         c_t = λ min(1, π(x_t, a_t) / μ(x_t, a_t)).
See "Safe and Efficient Off-Policy Reinforcement Learning" by Munos et al.
(https://arxiv.org/abs/1606.02647).
Args:
q_t: Q-values under π of actions executed by μ at times [1, ..., K - 1].
v_t: Values under π at times [1, ..., K].
r_t: rewards at times [1, ..., K].
discount_t: discounts at times [1, ..., K].
c_t: weights at times [1, ..., K - 1].
stop_target_gradients: bool indicating whether or not to apply stop gradient
to targets.
Returns:
Off-policy estimates of the generalized returns from states visited at times
[0, ..., K - 1].
"""
chex.assert_rank([q_t, v_t, r_t, discount_t, c_t], 1)
chex.assert_type([q_t, v_t, r_t, discount_t, c_t], float)
chex.assert_equal_shape([q_t, v_t[:-1], r_t[:-1], discount_t[:-1], c_t])
g = r_t[-1] + discount_t[-1] * v_t[-1] # G_K-1.
def _body(acc, xs):
reward, discount, c, v, q = xs
acc = reward + discount * (v - c * q + c * acc)
return acc, acc
_, returns = jax.lax.scan(
_body, g, (r_t[:-1], discount_t[:-1], c_t, v_t[:-1], q_t), reverse=True)
returns = jnp.concatenate([returns, g[jnp.newaxis]], axis=0)
return jax.lax.select(stop_target_gradients,
jax.lax.stop_gradient(returns),
returns)
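# Illustrative usage sketch, not part of the rlax API: Retrace-style return
# targets from Q and V estimates over a window of length K = 3; the truncated
# importance weights c_t and all other numbers are made up.
def _example_off_policy_returns_usage():
  q_t = jnp.array([1.0, 0.8])                 # Q under pi at times [1, K-1].
  v_t = jnp.array([1.1, 0.9, 0.5])            # V under pi at times [1, K].
  r_t = jnp.array([0.5, 0.0, 1.0])            # rewards at times [1, K].
  discount_t = jnp.array([0.99, 0.99, 0.99])  # discounts at times [1, K].
  c_t = jnp.array([0.9, 1.0])                 # weights at times [1, K-1].
  return general_off_policy_returns_from_q_and_v(
      q_t, v_t, r_t, discount_t, c_t)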
| rlax-master | rlax/_src/multistep.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions implementing PopArt."""
import collections
from typing import Mapping, Tuple
import chex
import jax
import jax.numpy as jnp
from rlax._src import base
Array = chex.Array
LinearParams = Mapping[str, Array]
PopArtState = collections.namedtuple(
"PopArtState", ["shift", "scale", "second_moment"])
PopArtOutput = collections.namedtuple(
"PopArtOutput", ["normalized", "unnormalized"])
def _cross_replica_scatter_add(source: Array, indices: Array, updates: Array,
axis_name):
"""tf.scatter_add, but with JAX, cross replica, and without state.
Args:
source: An array of shape [O].
indices: An array indicating which index each update is for.
updates: The updates to apply to `source`. Of same shape as indices.
axis_name: What axis to aggregate over, if str. If passed an iterable,
aggregates over multiple axes. Defaults to no aggregation, i.e. None.
Returns:
An array of shape [O], which is source + the scattered updates from all
replicas.
"""
assert updates.shape == indices.shape
assert jnp.issubdtype(indices.dtype, jnp.integer)
assert source.ndim == 1
# Flatten indices, updates.
num_classes = source.shape[0]
indices = jnp.reshape(indices, [-1])
updates = jnp.reshape(updates, [-1])
# Scatter updates according to value of indices.
updates_at_idxs = updates[..., None] * base.one_hot(indices, num_classes)
# Aggregate locally first, then across replicas.
total_updates = jnp.sum(updates_at_idxs, axis=0)
if axis_name is not None:
axis_names = (axis_name,) if isinstance(axis_name, str) else axis_name
for a_name in axis_names:
total_updates = jax.lax.psum(total_updates, axis_name=a_name)
return source + total_updates
def normalize(state: PopArtState, unnormalized: Array, indices: Array) -> Array:
"""Returns normalized values.
Args:
state: The PopArt summary stats.
unnormalized: unnormalized values that we applied PopArt to.
indices: Which scale and shifts to use
Returns:
Normalized PopArt values.
"""
scale = state.scale[indices]
shift = state.shift[indices]
normalized = (unnormalized - shift) / scale
return normalized
def unnormalize(state: PopArtState, normalized: Array, indices: Array) -> Array:
"""Returns unnormalized values.
Args:
state: The PopArt summary stats.
normalized: normalized values that we apply PopArt to.
indices: Which scale and shifts to use
Returns:
Unnormalized PopArt values.
"""
scale = state.scale[indices]
shift = state.shift[indices]
unnormalized = scale * normalized + shift
return unnormalized
def unnormalize_linear(state: PopArtState, inputs: Array,
indices: Array) -> PopArtOutput:
"""Selects and unnormalizes output of a Linear.
Args:
state: The PopArt summary stats.
inputs: The (normalized) output of the Linear that we apply PopArt to.
indices: Which indices of `inputs` to use.
Returns:
PopArtOutput, a tuple of the normalized and unnormalized PopArt values.
"""
assert jnp.issubdtype(indices.dtype, jnp.integer)
assert indices.shape == inputs.shape[:-1]
normalized = jnp.take_along_axis(inputs, indices[..., None], axis=-1)
normalized = jnp.squeeze(normalized, axis=-1)
return PopArtOutput(normalized, unnormalize(state, normalized, indices))
def art(state: PopArtState,
targets: Array,
indices: Array,
step_size: float,
scale_lb: float,
scale_ub: float,
axis_name=None) -> PopArtState:
"""Adaptively rescale targets.
Args:
state: The PopArt summary stats.
targets: targets which are rescaled.
indices: Which indices of the state to use.
step_size: The step size for learning the scale & shift parameters.
scale_lb: Lower bound for the scale.
scale_ub: Upper bound for the scale.
axis_name: What axis to aggregate over, if str. If passed an iterable,
aggregates over multiple axes. Defaults to no aggregation, i.e. None.
Returns:
New popart state which can be used to rescale targets.
"""
assert targets.shape == indices.shape
assert jnp.issubdtype(indices.dtype, jnp.integer)
# Update shift.
shift_gather = state.shift[indices]
shift_update = step_size * (targets - shift_gather)
shift_new = _cross_replica_scatter_add(state.shift, indices, shift_update,
axis_name)
# Update second moment.
second_moment_gather = state.second_moment[indices]
second_moment_update = step_size * (
jnp.square(targets) - second_moment_gather)
second_moment_new = _cross_replica_scatter_add(state.second_moment, indices,
second_moment_update,
axis_name)
# Derive scale (stdev) from second moment and mean.
scale_sq = second_moment_new - jnp.square(shift_new)
scale_sq = jnp.clip(scale_sq, scale_lb**2, scale_ub**2)
scale_new = jnp.sqrt(scale_sq)
state_new = PopArtState(shift_new, scale_new, second_moment_new)
# Prevent gradients propagating back through the state.
state_new = jax.tree_map(jax.lax.stop_gradient, state_new)
return state_new
def pop(params: LinearParams, old: PopArtState, new: PopArtState):
"""Preserves outputs precisely.
Args:
params: The parameters of the linear to preserve.
old: The old PopArt state.
new: The new PopArt state.
Returns:
new parameters.
"""
w_new = params["w"] * jnp.broadcast_to(old.scale / new.scale,
params["w"].shape)
b_new = (old.scale * params["b"] + old.shift - new.shift) / new.scale
params_new = dict(w=w_new, b=b_new)
return params_new
def popart(num_outputs: int,
step_size: float,
scale_lb: float,
scale_ub: float,
axis_name=None):
"""Generates functions giving initial PopArt state and update rule.
Args:
num_outputs: The number of outputs generated by the linear we're preserving.
step_size: The step size for learning the scale & shift parameters.
scale_lb: Lower bound for the scale.
scale_ub: Upper bound for the scale.
axis_name: What axis to aggregate over, if str. If passed an iterable,
aggregates over multiple axes. Defaults to no aggregation, i.e. None.
Returns:
A tuple of:
initial_state: A function returning the initial PopArt state.
popart_update: A function updating the PopArt state and parameters
of the preceding linear.
"""
def initial_state():
return PopArtState(
jnp.zeros([num_outputs]), jnp.ones([num_outputs]),
jnp.ones([num_outputs]))
def popart_update(params: LinearParams, state: PopArtState, targets: Array,
indices: Array) -> Tuple[LinearParams, PopArtState]:
"""Computes the PopArt update.
Args:
params: The parameters of the linear to preserve.
state: The current PopArt state.
targets: Values whose distribution to learn.
indices: For each target, which shift and scale element to adjust.
Returns:
A tuple of:
new_params: The new parameters of the linear, preserving outputs.
new_state: The new PopArt state.
"""
# Disables Popart if step_size is None
if step_size is None:
return params, state
# Adaptively rescale targets.
state_new = art(state, targets, indices, step_size, scale_lb, scale_ub,
axis_name)
# Preserve outputs precisely.
params_new = pop(params, state, state_new)
return params_new, state_new
return initial_state, popart_update
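# Illustrative usage sketch, not part of the rlax API: adaptively rescaling
# value targets with PopArt for a 3-headed linear layer; the layer parameters,
# targets and head indices below are arbitrary.
def _example_popart_usage():
  initial_state, popart_update = popart(
      num_outputs=3, step_size=1e-3, scale_lb=1e-2, scale_ub=1e6)
  state = initial_state()
  params = dict(w=jnp.ones((8, 3)), b=jnp.zeros((3,)))  # final linear layer.
  targets = jnp.array([10.0, -2.0, 3.0])  # observed returns to learn stats of.
  indices = jnp.array([0, 2, 1])          # which head each target belongs to.
  # Updates the running shift/scale and adjusts the linear's weights so that
  # its unnormalized outputs are preserved.
  new_params, new_state = popart_update(params, state, targets, indices)
  return new_params, new_state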
| rlax-master | rlax/_src/pop_art.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `clipping.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import numpy as np
from rlax._src import clipping
class HuberLossTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.delta = 1.
self.xs = jnp.array([-2, -1, -0.5, 0, 0.5, 1, 2])
self.ys = jnp.array([1.5, 0.5, 0.125, 0, 0.125, 0.5, 1.5])
self.dys = jnp.array([-1, -1, -0.5, 0, 0.5, 1, 1])
self.loss_fn = functools.partial(clipping.huber_loss, delta=self.delta)
@chex.all_variants()
def test_huber_loss_scalar(self):
huber_loss = self.variant(self.loss_fn)
x = jnp.array(0.5)
# Test output.
np.testing.assert_allclose(huber_loss(x), 0.125)
@chex.all_variants()
def test_huber_loss_vector(self):
huber_loss = self.variant(self.loss_fn)
xs = self.xs
# Compute transformation.
actual = huber_loss(xs)
# test output.
np.testing.assert_allclose(actual, self.ys)
@chex.all_variants()
def test_gradients(self):
huber_loss = self.variant(self.loss_fn)
xs = self.xs
# Compute gradient in batch
batch_grad_func = jax.vmap(jax.grad(huber_loss), (0))
actual = batch_grad_func(xs)
np.testing.assert_allclose(actual, self.dys)
class ClipGradientsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.xs = jnp.array([-2, -1, -0.5, 0, 0.5, 1, 2])
@chex.all_variants()
def test_clip_gradient(self):
clip_gradient = self.variant(clipping.clip_gradient)
x = jnp.array(0.5)
# Test output.
actual = clip_gradient(x, -1., 1.)
np.testing.assert_allclose(actual, 0.5)
@chex.all_variants()
def test_clip_gradient_vector(self):
clip_gradient = self.variant(clipping.clip_gradient)
xs = self.xs
# Test output.
actual = clip_gradient(xs, -1., 1.)
np.testing.assert_allclose(actual, self.xs)
class EquivalenceTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.large_delta = 5.
self.xs = jnp.array([-2, -1, -0.5, 0, 0.5, 1, 2])
@chex.all_variants()
@parameterized.named_parameters(
('10', 10.),
('0.5', 0.5))
def test_clip_huber_equivalence(self, td_error):
@self.variant
def td_error_with_clip(x):
return 0.5 * jnp.square(
clipping.clip_gradient(x, -self.large_delta, self.large_delta))
@self.variant
def td_error_with_huber(x):
return clipping.huber_loss(x, self.large_delta)
td_error = jnp.array(td_error)
# Compute gradient in batch
clip_grad = jax.grad(td_error_with_clip)(td_error)
huber_grad = jax.grad(td_error_with_huber)(td_error)
np.testing.assert_allclose(clip_grad, huber_grad)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/clipping_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `transforms.py`."""
import functools
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import numpy as np
from rlax._src import transforms
TWO_HOT_BINS = 5
TWO_HOT_SCALARS = [-5.0, -3.0, -1.0, -0.4, 0.0, 0.3, 1.0, 4.5, 10.0]
TWO_HOT_PROBABILITIES = [
[1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.8, 0.2, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.4, 0.6, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.0]
]
class TransformsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.x = 0.5
self.xs = jnp.array([-2, -1, -0.5, 0, 0.5, 1, 2])
@chex.all_variants()
def test_identity_scalar(self):
identity = self.variant(transforms.identity)
x = jnp.array(self.x)
# Test output.
np.testing.assert_allclose(identity(x), self.x)
@chex.all_variants()
def test_identity_vector(self):
identity = self.variant(transforms.identity)
# Test output.
np.testing.assert_allclose(identity(self.xs), self.xs)
@chex.all_variants()
def test_sigmoid_scalar(self):
sigmoid = self.variant(transforms.sigmoid)
logit = self.variant(transforms.logit)
x = jnp.array(self.x)
# Test output.
np.testing.assert_allclose(logit(sigmoid(x)), self.x, atol=1e-3)
@chex.all_variants()
def test_sigmoid_vector(self):
sigmoid = self.variant(transforms.sigmoid)
logit = self.variant(transforms.logit)
# Test output.
np.testing.assert_allclose(logit(sigmoid(self.xs)), self.xs, atol=1e-3)
@chex.all_variants()
def test_signed_log_exp_transform_scalar(self):
signed_logp1 = self.variant(transforms.signed_logp1)
signed_expm1 = self.variant(transforms.signed_expm1)
x = jnp.array(self.x)
# Test inverse.
np.testing.assert_allclose(signed_expm1(signed_logp1(x)), self.x, atol=1e-3)
@chex.all_variants()
def test_signed_log_exp_transform_vector(self):
signed_logp1 = self.variant(transforms.signed_logp1)
signed_expm1 = self.variant(transforms.signed_expm1)
# Test inverse.
np.testing.assert_allclose(
signed_expm1(signed_logp1(self.xs)), self.xs, atol=1e-3)
@chex.all_variants()
def test_signed_hyper_parabolic_transform_scalar(self):
signed_hyperbolic = self.variant(transforms.signed_hyperbolic)
signed_parabolic = self.variant(transforms.signed_parabolic)
x = jnp.array(self.x)
# Test inverse.
np.testing.assert_allclose(
signed_parabolic(signed_hyperbolic(x)), self.x, atol=1e-3)
@chex.all_variants()
def test_signed_hyper_parabolic_transform_vector(self):
signed_hyperbolic = self.variant(transforms.signed_hyperbolic)
signed_parabolic = self.variant(transforms.signed_parabolic)
# Test inverse.
np.testing.assert_allclose(
signed_parabolic(signed_hyperbolic(self.xs)), self.xs, atol=1e-3)
@chex.all_variants()
def test_signed_power_transform_scalar(self):
square = self.variant(functools.partial(transforms.power, p=2.))
sqrt = self.variant(functools.partial(transforms.power, p=1/2.))
x = jnp.array(self.x)
# Test inverse.
np.testing.assert_allclose(square(sqrt(x)), self.x, atol=1e-3)
@chex.all_variants()
def test_signed_power_transform_vector(self):
square = self.variant(functools.partial(transforms.power, p=2.))
sqrt = self.variant(functools.partial(transforms.power, p=1/2.))
# Test inverse.
np.testing.assert_allclose(square(sqrt(self.xs)), self.xs, atol=1e-3)
@chex.all_variants()
def test_hyperbolic_sin_transform_scalar(self):
sinh = self.variant(transforms.hyperbolic_sin)
arcsinh = self.variant(transforms.hyperbolic_arcsin)
x = jnp.array(self.x)
# Test inverse.
np.testing.assert_allclose(sinh(arcsinh(x)), self.x, atol=1e-3)
np.testing.assert_allclose(arcsinh(sinh(x)), self.x, atol=1e-3)
@chex.all_variants()
def test_hyperbolic_sin_transform_vector(self):
sinh = self.variant(transforms.hyperbolic_sin)
arcsinh = self.variant(transforms.hyperbolic_arcsin)
# Test inverse.
np.testing.assert_allclose(sinh(arcsinh(self.xs)), self.xs, atol=1e-3)
np.testing.assert_allclose(arcsinh(sinh(self.xs)), self.xs, atol=1e-3)
def test_transform_to_2hot(self):
y = transforms.transform_to_2hot(
scalar=jnp.array(TWO_HOT_SCALARS),
min_value=-1.0,
max_value=1.0,
num_bins=TWO_HOT_BINS)
np.testing.assert_allclose(y, np.array(TWO_HOT_PROBABILITIES), atol=1e-4)
def test_transform_from_2hot(self):
y = transforms.transform_from_2hot(
probs=jnp.array(TWO_HOT_PROBABILITIES),
min_value=-1.0,
max_value=1.0,
num_bins=TWO_HOT_BINS)
np.testing.assert_allclose(
y, np.clip(np.array(TWO_HOT_SCALARS), -1, 1), atol=1e-4)
def test_2hot_roundtrip(self):
min_value = -1.0
max_value = 1.0
num_bins = 11
value = np.arange(min_value, max_value, 0.01)
transformed = transforms.transform_to_2hot(
value, min_value, max_value, num_bins)
restored = transforms.transform_from_2hot(
transformed, min_value, max_value, num_bins)
np.testing.assert_almost_equal(value, restored, decimal=5)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/transforms_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| rlax-master | rlax/_src/__init__.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to support model learning."""
from typing import Optional
import chex
import jax
import jax.numpy as jnp
def extract_subsequences(
trajectories: chex.Array,
start_indices: chex.Array,
subsequence_len: int = 1,
max_valid_start_idx: Optional[int] = None,
) -> chex.Array:
"""Extract (potentially overlapping) subsequences from batch of trajectories.
WARNING: If `max_valid_start_idx` is not provided, or is set incorrectly,
the function cannot check the validity of the chosen `start_indices`, and no
error will be raised when indexing outside the data boundaries.
Args:
trajectories: A batch of trajectories, shape `[T, B, ...]`.
start_indices: Time indices of start points, shape `[B, num_start_indices]`.
subsequence_len: The length of subsequences extracted from `trajectories`.
max_valid_start_idx: the maximum valid start index; all `start_indices`
should therefore lie in {0, ..., max_valid_start_idx}.
Returns:
A batch of subsequences, with
`trajectories[start_indices[i, j]:start_indices[i, j] + subsequence_len]` for
each start index. Output shape is: `[subsequence_len, B, num_start_indices, ...]`.
"""
if max_valid_start_idx is not None:
min_len = max_valid_start_idx + subsequence_len
traj_len = trajectories.shape[0]
if traj_len < min_len:
raise AssertionError(
f'Expected len >= {min_len}, but trajectories length is: {traj_len}.')
batch_size = start_indices.shape[0]
batch_range = jnp.arange(batch_size)
num_subs = start_indices.shape[1]
idx_arr = jnp.arange(subsequence_len)[:, None, None] * jnp.ones(
(subsequence_len, batch_size, num_subs), dtype=jnp.int32) + start_indices
return trajectories[idx_arr, batch_range[None, :, None], ...]
def sample_start_indices(
rng_key: chex.PRNGKey,
batch_size: int,
num_start_indices: int,
max_valid_start_idx: int
) -> chex.Array:
"""Sampling `batch_size x num_start_indices` starting indices.
Args:
rng_key: a pseudo random number generator's key.
batch_size: the size of the batch of trajectories to index in.
num_start_indices: how many starting points per trajectory in the batch.
max_valid_start_idx: maximum valid time index for all starting points.
Returns:
an array of starting points with shape `[B, num_start_indices]`
"""
@jax.vmap
def _vchoose(key, entries):
return jax.random.choice(
key, entries, shape=(num_start_indices,), replace=False)
rollout_window = jnp.arange(max_valid_start_idx + 1)
return _vchoose(
jax.random.split(rng_key, batch_size),
jnp.tile(rollout_window, (batch_size, 1)))
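# A minimal usage sketch (illustrative only, not part of the library API):
# sample start points for a batch of trajectories and gather overlapping
# subsequences; all shapes below are placeholders chosen for the example.
def _example_extract_subsequences():
  key = jax.random.PRNGKey(0)
  trajectories = jnp.zeros((10, 4, 3))  # [T=10, B=4, feature=3].
  # Start indices must lie in {0, ..., max_valid_start_idx}.
  start_indices = sample_start_indices(
      key, batch_size=4, num_start_indices=2, max_valid_start_idx=7)
  subsequences = extract_subsequences(
      trajectories, start_indices, subsequence_len=3, max_valid_start_idx=7)
  # subsequences.shape == (3, 4, 2, 3), i.e.
  # [subsequence_len, B, num_start_indices, feature].
  return subsequences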
| rlax-master | rlax/_src/model_learning.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Maximum A Posteriori Policy Optimization (MPO/V-MPO) ops.
Maximum a Posteriori Policy Optimisation.
https://openreview.net/forum?id=S1ANxQW0b
Relative Entropy Regularized Policy Iteration.
https://arxiv.org/abs/1812.02256
V-MPO: On-Policy Maximum a Posteriori Policy Optimization
for Discrete and Continuous Control.
https://openreview.net/forum?id=SylOlp4FvH
Since these functions are calculated per-example (with some aggregation over
all examples), they work with many input shapes as long
as input shapes are consistent across inputs. We use E* to denote the shape of
the examples. For example, E* could be [T, B], [B, T], [T], etc as long as E* is
consistent across all function inputs, and function output shape will also
depend on E*.
"""
import functools
from typing import Callable, NamedTuple, Optional, Sequence, Tuple
from absl import logging
import chex
import jax
import jax.numpy as jnp
from rlax._src import base
Array = chex.Array
Numeric = chex.Numeric
Scalar = chex.Scalar
ProjectionOperatorFn = Callable[[Array], Array]
class LagrangePenalty(NamedTuple):
# Dual variable responsible for modulating the penalty for this constraint.
alpha: Array
# Bound for this constraint.
epsilon: Scalar
# Whether to constrain each dimension separately with its own bound epsilon.
per_dimension: bool = False
class MpoOutputs(NamedTuple):
"""Additional outputs for mpo loss functions."""
# In V-MPO the temperature loss is computed across all data, so it is a scalar;
# in MPO it is per example, with shape E*.
temperature_loss: Numeric
# These losses are per example with shape E*.
policy_loss: Array
kl_loss: Array
alpha_loss: Array
# Other outputs.
normalized_weights: Array
num_samples: Numeric
_EPSILON = 1e-10 # For numerical stability.
_INFINITY = 1e6
def mpo_loss(
sample_log_probs: Array,
sample_q_values: Array,
temperature_constraint: LagrangePenalty,
kl_constraints: Sequence[Tuple[Array, LagrangePenalty]],
projection_operator: ProjectionOperatorFn = functools.partial(
jnp.clip, a_min=_EPSILON),
policy_loss_weight: float = 1.0,
temperature_loss_weight: float = 1.0,
kl_loss_weight: float = 1.0,
alpha_loss_weight: float = 1.0,
sample_axis: int = 0,
use_stop_gradient: bool = True,
) -> Tuple[Array, MpoOutputs]:
"""Implements the MPO loss with a KL bound.
This loss implements the MPO algorithm for policies with a bound for the KL
between the current and target policy.
Note: This is a per-example loss which works on any shape inputs as long as
they are consistent. We denote this shape E* for ease of reference. The args
sample_log_probs and sample_q_values are shape E* + an extra sample axis that
contains the sampled actions' log probs and q values respectively. For
example, if sample_axis = 0, the shapes expected will be [S, E*]. Or if
E* = [T, B] and sample_axis = 1, the shapes expected will be [T, S, B].
Args:
sample_log_probs: An array of shape E* + a sample axis inserted at
sample_axis containing the log probabilities of the sampled actions under
the current policy.
sample_q_values: An array of shape E* + a sample axis inserted at
sample_axis containing the q function values evaluated on the sampled
actions.
temperature_constraint: Lagrange constraint for the E-step temperature
optimization.
kl_constraints: KL and variables for applying Lagrangian penalties to bound
them in the M-step, KLs are [E*, A?]. Here A is the action dimension
in the case of per-dimension KL constraints.
projection_operator: Function to project dual variables (temperature and kl
constraint alphas) into the positive range.
policy_loss_weight: Weight for the policy loss.
temperature_loss_weight: Weight for the temperature loss.
kl_loss_weight: Weight for the KL loss.
alpha_loss_weight: Weight for the alpha loss.
sample_axis: Axis in sample_log_probs and sample_q_values that contains the
sampled actions' log probs and q values respectively. For example, if
sample_axis = 0, the shapes expected will be [S, E*]. Or if E* = [T, B]
and sample_axis = 1, the shapes expected will be [T, S, B].
use_stop_gradient: bool indicating whether or not to apply stop gradient.
Returns:
Per example `loss` with shape E*, and additional data (the components of
this loss and the normalized weights) in the `MpoOutputs` tuple.
"""
chex.assert_equal_shape([sample_log_probs, sample_q_values])
chex.assert_rank(temperature_constraint.epsilon, 0)
chex.assert_type([
sample_log_probs, sample_q_values, temperature_constraint.alpha,
temperature_constraint.epsilon], float)
for kl, penalty in kl_constraints:
chex.assert_rank(penalty.epsilon, 0)
chex.assert_type([kl, penalty.alpha, penalty.epsilon], float)
if penalty.per_dimension:
chex.assert_rank(kl, sample_q_values.ndim)
else:
chex.assert_rank(kl, sample_q_values.ndim - 1)
if sample_axis < 0:
sample_axis += sample_q_values.ndim
if not 0 <= sample_axis < sample_q_values.ndim:
raise ValueError(
f"`sample_axis` {sample_axis} not in array rank {sample_q_values.ndim}")
# E-Step. Compute temperature loss, weights, and temperature.
temperature_loss, norm_weights, num_samples = (
mpo_compute_weights_and_temperature_loss(
sample_q_values, temperature_constraint, projection_operator,
sample_axis=sample_axis))
norm_weights = jax.lax.select(
use_stop_gradient, jax.lax.stop_gradient(norm_weights), norm_weights)
# M-Step. Supervised learning on reweighted probabilities using the weights
# from the E-Step under an additional KL constraint.
policy_loss = -jnp.sum(norm_weights * sample_log_probs, axis=sample_axis)
kl_loss, alpha_loss = compute_parametric_kl_penalty_and_dual_loss(
kl_constraints, projection_operator, use_stop_gradient)
chex.assert_equal_shape([policy_loss, kl_loss, alpha_loss])
# Combine all loss components. The final loss is of shape E*.
loss = (policy_loss_weight * policy_loss +
temperature_loss_weight * temperature_loss +
kl_loss_weight * kl_loss +
alpha_loss_weight * alpha_loss)
return loss, MpoOutputs(
temperature_loss=temperature_loss, policy_loss=policy_loss,
kl_loss=kl_loss, alpha_loss=alpha_loss, normalized_weights=norm_weights,
num_samples=num_samples)
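# A minimal call sketch (illustrative only, not part of the library API):
# here E* = [T, B] with the sample axis at position 0, so the sampled
# log-probs and Q-values are [S, T, B]. Values are placeholders, not
# meaningful training data.
def _example_mpo_loss():
  num_action_samples, t, b = 10, 5, 2
  key_q, key_pi = jax.random.split(jax.random.PRNGKey(0))
  sample_log_probs = jax.random.normal(key_pi, (num_action_samples, t, b))
  sample_q_values = jax.random.normal(key_q, (num_action_samples, t, b))
  temperature_constraint = LagrangePenalty(alpha=jnp.array(1.0), epsilon=1e-2)
  # A single KL constraint of shape E* = [T, B] (per_dimension=False).
  kl = jnp.zeros((t, b))
  kl_penalty = LagrangePenalty(alpha=jnp.ones((t, b)), epsilon=1e-3)
  loss, outputs = mpo_loss(
      sample_log_probs, sample_q_values, temperature_constraint,
      kl_constraints=[(kl, kl_penalty)], sample_axis=0)
  # `loss` has shape E* = (5, 2); `outputs` holds the individual loss terms.
  return loss, outputs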
def mpo_compute_weights_and_temperature_loss(
sample_q_values: Array,
temperature_constraint: LagrangePenalty,
projection_operator: ProjectionOperatorFn,
sample_axis: int = 0,
) -> Tuple[Array, Array, Scalar]:
"""Computes the weights and temperature loss for MPO.
The E-Step computes a non-parametric, sample-based approximation of the
current policy by reweighting the state-action value function.
Here, we compute this nonparametric policy and optimize the temperature
parameter used in the reweighting.
Args:
sample_q_values: An array of shape E* + a sample axis inserted at
sample_axis containing the q function values evaluated on the sampled
actions.
temperature_constraint: Lagrange constraint for the E-step temperature
optimization.
projection_operator: Function to project temperature into the positive
range.
sample_axis: Axis in sample_q_values containing sampled actions.
Returns:
The temperature loss, normalized weights and number of action samples per
state.
"""
chex.assert_rank(temperature_constraint.epsilon, 0)
chex.assert_type([sample_q_values, temperature_constraint.alpha,
temperature_constraint.epsilon], float)
if sample_axis < 0:
sample_axis += sample_q_values.ndim
if not 0 <= sample_axis < sample_q_values.ndim:
raise ValueError(
f"`sample_axis` {sample_axis} not in array rank {sample_q_values.ndim}")
n_action_samples = sample_q_values.shape[sample_axis]
# Clip the temperature value (temperature must be positive).
temperature = projection_operator(temperature_constraint.alpha)
epsilon = temperature_constraint.epsilon
# Scale the Q-values.
scaled_sample_q_values = sample_q_values / temperature
# Temperature optimization.
q_logsumexp = jax.scipy.special.logsumexp(
scaled_sample_q_values, axis=sample_axis, keepdims=True)
# The temperature loss encourages the current and previous policy to stay
# close. This loss optimizes the convex dual of an upper bound on the average
# KL (epsilon) between the current and previous state-action values.
temperature_loss = (
temperature * epsilon +
(temperature * (jnp.squeeze(q_logsumexp, axis=sample_axis)
- jnp.log(n_action_samples))))
# The weights correspond to a softmax over state-action values.
weights = jnp.exp(scaled_sample_q_values - q_logsumexp)
# Normalize the weights before the M-Step
norm_weights = weights / jnp.sum(weights, axis=sample_axis, keepdims=True)
return temperature_loss, norm_weights, n_action_samples
def compute_parametric_kl_penalty_and_dual_loss(
kl_constraints: Sequence[Tuple[Array, LagrangePenalty]],
projection_operator: ProjectionOperatorFn,
use_stop_gradient: bool = True,
) -> Tuple[Array, Array]:
"""Optimize hard KL constraints between the current and previous policies."""
for kl, penalty in kl_constraints:
chex.assert_rank(penalty.epsilon, 0)
chex.assert_type([kl, penalty.alpha, penalty.epsilon], float)
kl_losses, alpha_losses = [], []
for kl, penalty in kl_constraints:
kl_loss, alpha_loss, _ = kl_constraint_loss(
kl, penalty, projection_operator, use_stop_gradient)
kl_losses.append(kl_loss)
alpha_losses.append(alpha_loss)
kl_loss, alpha_loss = sum(kl_losses), sum(alpha_losses)
return kl_loss, alpha_loss
def vmpo_loss(
sample_log_probs: Array,
advantages: Array,
temperature_constraint: LagrangePenalty,
kl_constraints: Sequence[Tuple[Array, LagrangePenalty]],
projection_operator: ProjectionOperatorFn = functools.partial(
jnp.clip, a_min=_EPSILON),
restarting_weights: Optional[Array] = None,
importance_weights: Optional[Array] = None,
top_k_fraction: float = 0.5,
policy_loss_weight: float = 1.0,
temperature_loss_weight: float = 1.0,
kl_loss_weight: float = 1.0,
alpha_loss_weight: float = 1.0,
axis_name: Optional[str] = None,
use_stop_gradient: bool = True,
) -> Tuple[Array, MpoOutputs]:
"""Calculates the V-MPO policy improvement loss.
Note: This is a per-example loss which works on any shape inputs as long as
they are consistent. We denote the shape of the examples E* for ease of
reference.
Args:
sample_log_probs: Log probabilities of actions for each example. Shape E*.
advantages: Advantages for the E-step. Shape E*.
temperature_constraint: Lagrange constraint for the E-step temperature
optimization.
kl_constraints: KL and variables for applying Lagrangian penalties to bound
them in the M-step, KLs are E* or [E*, A]. Here A is the action dimension
in the case of per-dimension KL constraints.
projection_operator: Function to project dual variables (temperature and kl
constraint alphas) into the positive range.
restarting_weights: Optional restarting weights, shape E*, 0 means that this
step is the start of a new episode and we ignore losses at this step
because the agent cannot influence these.
importance_weights: Optional importance weights, shape E*.
top_k_fraction: Fraction of samples to use in the E-step.
policy_loss_weight: Weight for the policy loss.
temperature_loss_weight: Weight for the temperature loss.
kl_loss_weight: Weight for the KL loss.
alpha_loss_weight: Weight for the alpha loss.
axis_name: Optional axis name for `pmap`. If `None`, computations
are performed locally on each device.
use_stop_gradient: bool indicating whether or not to apply stop gradient.
Returns:
Per example `loss` with the same shape E* as the array inputs, and additional
data (the components of this loss and the normalized weights) in the
`MpoOutputs` tuple.
"""
# Define default restarting weights and importance weights.
if restarting_weights is None:
restarting_weights = jnp.ones_like(sample_log_probs)
if importance_weights is None:
importance_weights = jnp.ones_like(sample_log_probs)
# Check shapes.
chex.assert_equal_shape(
[advantages, sample_log_probs, restarting_weights, importance_weights])
chex.assert_rank(temperature_constraint.epsilon, 0)
chex.assert_type([
sample_log_probs, advantages, restarting_weights, importance_weights,
temperature_constraint.alpha, temperature_constraint.epsilon], float)
for kl, penalty in kl_constraints:
chex.assert_rank(penalty.epsilon, 0)
chex.assert_type([kl, penalty.alpha, penalty.epsilon], float)
if penalty.per_dimension:
chex.assert_rank(kl, advantages.ndim + 1)
chex.assert_equal_shape_prefix([kl, advantages], advantages.ndim)
else:
chex.assert_equal_shape([kl, advantages])
# E-step: Calculate the reweighting and the temperature loss.
temperature_loss, norm_weights, num_samples = (
vmpo_compute_weights_and_temperature_loss(
advantages, restarting_weights, importance_weights,
temperature_constraint, projection_operator, top_k_fraction,
axis_name=axis_name, use_stop_gradient=use_stop_gradient))
# M-step: Supervised learning of reweighted trajectories using the weights
# from the E-step, with additional KL constraints.
# The weights are normalized so that the sum is 1. We multiply by the number
# of examples so that we can give a policy loss per example and take the mean,
# and we assume `restarting_weights` are already included.
if axis_name:
num_examples = jax.lax.all_gather(
sample_log_probs, axis_name=axis_name).size
else:
num_examples = sample_log_probs.size
policy_loss = -sample_log_probs * norm_weights * num_examples
kl_loss, alpha_loss = compute_parametric_kl_penalty_and_dual_loss(
kl_constraints, projection_operator, use_stop_gradient)
chex.assert_equal_shape([policy_loss, kl_loss, alpha_loss])
# Calculate the total policy improvement loss.
loss = (policy_loss_weight * policy_loss +
temperature_loss_weight * temperature_loss +
kl_loss_weight * kl_loss +
alpha_loss_weight * alpha_loss)
return loss, MpoOutputs(
temperature_loss=temperature_loss, policy_loss=policy_loss,
kl_loss=kl_loss, alpha_loss=alpha_loss, normalized_weights=norm_weights,
num_samples=num_samples)
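# A minimal call sketch (illustrative only, not part of the library API):
# V-MPO operates on per-example advantages of shape E* (here [T, B]).
# Values are placeholders, not meaningful training data.
def _example_vmpo_loss():
  t, b = 5, 2
  key_adv, key_pi = jax.random.split(jax.random.PRNGKey(0))
  sample_log_probs = jax.random.normal(key_pi, (t, b))
  advantages = jax.random.normal(key_adv, (t, b))
  temperature_constraint = LagrangePenalty(alpha=jnp.array(1.0), epsilon=1e-2)
  kl = jnp.zeros((t, b))
  kl_penalty = LagrangePenalty(alpha=jnp.ones((t, b)), epsilon=1e-3)
  loss, outputs = vmpo_loss(
      sample_log_probs, advantages, temperature_constraint,
      kl_constraints=[(kl, kl_penalty)], top_k_fraction=0.5)
  # `loss` has shape E* = (5, 2); the temperature loss inside `outputs` is a
  # scalar because it is computed across all examples.
  return loss, outputs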
def get_top_k_weights(
top_k_fraction: float,
restarting_weights: Array,
scaled_advantages: Array,
axis_name: Optional[str] = None,
use_stop_gradient: bool = True,
) -> Array:
"""Get the weights for the top top_k_fraction of advantages.
Args:
top_k_fraction: The fraction of weights to use.
restarting_weights: Restarting weights, shape E*, 0 means that this step is
the start of a new episode and we ignore losses at this step because the
agent cannot influence these.
scaled_advantages: The advantages for each example (shape E*), scaled by
temperature.
axis_name: Optional axis name for `pmap`. If `None`, computations are
performed locally on each device.
use_stop_gradient: bool indicating whether or not to apply stop gradient.
Returns:
Weights for the top top_k_fraction of advantages
"""
chex.assert_equal_shape([scaled_advantages, restarting_weights])
chex.assert_type([scaled_advantages, restarting_weights], float)
if not 0.0 < top_k_fraction <= 1.0:
raise ValueError(
f"`top_k_fraction` must be in (0, 1], got {top_k_fraction}")
logging.info("[vmpo_e_step] top_k_fraction: %f", top_k_fraction)
if top_k_fraction < 1.0:
# Don't include the restarting samples in the determination of top-k.
valid_scaled_advantages = scaled_advantages - (
1.0 - restarting_weights) * _INFINITY
# Determine the minimum top-k value across all devices.
if axis_name:
all_valid_scaled_advantages = jax.lax.all_gather(
valid_scaled_advantages, axis_name=axis_name)
else:
all_valid_scaled_advantages = valid_scaled_advantages
top_k = int(top_k_fraction * jnp.size(all_valid_scaled_advantages))
if top_k == 0:
raise ValueError(
"top_k_fraction too low to get any valid scaled advantages.")
# TODO(b/160450251): Use jnp.partition(all_valid_scaled_advantages, top_k)
# when this is implemented in jax.
top_k_min = jnp.sort(jnp.reshape(all_valid_scaled_advantages, [-1]))[-top_k]
# Fold the top-k into the restarting weights.
top_k_weights = jnp.greater_equal(valid_scaled_advantages,
top_k_min).astype(jnp.float32)
top_k_weights = jax.lax.select(
use_stop_gradient, jax.lax.stop_gradient(top_k_weights), top_k_weights)
top_k_restarting_weights = restarting_weights * top_k_weights
else:
top_k_restarting_weights = restarting_weights
return top_k_restarting_weights
def vmpo_compute_weights_and_temperature_loss(
advantages: Array,
restarting_weights: Array,
importance_weights: Array,
temperature_constraint: LagrangePenalty,
projection_operator: ProjectionOperatorFn,
top_k_fraction: float,
axis_name: Optional[str] = None,
use_stop_gradient: bool = True,
) -> Tuple[Array, Array, Array]:
"""Computes the weights and temperature loss for V-MPO.
Args:
advantages: Advantages for the E-step. Shape E*.
restarting_weights: Restarting weights, 0 means that this
step is the start of a new episode and we ignore losses at this step
because the agent cannot influence these. Shape E*.
importance_weights: Optional importance weights. Shape E*
temperature_constraint: Lagrange constraint for the E-step temperature
optimization.
projection_operator: Function to project dual variables (temperature and kl
constraint alphas) into the positive range.
top_k_fraction: Fraction of samples to use in the E-step.
axis_name: Optional axis name for `pmap` or 'vmap'. If `None`, computations
are performed locally on each device.
use_stop_gradient: bool indicating whether or not to apply stop gradient.
Returns:
The temperature loss, normalized weights and number of samples used.
"""
chex.assert_equal_shape([advantages, restarting_weights, importance_weights])
chex.assert_rank(temperature_constraint.epsilon, 0)
chex.assert_type([
advantages, restarting_weights, importance_weights,
temperature_constraint.alpha, temperature_constraint.epsilon], float)
importance_weights = jax.lax.select(
use_stop_gradient, jax.lax.stop_gradient(importance_weights),
importance_weights)
# Lagrange constraint.
temperature = projection_operator(temperature_constraint.alpha)
epsilon_temperature = temperature_constraint.epsilon
# Scale the advantages.
scaled_advantages = restarting_weights * advantages / temperature
max_scaled_advantage = jnp.max(scaled_advantages)
# If the axis_name is not None find the maximum across all devices.
if axis_name:
assert use_stop_gradient  # Can't differentiate through pmax.
max_scaled_advantage = jax.lax.stop_gradient(max_scaled_advantage)
max_scaled_advantage = jax.lax.pmax(
max_scaled_advantage, axis_name=axis_name)
else:
max_scaled_advantage = jax.lax.select(
use_stop_gradient, jax.lax.stop_gradient(max_scaled_advantage),
max_scaled_advantage)
# Maybe don't use all of the advantages.
top_k_restarting_weights = get_top_k_weights(
top_k_fraction, restarting_weights, scaled_advantages, axis_name,
use_stop_gradient)
all_sum = base.AllSum(axis_name)
# Reweight the old trajectories.
unnormalized_weights = (top_k_restarting_weights * importance_weights
* jnp.exp(scaled_advantages - max_scaled_advantage))
# If the axis_name is not None these sums will be taken across all devices.
sum_weights = all_sum(unnormalized_weights) + _EPSILON
num_samples = all_sum(top_k_restarting_weights) + _EPSILON
normalized_weights = unnormalized_weights / sum_weights
normalized_weights = jax.lax.select(use_stop_gradient,
jax.lax.stop_gradient(normalized_weights),
normalized_weights)
# Calculate the temperature loss.
log_mean_weights = (jnp.log(sum_weights) + max_scaled_advantage
- jnp.log(num_samples))
temperature_loss = temperature * (epsilon_temperature + log_mean_weights)
return temperature_loss, normalized_weights, num_samples
def kl_constraint_loss(
kl: Array,
penalty: LagrangePenalty,
projection_operator: ProjectionOperatorFn,
use_stop_gradient: bool = True,
) -> Tuple[Array, Array, Array]:
"""Implements a hard KL constraint.
The optimization proceeds in two phases. First, we optimize the weighting
term `alpha` keeping the KL constant and then we optimize the KL keeping
`alpha` constant. Each phase is implemented by appropriately using a
stop_gradient.
If `bound` - `kl` > 0, then `alpha` is pushed toward `min_alpha`. However
this also means the policy is free to change more, since `kl` < `bound`.
This eventually leads to `bound` - `kl` < 0 which pressures alpha to get to
a high positive value. This coordinate ascent results in `kl` staying close
to `bound`.
Args:
kl: The kl per example which is being constrained.
penalty: The dual variable used to impose a penalty and parameters of the
constraint.
projection_operator: Function to project dual variables kl constraint alphas
into the positive range.
use_stop_gradient: bool indicating whether or not to apply stop gradient.
Returns:
A `tuple` consisting of three arrays: `kl_loss`, `alpha_loss` and
`clipped_alpha`. The first two terms represent the losses for the two phases
of computation. `clipped_alpha` is the clipped lagrangian multiplier `alpha`
that is learnt.
"""
chex.assert_type([kl, penalty.alpha, penalty.epsilon], float)
alpha = projection_operator(penalty.alpha)
alpha_constant = jax.lax.select(
use_stop_gradient, jax.lax.stop_gradient(alpha), alpha)
# First step: Optimize w.r.t. alphas
alpha_loss = alpha * (
penalty.epsilon -
jax.lax.select(use_stop_gradient, jax.lax.stop_gradient(kl), kl))
# Second step: KL loss.
kl_loss = alpha_constant * kl
# If kl_loss and alpha_loss are per dimension then at this point sum over
# dimensions.
if penalty.per_dimension:
kl_loss = jnp.sum(kl_loss, axis=-1)
alpha_loss = jnp.sum(alpha_loss, axis=-1)
return kl_loss, alpha_loss, alpha
def kl_alpha_loss(
restarting_weights: Array,
kl_constraints: Sequence[Tuple[Array, LagrangePenalty]] = (),
axis_name: Optional[str] = None):
"""Calculates the losses for multiple KL constraints.
Args:
restarting_weights: Restarting weights, shape E*, 0 means that this step is
the start of a new episode and we ignore losses at this step because the
agent cannot influence these.
kl_constraints: KL and variables for applying Lagrangian penalties to bound
them in the M-step, KLs are [E*, A?]. Here A is the action dimension
in the case of per-dimension KL constraints.
axis_name: Optional axis name for `pmap`. If `None`, computations are
performed locally on each device.
Returns:
The kl loss and dual variable loss both shape E*.
"""
chex.assert_type(restarting_weights, float)
if kl_constraints:
for kl, penalty in kl_constraints:
chex.assert_rank(penalty.epsilon, 0)
chex.assert_type([kl, penalty.alpha, penalty.epsilon], float)
chex.assert_equal_shape_prefix([kl, restarting_weights],
restarting_weights.ndim)
# Implement decoupled KL constraints.
kl_alpha_losses = [kl_constraint_loss(kl, penalty, lambda x: x)[:2]
for kl, penalty in kl_constraints]
kl_loss, alpha_loss = [sum(losses) for losses in zip(*kl_alpha_losses)]
all_sum = base.AllSum(axis_name)
num_samples = all_sum(restarting_weights) + _EPSILON
# Reshape in case KL is per dimension.
kl_loss = all_sum(kl_loss * restarting_weights) / num_samples
alpha_loss = all_sum(alpha_loss * restarting_weights) / num_samples
else:
# No M-step constraint.
kl_loss = jnp.asarray(0.0)
alpha_loss = jnp.asarray(0.0)
return kl_loss, alpha_loss
| rlax-master | rlax/_src/mpo_ops.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for embedding.py."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
from rlax._src import embedding
class EmbeddingTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._features = np.array([[1., 2.], [3., 2.]])
self._num_actions = 3
self._actions = np.array([1, 2])
self._rewards = np.array([1., 1])
def test_embed_zeros(self):
# Embedding zero feature array.
zero_features = np.zeros_like(self._features)
emb = embedding.embed_oar(zero_features, self._actions, self._rewards,
self._num_actions)
np.testing.assert_array_equal(emb[:, :self._features.shape[-1]],
zero_features)
def test_embed_shape(self):
# Test output shape [T?, B, D+A+1].
emb = embedding.embed_oar(self._features, self._actions, self._rewards,
self._num_actions)
np.testing.assert_array_equal(
emb.shape[-1], self._features.shape[-1] + self._num_actions + 1)
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/embedding_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for correct handling of interruptions."""
import chex
import dm_env
import jax.numpy as jnp
def fix_step_type_on_interruptions(step_type: chex.Array):
"""Returns step_type with a LAST step before almost every FIRST step.
If the environment crashes or is interrupted while a trajectory is being
written, the LAST step can be missing before a FIRST step. We add the LAST
step before each FIRST step, if the step before the FIRST step is a MID step,
to signal to the agent that the next observation is not connected to the
current stream of data. Note that the agent must still then appropriately
handle both `terminations` (e.g. game over in a game) and `interruptions` (a
timeout or a reset for system maintenance): the value of the discount on LAST
step will be > 0 on `interruptions`, while it will be 0 on `terminations`.
Similar issues arise in hierarchical RL systems as well.
Args:
step_type: an array of `dm_env` step types, with shape `[T, B]`.
Returns:
Fixed step_type.
"""
chex.assert_rank(step_type, 2)
next_step_type = jnp.concatenate([
step_type[1:],
jnp.full(
step_type[:1].shape, int(dm_env.StepType.MID), dtype=step_type.dtype),
],
axis=0)
return jnp.where(
jnp.logical_and(
jnp.equal(next_step_type, int(dm_env.StepType.FIRST)),
jnp.equal(step_type, int(dm_env.StepType.MID)),
), int(dm_env.StepType.LAST), step_type)
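# A minimal example (illustrative only, not part of the library API): a MID
# step that is immediately followed by a FIRST step (e.g. after a crash or
# preemption) is rewritten as a LAST step, so the agent does not bootstrap
# across the interruption.
def _example_fix_step_type():
  first = int(dm_env.StepType.FIRST)  # 0
  mid = int(dm_env.StepType.MID)      # 1
  last = int(dm_env.StepType.LAST)    # 2
  step_type = jnp.array([[first], [mid], [mid], [first], [mid], [last]])
  fixed = fix_step_type_on_interruptions(step_type)
  # The MID at index 2 becomes LAST because index 3 starts a new episode:
  # fixed[:, 0] == [FIRST, MID, LAST, FIRST, MID, LAST].
  return fixed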
| rlax-master | rlax/_src/interruptions.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions for implementing forms of gradient clipping.
Gradient clipping is commonly used to avoid taking too large steps in parameter
space when updating the parameters of an agent's policy, value or model. Certain
forms of gradient clipping can be conveniently expressed as transformations of
the loss function optimized by a suitable gradient descent algorithm.
"""
import chex
import jax
import jax.numpy as jnp
from jax.tree_util import tree_map
Array = chex.Array
def huber_loss(x: Array, delta: float = 1.) -> Array:
"""Huber loss, similar to L2 loss close to zero, L1 loss away from zero.
See "Robust Estimation of a Location Parameter" by Huber.
(https://projecteuclid.org/download/pdf_1/euclid.aoms/1177703732).
Args:
x: a vector of arbitrary shape.
delta: the bounds for the huber loss transformation, defaults at 1.
Note `grad(huber_loss(x))` is equivalent to `grad(0.5 * clip_gradient(x)**2)`.
Returns:
a vector of the same shape as `x`.
"""
chex.assert_type(x, float)
# 0.5 * x^2 if |x| <= d
# 0.5 * d^2 + d * (|x| - d) if |x| > d
abs_x = jnp.abs(x)
quadratic = jnp.minimum(abs_x, delta)
# Same as max(abs_x - delta, 0) but avoids potentially doubling gradient.
linear = abs_x - quadratic
return 0.5 * quadratic**2 + delta * linear
@jax.custom_gradient
def clip_gradient(x, gradient_min: float, gradient_max: float):
"""Identity but the gradient in the backward pass is clipped.
See "Human-level control through deep reinforcement learning" by Mnih et al,
(https://www.nature.com/articles/nature14236)
Note `grad(0.5 * clip_gradient(x)**2)` is equivalent to `grad(huber_loss(x))`.
Note: x cannot be properly annotated because pytype does not support recursive
types; we would otherwise use the chex.ArrayTree pytype annotation here. Most
often x will be a single array of arbitrary shape, but the implementation
supports pytrees.
Args:
x: a pytree of arbitrary shape.
gradient_min: min elementwise size of the gradient.
gradient_max: max elementwise size of the gradient.
Returns:
a pytree with the same structure and shapes as `x`.
"""
chex.assert_type(x, float)
def _compute_gradient(g):
return (tree_map(lambda g: jnp.clip(g, gradient_min, gradient_max),
g), 0., 0.)
return x, _compute_gradient
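# A small sketch (illustrative only, not part of the library API) of the
# gradient equivalence noted above: the gradient of `huber_loss(x, d)` matches
# the gradient of `0.5 * clip_gradient(x, -d, d) ** 2`, i.e. an L2 loss whose
# gradient is clipped elementwise to [-d, d].
def _example_huber_vs_clipped_l2():
  x = jnp.array(2.5)  # A scalar input with |x| > delta.
  delta = 1.0
  huber_grad = jax.grad(lambda y: huber_loss(y, delta))(x)
  clipped_l2_grad = jax.grad(
      lambda y: 0.5 * jnp.square(clip_gradient(y, -delta, delta)))(x)
  # Both gradients equal x for |x| <= delta and sign(x) * delta otherwise,
  # so here both are 1.0.
  return huber_grad, clipped_l2_grad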
| rlax-master | rlax/_src/clipping.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions for working with probability distributions.
Reinforcement learning algorithms often require to estimate suitably
parametrized probability distributions. In this subpackage a distribution is
represented as a collection of functions that may be used to sample from a
distribution, to evaluate its probability mass (or density) function, and
to compute statistics such as its entropy.
"""
import collections
import warnings
import chex
import distrax
import jax.numpy as jnp
Array = chex.Array
Numeric = chex.Numeric
DiscreteDistribution = collections.namedtuple(
"DiscreteDistribution", ["sample", "probs", "logprob", "entropy", "kl"])
ContinuousDistribution = collections.namedtuple(
"ContinuousDistribution", ["sample", "prob", "logprob", "entropy",
"kl_to_standard_normal", "kl"])
def categorical_sample(key, probs):
"""Sample from a set of discrete probabilities."""
warnings.warn(
"Rlax categorical_sample will be deprecated. "
"Please use distrax.Categorical.sample instead.",
PendingDeprecationWarning, stacklevel=2
)
return distrax.Categorical(probs=probs).sample(seed=key)
def softmax(temperature=1.):
"""A softmax distribution."""
warnings.warn(
"Rlax softmax will be deprecated. "
"Please use distrax.Softmax instead.",
PendingDeprecationWarning, stacklevel=2
)
def sample_fn(key: Array, logits: Array):
return distrax.Softmax(logits, temperature).sample(seed=key)
def probs_fn(logits: Array):
return distrax.Softmax(logits, temperature).probs
def logprob_fn(sample: Array, logits: Array):
return distrax.Softmax(logits, temperature).log_prob(sample)
def entropy_fn(logits: Array):
return distrax.Softmax(logits, temperature).entropy()
def kl_fn(p_logits: Array, q_logits: Array):
return categorical_kl_divergence(p_logits, q_logits, temperature)
return DiscreteDistribution(sample_fn, probs_fn, logprob_fn, entropy_fn,
kl_fn)
def clipped_entropy_softmax(temperature=1., entropy_clip=1.):
"""A softmax distribution with clipped entropy (1 is eq to not clipping)."""
warnings.warn(
"Rlax clipped_entropy_softmax will be deprecated. "
"Please use distrax.Softmax instead.",
PendingDeprecationWarning, stacklevel=2
)
def sample_fn(key: Array, logits: Array, action_spec=None):
del action_spec
return distrax.Softmax(logits, temperature).sample(seed=key)
def probs_fn(logits: Array, action_spec=None):
del action_spec
return distrax.Softmax(logits, temperature).probs
def logprob_fn(sample: Array, logits: Array, action_spec=None):
del action_spec
return distrax.Softmax(logits, temperature).log_prob(sample)
def entropy_fn(logits: Array):
return jnp.minimum(
distrax.Softmax(logits, temperature).entropy(),
entropy_clip * jnp.log(logits.shape[-1]))
def kl_fn(p_logits: Array, q_logits: Array):
return categorical_kl_divergence(p_logits, q_logits, temperature)
return DiscreteDistribution(sample_fn, probs_fn, logprob_fn, entropy_fn,
kl_fn)
def greedy():
"""A greedy distribution."""
warnings.warn(
"Rlax greedy will be deprecated. "
"Please use distrax.Greedy instead.",
PendingDeprecationWarning, stacklevel=2
)
def sample_fn(key: Array, preferences: Array):
return distrax.Greedy(preferences).sample(seed=key)
def probs_fn(preferences: Array):
return distrax.Greedy(preferences).probs
def log_prob_fn(sample: Array, preferences: Array):
return distrax.Greedy(preferences).log_prob(sample)
def entropy_fn(preferences: Array):
return distrax.Greedy(preferences).entropy()
return DiscreteDistribution(sample_fn, probs_fn, log_prob_fn, entropy_fn,
None)
def epsilon_greedy(epsilon=None):
"""An epsilon-greedy distribution."""
warnings.warn(
"Rlax epsilon_greedy will be deprecated. "
"Please use distrax.EpsilonGreedy instead.",
PendingDeprecationWarning, stacklevel=2
)
def sample_fn(key: Array, preferences: Array, epsilon=epsilon):
return distrax.EpsilonGreedy(preferences, epsilon).sample(seed=key)
def probs_fn(preferences: Array, epsilon=epsilon):
return distrax.EpsilonGreedy(preferences, epsilon).probs
def logprob_fn(sample: Array, preferences: Array, epsilon=epsilon):
return distrax.EpsilonGreedy(preferences, epsilon).log_prob(sample)
def entropy_fn(preferences: Array, epsilon=epsilon):
return distrax.EpsilonGreedy(preferences, epsilon).entropy()
return DiscreteDistribution(sample_fn, probs_fn, logprob_fn, entropy_fn, None)
def gaussian_diagonal(sigma=None):
"""A gaussian distribution with diagonal covariance matrix."""
warnings.warn(
"Rlax gaussian_diagonal will be deprecated. "
"Please use distrax MultivariateNormalDiag instead.",
PendingDeprecationWarning, stacklevel=2
)
def sample_fn(key: Array, mu: Array, sigma: Array = sigma):
return distrax.MultivariateNormalDiag(
mu, jnp.ones_like(mu) * sigma).sample(seed=key)
def prob_fn(sample: Array, mu: Array, sigma: Array = sigma):
return distrax.MultivariateNormalDiag(
mu, jnp.ones_like(mu) * sigma).prob(sample)
def logprob_fn(sample: Array, mu: Array, sigma: Array = sigma):
return distrax.MultivariateNormalDiag(
mu, jnp.ones_like(mu) * sigma).log_prob(sample)
def entropy_fn(mu: Array, sigma: Array = sigma):
return distrax.MultivariateNormalDiag(
mu, jnp.ones_like(mu) * sigma).entropy()
def kl_to_standard_normal_fn(mu: Array, sigma: Array = sigma):
return distrax.MultivariateNormalDiag(
mu, jnp.ones_like(mu) * sigma).kl_divergence(
distrax.MultivariateNormalDiag(
jnp.zeros_like(mu), jnp.ones_like(mu)))
def kl_fn(mu_0: Array, sigma_0: Numeric, mu_1: Array, sigma_1: Numeric):
return distrax.MultivariateNormalDiag(
mu_0, jnp.ones_like(mu_0) * sigma_0).kl_divergence(
distrax.MultivariateNormalDiag(mu_1, jnp.ones_like(mu_1) * sigma_1))
return ContinuousDistribution(sample_fn, prob_fn, logprob_fn, entropy_fn,
kl_to_standard_normal_fn, kl_fn)
def squashed_gaussian(sigma_min=-4, sigma_max=0.):
"""A squashed gaussian distribution with diagonal covariance matrix."""
warnings.warn(
"Rlax squashed_gaussian will be deprecated. "
"Please use distrax Transformed MultivariateNormalDiag distribution "
"with chained Tanh/ScalarAffine bijector instead.",
PendingDeprecationWarning, stacklevel=2
)
def sigma_activation(sigma, sigma_min=sigma_min, sigma_max=sigma_max):
return jnp.exp(sigma_min + 0.5 * (sigma_max - sigma_min) *
(jnp.tanh(sigma) + 1.))
def mu_activation(mu):
return jnp.tanh(mu)
def get_squashed_gaussian_dist(mu, sigma, action_spec=None):
if action_spec is not None:
scale = 0.5 * (action_spec.maximum - action_spec.minimum)
shift = action_spec.minimum
bijector = distrax.Chain([distrax.ScalarAffine(shift=shift, scale=scale),
distrax.ScalarAffine(shift=1.0),
distrax.Tanh()])
else:
bijector = distrax.Tanh()
return distrax.Transformed(
distribution=distrax.MultivariateNormalDiag(
loc=mu_activation(mu), scale_diag=sigma_activation(sigma)),
bijector=distrax.Block(bijector, ndims=1))
def sample_fn(key: Array, mu: Array, sigma: Array, action_spec):
return get_squashed_gaussian_dist(mu, sigma, action_spec).sample(seed=key)
def prob_fn(sample: Array, mu: Array, sigma: Array, action_spec):
return get_squashed_gaussian_dist(mu, sigma, action_spec).prob(sample)
def logprob_fn(sample: Array, mu: Array, sigma: Array, action_spec):
return get_squashed_gaussian_dist(mu, sigma, action_spec).log_prob(sample)
def entropy_fn(mu: Array, sigma: Array):
return get_squashed_gaussian_dist(mu, sigma).distribution.entropy()
def kl_to_standard_normal_fn(mu: Array, sigma: Array):
return get_squashed_gaussian_dist(mu, sigma).distribution.kl_divergence(
distrax.MultivariateNormalDiag(
jnp.zeros_like(mu), jnp.ones_like(mu)))
def kl_fn(mu_0: Array, sigma_0: Numeric, mu_1: Array, sigma_1: Numeric):
return get_squashed_gaussian_dist(mu_0, sigma_0).distribution.kl_divergence(
get_squashed_gaussian_dist(mu_1, sigma_1).distribution)
return ContinuousDistribution(sample_fn, prob_fn, logprob_fn, entropy_fn,
kl_to_standard_normal_fn, kl_fn)
def categorical_importance_sampling_ratios(pi_logits_t: Array,
mu_logits_t: Array,
a_t: Array) -> Array:
"""Compute importance sampling ratios from logits.
Args:
pi_logits_t: unnormalized logits at time t for the target policy.
mu_logits_t: unnormalized logits at time t for the behavior policy.
a_t: actions at time t.
Returns:
importance sampling ratios.
"""
warnings.warn(
"Rlax categorical_importance_sampling_ratios will be deprecated. "
"Please use distrax.importance_sampling_ratios instead.",
PendingDeprecationWarning, stacklevel=2
)
return distrax.importance_sampling_ratios(distrax.Categorical(
pi_logits_t), distrax.Categorical(mu_logits_t), a_t)
def categorical_cross_entropy(
labels: Array,
logits: Array
) -> Array:
"""Computes the softmax cross entropy between sets of logits and labels.
See "Deep Learning" by Goodfellow et al.
(http://www.deeplearningbook.org/contents/prob.html). The computation is
equivalent to:
sum_i (labels_i * log_softmax(logits_i))
Args:
labels: a valid probability distribution (non-negative, sum to 1).
logits: unnormalized log probabilities.
Returns:
a scalar loss.
"""
warnings.warn(
"Rlax categorical_cross_entropy will be deprecated. "
"Please use distrax.Categorical.cross_entropy instead.",
PendingDeprecationWarning, stacklevel=2
)
return distrax.Categorical(probs=labels).cross_entropy(
distrax.Categorical(logits=logits))
def categorical_kl_divergence(
p_logits: Array,
q_logits: Array,
temperature: float = 1.
) -> Array:
"""Compute the KL between two categorical distributions from their logits.
Args:
p_logits: unnormalized logits for the first distribution.
q_logits: unnormalized logits for the second distribution.
temperature: the temperature for the softmax distribution, defaults at 1.
Returns:
the kl divergence between the distributions.
"""
warnings.warn(
"Rlax categorical_kl_divergence will be deprecated. "
"Please use distrax.Softmax.kl_divergence instead.",
PendingDeprecationWarning, stacklevel=2
)
return distrax.Softmax(p_logits, temperature).kl_divergence(
distrax.Softmax(q_logits, temperature))
def multivariate_normal_kl_divergence(
mu_0: Array, sigma_0: Numeric, mu_1: Array, sigma_1: Numeric,
) -> Array:
"""Compute the KL between 2 gaussian distrs with diagonal covariance matrices.
Args:
mu_0: array like of mean values for policy 0
sigma_0: array like of std values for policy 0
mu_1: array like of mean values for policy 1
sigma_1: array like of std values for policy 1
Returns:
the kl divergence between the distributions.
"""
warnings.warn(
"Rlax multivariate_normal_kl_divergence will be deprecated."
"Please use distrax.MultivariateNormalDiag.kl_divergence instead.",
PendingDeprecationWarning, stacklevel=2
)
return distrax.MultivariateNormalDiag(mu_0, sigma_0).kl_divergence(
distrax.MultivariateNormalDiag(mu_1, sigma_1))
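# A minimal usage sketch (illustrative only) of the deprecated wrappers above;
# new code should construct the corresponding distrax distributions directly.
def _example_softmax_distribution():
  dist = softmax(temperature=1.)
  logits = jnp.array([1.0, 2.0, 0.5])
  probs = dist.probs(logits)          # Normalized action probabilities.
  log_prob = dist.logprob(1, logits)  # Log-probability of action index 1.
  entropy = dist.entropy(logits)      # Policy entropy in nats.
  return probs, log_prob, entropy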
| rlax-master | rlax/_src/distributions.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions to construct and learn generalized value functions.
According to the reward hypothesis (Sutton et al., 2018) any goal might be
formulated as a suitable scalar `cumulant` to be maximized. Generalized value
functions (Sutton et al. 2011) extend the notion of value functions to include
estimates of discounted sums of `cumulants` different from the main task reward.
"""
import chex
import jax.numpy as jnp
Array = chex.Array
def pixel_control_rewards(
observations: Array,
cell_size: int,
) -> Array:
"""Calculates cumulants for pixel control tasks from an observation sequence.
The observations are first split in a grid of KxK cells. For each cell a
distinct pseudo reward is computed as the average absolute change in pixel
intensity across all pixels in the cell. The change in intensity is averaged
across both pixels and channels (e.g. RGB).
The `observations` provided to this function should be cropped suitably, to
ensure that the observations' height and width are a multiple of `cell_size`.
The values of the `observations` tensor should be rescaled to [0, 1].
See "Reinforcement Learning with Unsupervised Auxiliary Tasks" by Jaderberg,
Mnih, Czarnecki et al. (https://arxiv.org/abs/1611.05397).
Args:
observations: A tensor of shape `[T+1,H,W,C]`, where
* `T` is the sequence length,
* `H` is height,
* `W` is width,
* `C` is a channel dimension.
cell_size: The size of each cell.
Returns:
A tensor of pixel control rewards calculated from the observation. The
shape is `[T,H',W']`, where `H'=H/cell_size` and `W'=W/cell_size`.
"""
chex.assert_rank(observations, 4)
chex.assert_type(observations, float)
# Shape info.
h = observations.shape[1] // cell_size # new height.
w = observations.shape[2] // cell_size # new width.
# Calculate the absolute differences across the sequence.
abs_diff = jnp.abs(observations[1:] - observations[:-1])
# Average within cells to get the cumulants.
abs_diff = abs_diff.reshape(
(-1, h, cell_size, w, cell_size, observations.shape[3]))
return abs_diff.mean(axis=(2, 4, 5))
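# A minimal usage sketch (illustrative only, not part of the library API): a
# sequence of T+1 = 5 frames of size 8x8x3, rescaled to [0, 1] and split into
# 4x4 cells, yields a [T, 2, 2] grid of pseudo rewards.
def _example_pixel_control_rewards():
  observations = jnp.linspace(0., 1., num=5 * 8 * 8 * 3).reshape((5, 8, 8, 3))
  rewards = pixel_control_rewards(observations, cell_size=4)
  # rewards.shape == (4, 2, 2); each entry is the mean absolute change in
  # pixel intensity within one 4x4 cell between consecutive frames.
  return rewards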
def feature_control_rewards(
features: Array,
cumulant_type='absolute_change',
discount=None,
) -> Array:
"""Calculates cumulants for feature control tasks from a sequence of features.
For each feature dimension, a distinct pseudo reward is computed based on the
change in the feature value between consecutive timesteps. Depending on
`cumulant_type`, cumulants may be equal the features themselves, the absolute
difference between their values in consecutive steps, their increase/decrease,
or may take the form of a potential-based reward discounted by `discount`.
See "Reinforcement Learning with Unsupervised Auxiliary Tasks" by Jaderberg,
Mnih, Czarnecki et al. (https://arxiv.org/abs/1611.05397).
Args:
features: A tensor of shape `[T+1,D]` of features.
cumulant_type: either 'feature' (feature is the reward), `absolute_change`
(the reward equals the absolute difference between consecutive
timesteps), `increase` (the reward equals the increase in the
value of the feature), `decrease` (the reward equals the decrease in the
value of the feature), or 'potential' (r=gamma*phi_{t+1} - phi_t).
discount: (optional) discount for potential based rewards.
Returns:
A tensor of cumulants calculated from the features. The shape is `[T,D]`.
"""
chex.assert_rank(features, 2)
chex.assert_type(features, float)
if cumulant_type == 'feature':
return features[1:]
elif cumulant_type == 'absolute_change':
return jnp.abs(features[1:] - features[:-1])
elif cumulant_type == 'increase':
return features[1:] - features[:-1]
elif cumulant_type == 'decrease':
return features[:-1] - features[1:]
elif cumulant_type == 'potential':
return discount * features[1:] - features[:-1]
else:
raise ValueError(f'Unknown cumulant_type {cumulant_type}')
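# A minimal usage sketch (illustrative only, not part of the library API):
# T+1 = 4 timesteps of a 2-dimensional feature give a [T, D] = [3, 2] cumulant
# array; the 'potential' variant additionally requires a discount.
def _example_feature_control_rewards():
  features = jnp.array([[0.0, 1.0],
                        [0.5, 1.0],
                        [0.5, 0.0],
                        [1.0, 0.0]])
  increase = feature_control_rewards(features, cumulant_type='increase')
  potential = feature_control_rewards(
      features, cumulant_type='potential', discount=0.9)
  # increase == features[1:] - features[:-1], with shape (3, 2).
  return increase, potential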
| rlax-master | rlax/_src/general_value_functions.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX functions for V-Trace algorithm.
V-Trace is a form of importance sampling correction that was introduced by
Espeholt et al. in the context of an off-policy actor-critic agent (IMPALA).
This subpackage implements the specific targets used in IMPALA to implement
both the value and the policy. Note however that the V-Trace return estimate is
a special case of the multistep return estimates from `multistep.py`.
"""
import collections
import chex
import jax
import jax.numpy as jnp
Array = chex.Array
Numeric = chex.Numeric
VTraceOutput = collections.namedtuple(
'vtrace_output', ['errors', 'pg_advantage', 'q_estimate'])
def vtrace(
v_tm1: Array,
v_t: Array,
r_t: Array,
discount_t: Array,
rho_tm1: Array,
lambda_: Numeric = 1.0,
clip_rho_threshold: float = 1.0,
stop_target_gradients: bool = True,
) -> Array:
"""Calculates V-Trace errors from importance weights.
V-trace computes TD-errors from multistep trajectories by applying
off-policy corrections based on clipped importance sampling ratios.
See "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor
Learner Architectures" by Espeholt et al. (https://arxiv.org/abs/1802.01561).
Args:
v_tm1: values at time t-1.
v_t: values at time t.
r_t: reward at time t.
discount_t: discount at time t.
rho_tm1: importance sampling ratios at time t-1.
lambda_: mixing parameter; a scalar or a vector for timesteps t.
clip_rho_threshold: clip threshold for importance weights.
stop_target_gradients: whether or not to apply stop gradient to targets.
Returns:
V-Trace error.
"""
chex.assert_rank([v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_],
[1, 1, 1, 1, 1, {0, 1}])
chex.assert_type([v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_],
[float, float, float, float, float, float])
chex.assert_equal_shape([v_tm1, v_t, r_t, discount_t, rho_tm1])
# Clip importance sampling ratios.
c_tm1 = jnp.minimum(1.0, rho_tm1) * lambda_
clipped_rhos_tm1 = jnp.minimum(clip_rho_threshold, rho_tm1)
# Compute the temporal difference errors.
td_errors = clipped_rhos_tm1 * (r_t + discount_t * v_t - v_tm1)
# Work backwards computing the td-errors.
def _body(acc, xs):
td_error, discount, c = xs
acc = td_error + discount * c * acc
return acc, acc
_, errors = jax.lax.scan(
_body, 0.0, (td_errors, discount_t, c_tm1), reverse=True)
# Return errors, maybe disabling gradient flow through bootstrap targets.
return jax.lax.select(
stop_target_gradients,
jax.lax.stop_gradient(errors + v_tm1) - v_tm1,
errors)
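# A minimal usage sketch (illustrative only, not part of the library API):
# a single trajectory of length T with importance ratios of 1 (on-policy),
# in which case V-Trace reduces to standard TD(lambda) errors.
def _example_vtrace():
  t = 5
  v_tm1 = jnp.zeros((t,))
  v_t = jnp.zeros((t,))
  r_t = jnp.ones((t,))
  discount_t = 0.9 * jnp.ones((t,))
  rho_tm1 = jnp.ones((t,))  # Behaviour policy == target policy.
  errors = vtrace(v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_=1.0)
  # Since the values are zero, errors[i] is the discounted return accumulated
  # from transition i onwards: errors[-1] == 1.0, errors[-2] == 1.9, etc.
  return errors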
def leaky_vtrace(
v_tm1: Array,
v_t: Array,
r_t: Array,
discount_t: Array,
rho_tm1: Array,
alpha_: float = 1.0,
lambda_: Numeric = 1.0,
clip_rho_threshold: float = 1.0,
stop_target_gradients: bool = True):
"""Calculates Leaky V-Trace errors from importance weights.
Leaky-Vtrace is a combination of Importance sampling and V-trace, where the
degree of mixing is controlled by a scalar `alpha` (that may be meta-learnt).
See "Self-Tuning Deep Reinforcement Learning"
by Zahavy et al. (https://arxiv.org/abs/2002.12928)
Args:
v_tm1: values at time t-1.
v_t: values at time t.
r_t: reward at time t.
discount_t: discount at time t.
rho_tm1: importance weights at time t-1.
alpha_: mixing parameter for Importance Sampling and V-trace.
lambda_: mixing parameter; a scalar or a vector for timesteps t.
clip_rho_threshold: clip threshold for importance weights.
stop_target_gradients: whether or not to apply stop gradient to targets.
Returns:
Leaky V-Trace error.
"""
chex.assert_rank([v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_],
[1, 1, 1, 1, 1, {0, 1}])
chex.assert_type([v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_],
[float, float, float, float, float, float])
chex.assert_equal_shape([v_tm1, v_t, r_t, discount_t, rho_tm1])
# Mix clipped and unclipped importance sampling ratios.
c_tm1 = (
(1 - alpha_) * rho_tm1 + alpha_ * jnp.minimum(1.0, rho_tm1)) * lambda_
clipped_rhos_tm1 = (
(1 - alpha_) * rho_tm1 + alpha_ * jnp.minimum(clip_rho_threshold, rho_tm1)
)
# Compute the temporal difference errors.
td_errors = clipped_rhos_tm1 * (r_t + discount_t * v_t - v_tm1)
  # Accumulate the td-errors backwards in time to obtain the V-trace errors.
def _body(acc, xs):
td_error, discount, c = xs
acc = td_error + discount * c * acc
return acc, acc
_, errors = jax.lax.scan(
_body, 0.0, (td_errors, discount_t, c_tm1), reverse=True)
# Return errors, maybe disabling gradient flow through bootstrap targets.
return jax.lax.select(
stop_target_gradients,
jax.lax.stop_gradient(errors + v_tm1) - v_tm1,
errors)
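# Editor's note (hedged sketch, reusing the illustrative arrays from the
# `vtrace` example above): `alpha_` interpolates between plain, unclipped
# importance sampling (alpha_=0.0) and fully clipped V-trace (alpha_=1.0),
# so the two calls below should agree up to numerical precision.
#
#   e_vtrace = vtrace(v_tm1, v_t, r_t, discount_t, rho_tm1)
#   e_leaky = leaky_vtrace(v_tm1, v_t, r_t, discount_t, rho_tm1, alpha_=1.0)
#   # np.testing.assert_allclose(e_vtrace, e_leaky, rtol=1e-5)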
def vtrace_td_error_and_advantage(
v_tm1: Array,
v_t: Array,
r_t: Array,
discount_t: Array,
rho_tm1: Array,
lambda_: Numeric = 1.0,
clip_rho_threshold: float = 1.0,
clip_pg_rho_threshold: float = 1.0,
stop_target_gradients: bool = True,
) -> VTraceOutput:
"""Calculates V-Trace errors and PG advantage from importance weights.
  This function computes the TD-errors and policy gradient advantage terms
as used by the IMPALA distributed actor-critic agent.
See "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor
Learner Architectures" by Espeholt et al. (https://arxiv.org/abs/1802.01561)
Args:
v_tm1: values at time t-1.
v_t: values at time t.
r_t: reward at time t.
discount_t: discount at time t.
rho_tm1: importance weights at time t-1.
lambda_: mixing parameter; a scalar or a vector for timesteps t.
clip_rho_threshold: clip threshold for importance ratios.
clip_pg_rho_threshold: clip threshold for policy gradient importance ratios.
stop_target_gradients: whether or not to apply stop gradient to targets.
Returns:
a tuple of V-Trace error, policy gradient advantage, and estimated Q-values.
"""
chex.assert_rank([v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_],
[1, 1, 1, 1, 1, {0, 1}])
chex.assert_type([v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_],
[float, float, float, float, float, float])
chex.assert_equal_shape([v_tm1, v_t, r_t, discount_t, rho_tm1])
# If scalar make into vector.
lambda_ = jnp.ones_like(discount_t) * lambda_
errors = vtrace(
v_tm1, v_t, r_t, discount_t, rho_tm1,
lambda_, clip_rho_threshold, stop_target_gradients)
targets_tm1 = errors + v_tm1
q_bootstrap = jnp.concatenate([
lambda_[:-1] * targets_tm1[1:] + (1 - lambda_[:-1]) * v_tm1[1:],
v_t[-1:],
], axis=0)
q_estimate = r_t + discount_t * q_bootstrap
clipped_pg_rho_tm1 = jnp.minimum(clip_pg_rho_threshold, rho_tm1)
pg_advantages = clipped_pg_rho_tm1 * (q_estimate - v_tm1)
return VTraceOutput(
errors=errors, pg_advantage=pg_advantages, q_estimate=q_estimate)
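# Editor's sketch of how these outputs are typically combined into
# IMPALA-style losses; this is an assumption about downstream usage, not part
# of the original source. `logpi_a_t` is a hypothetical array of
# log pi(a_t | x_t) under the learner's policy.
#
#   out = vtrace_td_error_and_advantage(v_tm1, v_t, r_t, discount_t, rho_tm1)
#   critic_loss = 0.5 * jnp.sum(jnp.square(out.errors))
#   pg_loss = -jnp.sum(logpi_a_t * jax.lax.stop_gradient(out.pg_advantage))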
def leaky_vtrace_td_error_and_advantage(
v_tm1: chex.Array,
v_t: chex.Array,
r_t: chex.Array,
discount_t: chex.Array,
rho_tm1: chex.Array,
alpha: float = 1.0,
lambda_: Numeric = 1.0,
clip_rho_threshold: float = 1.0,
clip_pg_rho_threshold: float = 1.0,
stop_target_gradients: bool = True,
) -> VTraceOutput:
"""Calculates Leaky V-Trace errors and PG advantage from importance weights.
  This function computes the Leaky V-Trace TD-errors and policy gradient
  advantage terms as used by the IMPALA distributed actor-critic agent.
  Leaky V-trace is a combination of importance sampling and V-trace, where the
degree of mixing is controlled by a scalar `alpha` (that may be meta-learnt).
See "Self-Tuning Deep Reinforcement Learning"
by Zahavy et al. (https://arxiv.org/abs/2002.12928) and
"IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor
Learner Architectures" by Espeholt et al. (https://arxiv.org/abs/1802.01561)
Args:
v_tm1: values at time t-1.
v_t: values at time t.
r_t: reward at time t.
discount_t: discount at time t.
rho_tm1: importance weights at time t-1.
    alpha: mixing parameter between clipped and unclipped importance sampling
      weights.
lambda_: mixing parameter; a scalar or a vector for timesteps t.
clip_rho_threshold: clip threshold for importance ratios.
clip_pg_rho_threshold: clip threshold for policy gradient importance ratios.
stop_target_gradients: whether or not to apply stop gradient to targets.
Returns:
a tuple of V-Trace error, policy gradient advantage, and estimated Q-values.
"""
chex.assert_rank([v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_],
[1, 1, 1, 1, 1, {0, 1}])
chex.assert_type([v_tm1, v_t, r_t, discount_t, rho_tm1, lambda_],
[float, float, float, float, float, float])
chex.assert_equal_shape([v_tm1, v_t, r_t, discount_t, rho_tm1])
# If scalar make into vector.
lambda_ = jnp.ones_like(discount_t) * lambda_
errors = leaky_vtrace(
v_tm1, v_t, r_t, discount_t, rho_tm1, alpha,
lambda_, clip_rho_threshold, stop_target_gradients)
targets_tm1 = errors + v_tm1
q_bootstrap = jnp.concatenate([
lambda_[:-1] * targets_tm1[1:] + (1 - lambda_[:-1]) * v_tm1[1:],
v_t[-1:],
], axis=0)
q_estimate = r_t + discount_t * q_bootstrap
clipped_pg_rho_tm1 = ((1 - alpha) * rho_tm1 + alpha *
jnp.minimum(clip_pg_rho_threshold, rho_tm1))
pg_advantages = clipped_pg_rho_tm1 * (q_estimate - v_tm1)
return VTraceOutput(
errors=errors, pg_advantage=pg_advantages, q_estimate=q_estimate)
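# Editor's note (hedged): with alpha=1.0 both the correction ratios and the
# policy gradient ratios above reduce to their fully clipped forms, so this
# function coincides with `vtrace_td_error_and_advantage`; with alpha=0.0 the
# correction is plain, unclipped importance sampling (Zahavy et al., 2020).
#
#   leaky_out = leaky_vtrace_td_error_and_advantage(
#       v_tm1, v_t, r_t, discount_t, rho_tm1, alpha=1.0)
#   # leaky_out should match vtrace_td_error_and_advantage(...) elementwise.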
| rlax-master | rlax/_src/vtrace.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for exploration.py."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import numpy as np
from rlax._src import exploration
class GaussianTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._num_actions = 3
self._rng_key = jax.random.PRNGKey(42)
@chex.all_variants()
def test_deterministic(self):
"""Check that noisy and noisless actions match for zero stddev."""
add_noise = self.variant(exploration.add_gaussian_noise)
    # Test that noisy and noiseless actions match for zero stddev.
for _ in range(10):
action = np.random.normal(0., 1., self._num_actions)
# Test output.
self._rng_key, key = jax.random.split(self._rng_key)
noisy_action = add_noise(key, action, 0.)
np.testing.assert_allclose(action, noisy_action)
class OrnsteinUhlenbeckTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._num_actions = 3
self._rng_key = jax.random.PRNGKey(42)
@chex.all_variants()
def test_deterministic(self):
"""Check that noisy and noisless actions match for zero stddev."""
add_noise = self.variant(exploration.add_ornstein_uhlenbeck_noise)
    # Test that noisy and noiseless actions match for zero stddev.
noise_tm1 = np.zeros((self._num_actions,))
for _ in range(10):
action = np.random.normal(0., 1., self._num_actions)
# Test output.
self._rng_key, key = jax.random.split(self._rng_key)
noisy_action = add_noise(key, action, noise_tm1, 1., 0.)
noise_tm1 = action - noisy_action
np.testing.assert_allclose(action, noisy_action)
class DirichletNoiseTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._batch_size = 5
self._num_actions = 10
self._rng_key = jax.random.PRNGKey(42)
@chex.all_variants()
def test_deterministic(self):
"""Check that noisy and noisless actions match for zero stddev."""
add_noise = self.variant(exploration.add_dirichlet_noise)
    # Test that noisy and noiseless priors match when the Dirichlet fraction is zero.
for _ in range(10):
prior = np.random.normal(0., 1., (self._batch_size, self._num_actions))
# Test output.
self._rng_key, key = jax.random.split(self._rng_key)
noisy_prior = add_noise(
key, prior, dirichlet_alpha=0.3, dirichlet_fraction=0.)
np.testing.assert_allclose(prior, noisy_prior)
class EMIntrinsicRewardTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.num_neighbors = 2
self.reward_scale = 1.
@chex.all_variants()
def test_novelty_reward(self):
"""Check reward is higher for novel embed than those identical to memory."""
@self.variant
def episodic_memory_intrinsic_rewards(embeddings, reward_scale):
return exploration.episodic_memory_intrinsic_rewards(
embeddings, self.num_neighbors, reward_scale, max_memory_size=10)
    # Memory starts out as all zeros, so adding more zeros should yield a
    # lower reward than adding two novel embeddings.
identical_embeddings = np.array([[0., 0.], [0., 0.]])
novel_embeddings = np.array([[1.3, 2.7], [-10.4, 16.01]])
low_reward, state = episodic_memory_intrinsic_rewards(
identical_embeddings, self.reward_scale)
np.testing.assert_equal(np.array(state.distance_sum), 0)
high_reward, _ = episodic_memory_intrinsic_rewards(
novel_embeddings, self.reward_scale)
np.testing.assert_array_less(low_reward, high_reward)
@chex.all_variants()
def test_custom_memory(self):
"""Check that embeddings are added appropriately to a custom memory."""
@self.variant
def episodic_memory_intrinsic_rewards(embeddings, memory, reward_scale):
return exploration.episodic_memory_intrinsic_rewards(
embeddings, self.num_neighbors, reward_scale,
exploration.IntrinsicRewardState(memory=memory, next_memory_index=2),
max_memory_size=4)
embeddings = np.array([[2., 2.], [3., 3.], [4., 4.]])
memory = np.array([[-1., -1.,], [1., 1.], [0., 0.], [0., 0.]])
_, intrinsic_reward_state = episodic_memory_intrinsic_rewards(
embeddings, memory, self.reward_scale)
np.testing.assert_array_equal(
intrinsic_reward_state.memory,
# Embeddings should have been added in a ring buffer way.
np.array([[4., 4.,], [1., 1.], [2., 2.], [3., 3.]]))
if __name__ == '__main__':
jax.config.update('jax_numpy_rank_promotion', 'raise')
absltest.main()
| rlax-master | rlax/_src/exploration_test.py |