python_code | repo_name | file_path
---|---|---|
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers for Perceiver IO and HiP construction."""
import enum
import math
from typing import Any, List, Optional, Sequence, Tuple
import chex
from einshape import jax_einshape as einshape
import haiku as hk
import jax
from jax import numpy as jnp
import numpy as np
@enum.unique
class ModelOutputKeys(str, enum.Enum):
INPUT_RECONSTRUCTION = 'input_reconstruction'
LATENTS = 'latents'
def padding_to_make_divisible(index_dim: int, num_groups: int) -> int:
return num_groups * math.ceil(index_dim / num_groups) - index_dim
def conv_1d(
output_channels: int,
init_scale: float = 1.0,
with_bias: bool = True,
name: Optional[str] = None) -> hk.Linear:
"""A 1D convolution."""
return hk.Linear(
output_size=output_channels,
with_bias=with_bias,
w_init=hk.initializers.VarianceScaling(init_scale),
name=name)
def f32_softmax(x: chex.Array) -> chex.Array:
if x.dtype in [jnp.bfloat16, jnp.float16]:
return jax.nn.softmax(x.astype(jnp.float32)).astype(x.dtype)
else:
return jax.nn.softmax(x)
def layer_norm(x: chex.Array, name: Optional[str] = None) -> jax.Array:
return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True,
name=name)(x)
def get_activation(activation_name: str) -> Any:
if activation_name == 'sq_relu':
return lambda x: jax.nn.relu(x)**2
else:
return getattr(jax.nn, activation_name)
def attend(q, k, v, dropout_prob=0.0, attention_mask=None):
"""Computes multi-head attention using a query, key and value.
... indicates multiple batch / group dimensions.
Args:
q: Query with shape [..., q_indices, num_heads, head_dim].
k: Key with shape [..., kv_indices, num_heads, head_dim].
v: Value with shape [..., kv_indices, num_heads, head_dim].
dropout_prob: dropout probability on the attention weights.
attention_mask: Array of shape [..., q_indices, kv_indices] indicating
which keys/vals each query can attend to.
Returns:
Output of the attention with shape [..., q_indices, hiddens]
"""
num_head_channels = q.shape[-1]
attention = jnp.einsum('...nhc,...mhc->...hnm', q, k)
attention *= 1. / math.sqrt(num_head_channels)
if attention_mask is not None:
# Use large_k instead of np.NINF because np.NINF breaks for causal-masked
# left-padded sampling. For more, see the colab below.
# //experimental/users/tycai/lrl/NINF_NaN_investigation.ipynb
large_k = jnp.array(1e4 if attention.dtype == jnp.float16 else 1e30,
dtype=attention.dtype)
attention = jnp.where(
# Add a dummy head dimension to the attention mask.
attention_mask[..., None, :, :],
attention,
-large_k)
normalized = f32_softmax(attention)
if dropout_prob > 0:
normalized = hk.dropout(hk.next_rng_key(), dropout_prob, normalized)
summed = jnp.einsum('...hnm,...mhd->...nhd', normalized, v)
# Concatenate heads:
summed = einshape('...nhd->...n(hd)', summed)
if attention_mask is not None:
# Zero out the output of queries that attend to no keys or values.
# -> [..., q_indices, 1]
wipe_attn = jnp.all(attention_mask == 0, axis=-1, keepdims=True)
summed = jnp.where(wipe_attn, jnp.zeros_like(summed), summed)
return summed
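# Illustrative usage sketch (added, not part of the original file): with
# dropout_prob=0 no Haiku RNG is required, so `attend` can be called directly.
# The shapes below are hypothetical.
def _attend_shape_example():
  rng = jax.random.PRNGKey(0)
  q = jax.random.normal(rng, (2, 5, 8, 16))  # [batch, q_indices, heads, head_dim]
  k = jax.random.normal(rng, (2, 7, 8, 16))  # [batch, kv_indices, heads, head_dim]
  v = jax.random.normal(rng, (2, 7, 8, 16))  # [batch, kv_indices, heads, head_dim]
  out = attend(q, k, v, dropout_prob=0.0)
  chex.assert_shape(out, (2, 5, 8 * 16))  # Heads are concatenated on the last axis.
  return out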
def assign_groups_to_modalities(
num_groups: int, index_dim_per_modality: Sequence[int]
) -> Tuple[List[int], int]:
"""Computes the number of groups assigned to each modality."""
num_modalities = len(index_dim_per_modality)
if num_modalities > num_groups:
raise ValueError(
        f'{num_modalities} > {num_groups}. '
        'Can\'t yet deal with groups that have '
        'multiple modalities.')
extra_groups = num_groups - num_modalities
# Assign extra groups to each modality proportionally to the number of points
# it contains (i.e. its index dimension). We do this by greedily assigning
# groups to each modality so that all groups are used and the largest number
# of points assigned to any group is minimized.
num_groups_per_modality = [1] * num_modalities
index_dim_per_group = list(index_dim_per_modality)
for _ in range(extra_groups):
modality = np.argmax(index_dim_per_group)
num_groups_per_modality[modality] += 1
index_dim_per_group[modality] = (
index_dim_per_modality[modality] / num_groups_per_modality[modality])
index_dim_per_group = math.ceil(max(index_dim_per_group))
return num_groups_per_modality, index_dim_per_group
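# Worked example (added for illustration, not in the original file): with four
# groups and modalities of 100 and 10 points, the larger modality greedily
# receives both extra groups, so the largest per-group index dimension is
# ceil(100 / 3) = 34.
def _assign_groups_example():
  groups_per_modality, index_dim_per_group = assign_groups_to_modalities(
      num_groups=4, index_dim_per_modality=[100, 10])
  assert groups_per_modality == [3, 1]
  assert index_dim_per_group == 34
  return groups_per_modality, index_dim_per_group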
class TrainablePositionEncoding(hk.Module):
"""Trainable position encoding."""
def __init__(self,
index_dim: int,
num_channels: int = 128,
init_scale: float = 1.0,
name: Optional[str] = None):
super().__init__(name=name)
self._index_dim = index_dim
self._num_channels = num_channels
self._init_scale = init_scale
def __call__(self,
batch_size: Optional[int]) -> jnp.ndarray:
pos_embs = hk.get_parameter(
'pos_embs', [self._index_dim, self._num_channels],
init=hk.initializers.VarianceScaling(scale=self._init_scale))
if batch_size is not None:
pos_embs = jnp.broadcast_to(
pos_embs[None, :, :], (batch_size,) + pos_embs.shape)
return pos_embs
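# Usage sketch (illustrative, not in the original file): the embeddings are a
# Haiku parameter, so the module is applied inside hk.transform; the batch size
# and sizes below are hypothetical.
def _position_encoding_example():
  def forward(batch_size):
    return TrainablePositionEncoding(index_dim=16, num_channels=128)(batch_size)
  fwd = hk.transform(forward)
  params = fwd.init(jax.random.PRNGKey(0), 4)
  return fwd.apply(params, None, 4)  # Shape [4, 16, 128].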
class StochasticDepth(hk.Module):
"""Batchwise Dropout used in EfficientNet/NfNet, optionally sans rescaling."""
def __init__(self,
drop_rate: float,
scale_by_keep: bool = False,
name: Optional[str] = None):
super().__init__(name=name)
self.drop_rate = drop_rate
self.scale_by_keep = scale_by_keep
def __call__(self, x: chex.Array, is_training: bool) -> jnp.ndarray:
if not is_training:
return x # pytype: disable=bad-return-type # numpy-scalars
batch_size = x.shape[0]
r = jax.random.uniform(
hk.next_rng_key(),
[batch_size] + [1] * (x.ndim - 1),
dtype=x.dtype)
keep_prob = 1. - self.drop_rate
binary_tensor = jnp.floor(keep_prob + r)
if self.scale_by_keep:
x = x / keep_prob
return x * binary_tensor
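# Minimal usage sketch (assumed, not from the original file): StochasticDepth
# draws a Haiku RNG at apply time, so it is wrapped in hk.transform; the drop
# rate and batch shape are hypothetical.
def _stochastic_depth_example():
  def forward(x):
    return StochasticDepth(drop_rate=0.2)(x, is_training=True)
  fwd = hk.transform(forward)
  x = jnp.ones([8, 4])
  params = fwd.init(jax.random.PRNGKey(0), x)
  return fwd.apply(params, jax.random.PRNGKey(1), x)  # Rows are kept or zeroed.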
class Dense(hk.Module):
"""A Transformer-style dense module to follow attention."""
def __init__(self,
widening_factor: int = 4,
dropout_prob: float = 0.0,
init_scale: float = 1.,
activation_name: str = 'sq_relu',
name: Optional[str] = None):
super().__init__(name=name)
self._widening_factor = widening_factor
self._dropout_prob = dropout_prob
self._init_scale = init_scale
self._activation_name = activation_name
def __call__(self, x: chex.Array, is_training: bool = True) -> chex.Array:
dropout_prob = self._dropout_prob if is_training else 0.0
output_channels = x.shape[-1]
x = conv_1d(
output_channels=self._widening_factor * output_channels,
init_scale=self._init_scale,
name='mlp_hidden_linear')(x)
x = get_activation(self._activation_name)(x)
x = conv_1d(
output_channels=output_channels,
init_scale=self._init_scale,
name='mlp_output_linear')(x)
return hk.dropout(hk.next_rng_key(), dropout_prob, x)
class Attention(hk.Module):
"""Multi-headed {cross, self}-attention."""
def __init__(self,
num_heads: int = 8,
init_scale: float = 1.0,
with_final_bias: bool = True,
dropout_prob: float = 0.0,
qk_channels: Optional[int] = None,
v_channels: Optional[int] = None,
output_channels: Optional[int] = None,
name: Optional[str] = None):
super().__init__(name=name)
self._num_heads = num_heads
self._init_scale = init_scale
self._with_final_bias = with_final_bias
self._dropout_prob = dropout_prob
# If none of these are passed, the Q input determines the output shape:
self._qk_channels = qk_channels
self._v_channels = v_channels
self._output_channels = output_channels
def __call__(self, inputs_q, inputs_kv, attention_mask=None):
# Q and K must have the same number of channels.
# Default to preserving Q's input's shape.
if self._qk_channels is None:
self._qk_channels = inputs_q.shape[-1]
# V's num_channels determines the shape of the output of QKV-attention.
# Default to the same number of channels used in the key-query operation.
if self._v_channels is None:
self._v_channels = self._qk_channels
# Project the output of QKV attention to a desired number of channels.
# Default to the same number as the output of the QKV attention operation.
if self._output_channels is None:
self._output_channels = self._v_channels
assert self._qk_channels % self._num_heads == 0
assert self._v_channels % self._num_heads == 0
qk_channels_per_head = self._qk_channels // self._num_heads
v_channels_per_head = self._v_channels // self._num_heads
# Project QKV to a common feature dimension.
q = conv_1d(
self._qk_channels,
init_scale=self._init_scale,
name='query_linear')(inputs_q)
k = conv_1d(
self._qk_channels,
init_scale=self._init_scale,
name='key_linear')(inputs_kv)
v = conv_1d(
self._v_channels,
init_scale=self._init_scale,
name='value_linear')(inputs_kv)
# Reshape channels for multi-head attention.
q = einshape('...m(hc)->...mhc', q,
h=self._num_heads, c=qk_channels_per_head)
k = einshape('...n(hc)->...nhc', k,
h=self._num_heads, c=qk_channels_per_head)
v = einshape('...n(hd)->...nhd', v,
h=self._num_heads, d=v_channels_per_head)
result = attend(q, k, v, dropout_prob=self._dropout_prob,
attention_mask=attention_mask)
return conv_1d(
self._output_channels,
with_bias=self._with_final_bias,
init_scale=self._init_scale,
name='attention_output_linear')(result)
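# Usage sketch (illustrative, not part of the original file): cross-attention
# from a small set of latents to a longer input sequence, wrapped in
# hk.transform. All shapes and channel counts below are hypothetical.
def _cross_attention_example():
  def forward(latents, inputs):
    attention = Attention(
        num_heads=8, qk_channels=64, v_channels=64, output_channels=128)
    return attention(inputs_q=latents, inputs_kv=inputs)
  fwd = hk.transform(forward)
  latents = jnp.zeros([2, 16, 128])  # [batch, num_latents, channels]
  inputs = jnp.zeros([2, 100, 32])   # [batch, num_inputs, channels]
  params = fwd.init(jax.random.PRNGKey(0), latents, inputs)
  out = fwd.apply(params, None, latents, inputs)
  chex.assert_shape(out, (2, 16, 128))
  return out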
| hierarchical_perceiver-main | perceiver_helpers.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Install script for setuptools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
from setuptools import find_packages
from setuptools import setup
setup(
name='dm-fast-mapping',
version=imp.load_source('_version',
'dm_fast_mapping/_version.py').__version__,
    description=('DeepMind Fast Language Learning Tasks, a set of Unity-based '
                 'machine-learning research tasks.'),
author='DeepMind',
license='Apache License, Version 2.0',
keywords='reinforcement-learning python machine learning language',
packages=find_packages(exclude=['examples']),
install_requires=[
'absl-py',
'dm-env',
'dm-env-rpc',
'docker',
'grpcio',
'numpy',
'portpicker',
],
tests_require=['nose'],
python_requires='>=3.7',
extras_require={'examples': ['pygame']},
test_suite='nose.collector',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| dm_fast_mapping-master | setup.py |
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python utility functions for loading DeepMind Fast Language Learning Tasks."""
import codecs
import collections
import json
import os
import re
import subprocess
import time
import typing
from absl import logging
import dm_env
import docker
import grpc
import numpy as np
import portpicker
from dm_env_rpc.v1 import connection as dm_env_rpc_connection
from dm_env_rpc.v1 import dm_env_adaptor
from dm_env_rpc.v1 import dm_env_rpc_pb2
from dm_env_rpc.v1 import error
from dm_env_rpc.v1 import tensor_utils
# Maximum number of times to attempt gRPC connection.
_MAX_CONNECTION_ATTEMPTS = 10
# Port to expect the docker environment to internally listen on.
_DOCKER_INTERNAL_GRPC_PORT = 10000
_DEFAULT_DOCKER_IMAGE_NAME = 'gcr.io/deepmind-environments/dm_fast_mapping:v1.1.1'
_FAST_MAPPING_TASK_OBSERVATIONS = ['RGB_INTERLEAVED', 'TEXT']
FAST_MAPPING_TASK_LEVEL_NAMES = frozenset((
'architecture_comparison/fast_map_three_objs',
'fast_slow/fast_map_three_objs_bed_tray',
'fast_slow/fast_map_three_objs_bed_tray_putting_near',
'fast_slow/fast_map_three_objs_bed_tray_putting_on',
'fast_slow/fast_map_three_objs',
'fast_slow/slow_learn_three_objs_bed_tray_lifting',
'fast_slow/slow_learn_three_objs_bed_tray_putting_near',
'fast_slow/slow_learn_three_objs_bed_tray_putting_on',
'fast_slow/test_holdout_fast_map_three_objs_bed_tray_putting_on',
'fast_slow/two_phase_slow_learn_three_objs_bed_tray_putting_near',
'fast_slow/two_phase_slow_learn_three_objs_bed_tray_putting_on',
'intrinsic_motivation/fast_map_three_objs_no_shaping_reward',
'new_obj_generalization/fast_map_heldout_test_objs',
'new_obj_generalization/fast_map_three_objs_global_five',
'new_obj_generalization/fast_map_three_objs_global_ten',
'new_obj_generalization/fast_map_three_objs_global_three',
'new_obj_generalization/fast_map_three_objs_global_twenty',
'num_generalization/fast_map_eight_objs',
'num_generalization/fast_map_five_objs',
'num_generalization/fast_map_three_objs',
'with_distractors/eval_fast_map_two_episodes_three_objs_five_distractor',
'with_distractors/eval_fast_map_three_episodes_three_objs_five_distractor',
'with_distractors/eval_fast_map_four_episodes_three_objs_no_distractor',
'with_distractors/eval_fast_map_four_episodes_three_objs_one_distractor',
'with_distractors/eval_fast_map_three_objs_ten_distractor',
'with_distractors/eval_fast_map_three_objs_twenty_distractor',
'with_distractors/fast_map_three_objs_no_distractor',
'with_distractors/fast_map_three_objs_one_distractor',
'with_distractors/fast_map_three_objs_two_distractor',
))
_ConnectionDetails = collections.namedtuple('_ConnectionDetails',
['channel', 'connection', 'specs'])
class _FastMappingTasksEnv(dm_env_adaptor.DmEnvAdaptor):
"""An implementation of dm_env_rpc.DmEnvAdaptor for Fast Language Learning tasks."""
def __init__(self, connection_details, requested_observations,
num_action_repeats):
super(_FastMappingTasksEnv,
self).__init__(connection_details.connection,
connection_details.specs, requested_observations)
self._channel = connection_details.channel
self._num_action_repeats = num_action_repeats
def close(self):
super(_FastMappingTasksEnv, self).close()
self._channel.close()
def step(self, action):
"""Implementation of dm_env.step that supports repeated actions."""
timestep = None
discount = None
reward = None
for _ in range(self._num_action_repeats):
next_timestep = super(_FastMappingTasksEnv, self).step(action)
# Accumulate reward per timestep.
if next_timestep.reward is not None:
reward = (reward or 0.) + next_timestep.reward
# Calculate the product for discount.
if next_timestep.discount is not None:
discount = discount if discount else []
discount.append(next_timestep.discount)
timestep = dm_env.TimeStep(next_timestep.step_type, reward,
# Note: np.product(None) returns None.
np.product(discount),
next_timestep.observation)
if timestep.last():
return timestep
return timestep
class _FastMappingTasksContainerEnv(_FastMappingTasksEnv):
"""An implementation of _FastMappingTasksEnv.
Ensures that the provided Docker container is closed on exit.
"""
def __init__(self, connection_details, requested_observations,
num_action_repeats, container):
super(_FastMappingTasksContainerEnv,
self).__init__(connection_details, requested_observations,
num_action_repeats)
self._container = container
def close(self):
super(_FastMappingTasksContainerEnv, self).close()
try:
self._container.kill()
except docker.errors.NotFound:
pass # Ignore, container has already been closed.
class _FastMappingTasksProcessEnv(_FastMappingTasksEnv):
"""An implementation of _FastMappingTasksEnv.
Ensure that the provided running process is closed on exit.
"""
def __init__(self, connection_details, requested_observations,
num_action_repeats, process):
super(_FastMappingTasksProcessEnv,
self).__init__(connection_details, requested_observations,
num_action_repeats)
self._process = process
def close(self):
super(_FastMappingTasksProcessEnv, self).close()
self._process.terminate()
self._process.wait()
def _check_grpc_channel_ready(channel):
"""Helper function to check the gRPC channel is ready N times."""
for _ in range(_MAX_CONNECTION_ATTEMPTS - 1):
try:
return grpc.channel_ready_future(channel).result(timeout=1)
except grpc.FutureTimeoutError:
pass
return grpc.channel_ready_future(channel).result(timeout=1)
def _can_send_message(connection):
"""Returns if `connection` is healthy and able to process requests."""
try:
# This should return a response with an error unless the server isn't yet
# receiving requests.
connection.send(dm_env_rpc_pb2.StepRequest())
except error.DmEnvRpcError:
return True
except grpc.RpcError:
return False
def _create_channel_and_connection(port):
"""Returns a tuple of `(channel, connection)`."""
for _ in range(_MAX_CONNECTION_ATTEMPTS):
channel = grpc.secure_channel('localhost:{}'.format(port),
grpc.local_channel_credentials())
_check_grpc_channel_ready(channel)
connection = dm_env_rpc_connection.Connection(channel)
if _can_send_message(connection):
break
else:
# A gRPC server running within Docker sometimes reports that the channel
# is ready but transitively returns an error (status code 14) on first
# use. Giving the server some time to breath and retrying often fixes the
# problem.
connection.close()
channel.close()
time.sleep(1.0)
return channel, connection
def _parse_exception_message(message):
"""Returns a human-readable version of a dm_env_rpc json error message."""
try:
match = re.match(r'^message\:\ \"(.*)\"$', message)
json_data = codecs.decode(match.group(1), 'unicode-escape')
parsed_json_data = json.loads(json_data)
return ValueError(json.dumps(parsed_json_data, indent=4))
except: # pylint: disable=bare-except
return message
def _wrap_send(send):
"""Wraps `send` in order to reformat exceptions."""
try:
return send()
except ValueError as e:
e.args = [_parse_exception_message(e.args[0])]
raise
def _connect_to_environment(port, settings):
"""Helper function for connecting to a running dm_fast_mapping environment."""
if settings.level_name not in FAST_MAPPING_TASK_LEVEL_NAMES:
raise ValueError(
'Level named "{}" is not a valid dm_fast_mapping level.'.format(
settings.level_name))
channel, connection = _create_channel_and_connection(port)
original_send = connection.send
connection.send = lambda request: _wrap_send(lambda: original_send(request))
world_name = connection.send(
dm_env_rpc_pb2.CreateWorldRequest(
settings={
'seed': tensor_utils.pack_tensor(settings.seed),
'episodeId': tensor_utils.pack_tensor(0),
'levelName': tensor_utils.pack_tensor(settings.level_name),
})).world_name
join_world_settings = {
'width':
tensor_utils.pack_tensor(settings.width),
'height':
tensor_utils.pack_tensor(settings.height),
'EpisodeLengthSeconds':
tensor_utils.pack_tensor(settings.episode_length_seconds),
'ShowReachabilityHUD': tensor_utils.pack_tensor(False),
}
specs = connection.send(
dm_env_rpc_pb2.JoinWorldRequest(
world_name=world_name, settings=join_world_settings)).specs
return _ConnectionDetails(channel=channel, connection=connection, specs=specs)
class EnvironmentSettings(typing.NamedTuple):
"""Collection of settings used to start a specific Fast Language Learning task.
Required attributes:
seed: Seed to initialize the environment's RNG.
level_name: Name of the level to load.
Optional attributes:
width: Width (in pixels) of the desired RGB observation; defaults to 96.
height: Height (in pixels) of the desired RGB observation; defaults to 72.
episode_length_seconds: Maximum episode length (in seconds); defaults to
120.
num_action_repeats: Number of times to step the environment with the
provided action in calls to `step()`.
"""
seed: int
level_name: str
width: int = 96
height: int = 72
episode_length_seconds: float = 120.0
num_action_repeats: int = 1
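# Illustrative example (added, not part of the original module): constructing
# settings for one of the levels listed in FAST_MAPPING_TASK_LEVEL_NAMES above;
# all other fields keep their defaults.
def _example_settings() -> EnvironmentSettings:
  return EnvironmentSettings(
      seed=123, level_name='fast_slow/fast_map_three_objs')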
def _validate_environment_settings(settings):
"""Helper function to validate the provided environment settings."""
if settings.episode_length_seconds <= 0.0:
raise ValueError('episode_length_seconds must have a positive value.')
if settings.num_action_repeats <= 0:
raise ValueError('num_action_repeats must have a positive value.')
if settings.width <= 0 or settings.height <= 0:
raise ValueError('width and height must have a positive value.')
if ('with_distractors/' in settings.level_name and
settings.episode_length_seconds != 450.0):
raise ValueError(
'episode_length_seconds must be 450.0 for with_distractors/ levels.')
def load_from_disk(path, settings):
"""Load Fast Language Learning Tasks from disk.
Args:
path: Directory containing dm_fast_mapping environment.
settings: EnvironmentSettings required to start the environment.
Returns:
An implementation of dm_env.Environment.
Raises:
RuntimeError: If unable to start environment process.
"""
_validate_environment_settings(settings)
executable_path = os.path.join(path, 'Linux64Player')
libosmesa_path = os.path.join(path, 'external_libosmesa_llvmpipe.so')
if not os.path.exists(executable_path) or not os.path.exists(libosmesa_path):
raise RuntimeError(
'Cannot find dm_fast_mapping executable or dependent files at path: {}'
.format(path))
port = portpicker.pick_unused_port()
process_flags = [
executable_path,
# Unity command-line flags.
'-logfile',
'-batchmode',
'-noaudio',
# Other command-line flags.
'--logtostderr',
'--server_type=GRPC',
'--uri_address=[::]:{}'.format(port),
]
os.environ.update({
'UNITY_RENDERER': 'software',
'UNITY_OSMESA_PATH': libosmesa_path,
})
process = subprocess.Popen(
process_flags, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if process.poll() is not None:
raise RuntimeError('Failed to start dm_fast_mapping process correctly.')
return _FastMappingTasksProcessEnv(
_connect_to_environment(port, settings), _FAST_MAPPING_TASK_OBSERVATIONS,
settings.num_action_repeats, process)
def load_from_docker(settings, name=None):
"""Load Fast Language Learning Tasks from docker container.
Args:
settings: EnvironmentSettings required to start the environment.
name: Optional name of Docker image that contains the dm_fast_mapping
environment. If left unset, uses the dm_fast_mapping default name.
Returns:
An implementation of dm_env.Environment
"""
_validate_environment_settings(settings)
name = name or _DEFAULT_DOCKER_IMAGE_NAME
client = docker.from_env()
port = portpicker.pick_unused_port()
try:
client.images.get(name)
except docker.errors.ImageNotFound:
logging.info('Downloading docker image "%s"...', name)
client.images.pull(name)
logging.info('Download finished.')
container = client.containers.run(
name,
auto_remove=True,
detach=True,
ports={_DOCKER_INTERNAL_GRPC_PORT: port})
return _FastMappingTasksContainerEnv(
_connect_to_environment(port, settings), _FAST_MAPPING_TASK_OBSERVATIONS,
settings.num_action_repeats, container)
| dm_fast_mapping-master | dm_fast_mapping/_load_environment.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Package version for dm_fast_mapping.
Kept in separate file so it can be used during installation.
"""
__version__ = '1.0.0' # https://www.python.org/dev/peps/pep-0440/
| dm_fast_mapping-master | dm_fast_mapping/_version.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_fast_mapping.load_from_disk."""
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from dm_env import test_utils
import dm_fast_mapping
FLAGS = flags.FLAGS
flags.DEFINE_string('path', '',
'Directory that contains dm_fast_mapping environment.')
class LoadFromDiskTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return dm_fast_mapping.load_from_disk(
FLAGS.path,
settings=dm_fast_mapping.EnvironmentSettings(
seed=123, level_name='architecture_comparison/fast_map_three_objs'))
class FastMappingTaskTest(parameterized.TestCase):
@parameterized.parameters(dm_fast_mapping.FAST_MAPPING_TASK_LEVEL_NAMES)
def test_load_level(self, level_name):
self.assertIsNotNone(
dm_fast_mapping.load_from_disk(
FLAGS.path,
settings=dm_fast_mapping.EnvironmentSettings(
seed=123, level_name=level_name)))
if __name__ == '__main__':
absltest.main()
| dm_fast_mapping-master | dm_fast_mapping/load_from_disk_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Python utilities for running dm_fast_mapping."""
from dm_fast_mapping import _load_environment
from dm_fast_mapping._version import __version__
EnvironmentSettings = _load_environment.EnvironmentSettings
FAST_MAPPING_TASK_LEVEL_NAMES = _load_environment.FAST_MAPPING_TASK_LEVEL_NAMES
load_from_disk = _load_environment.load_from_disk
load_from_docker = _load_environment.load_from_docker
| dm_fast_mapping-master | dm_fast_mapping/__init__.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_fast_mapping.load_from_docker."""
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from dm_env import test_utils
import dm_fast_mapping
FLAGS = flags.FLAGS
flags.DEFINE_string(
'docker_image_name', None,
'Name of the Docker image that contains the Fast Language Learning Tasks. '
'If None, uses the default dm_fast_mapping name')
class LoadFromDockerTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return dm_fast_mapping.load_from_docker(
name=FLAGS.docker_image_name,
settings=dm_fast_mapping.EnvironmentSettings(
seed=123, level_name='architecture_comparison/fast_map_three_objs'))
class FastMappingTaskTest(parameterized.TestCase):
@parameterized.parameters(dm_fast_mapping.FAST_MAPPING_TASK_LEVEL_NAMES)
def test_load_level(self, level_name):
self.assertIsNotNone(
dm_fast_mapping.load_from_docker(
name=FLAGS.docker_image_name,
settings=dm_fast_mapping.EnvironmentSettings(
seed=123, level_name=level_name)))
if __name__ == '__main__':
absltest.main()
| dm_fast_mapping-master | dm_fast_mapping/load_from_docker_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example random agent for interacting with DeepMind Fast Mapping Tasks."""
from absl import app
from absl import flags
from absl import logging
from dm_env import specs
import dm_fast_mapping
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string(
'docker_image_name', None,
'Name of the Docker image that contains the Fast Language Learning Tasks. '
'If None, uses the default dm_fast_mapping name')
flags.DEFINE_integer('seed', 123, 'Environment seed.')
flags.DEFINE_string('level_name', 'fast_slow/fast_map_three_objs',
'Name of task to run.')
class RandomAgent(object):
"""Basic random agent for DeepMind Fast Language Fast Language Learning Tasks."""
def __init__(self, action_spec):
self.action_spec = action_spec
def act(self):
action = {}
for name, spec in self.action_spec.items():
# Uniformly sample BoundedArray actions.
if isinstance(spec, specs.BoundedArray):
action[name] = np.random.uniform(spec.minimum, spec.maximum, spec.shape)
else:
action[name] = spec.generate_value()
return action
def main(_):
if 'with_distractors' in FLAGS.level_name: # for the tasks from the HTM paper
episode_length_seconds = 450.0
else:
episode_length_seconds = 120.0
env_settings = dm_fast_mapping.EnvironmentSettings(
seed=FLAGS.seed, level_name=FLAGS.level_name,
episode_length_seconds=episode_length_seconds)
with dm_fast_mapping.load_from_docker(
name=FLAGS.docker_image_name, settings=env_settings) as env:
agent = RandomAgent(env.action_spec())
timestep = env.reset()
score = 0
while not timestep.last():
action = agent.act()
timestep = env.step(action)
if timestep.reward:
score += timestep.reward
logging.info('Total score: %1.1f, reward: %1.1f', score,
timestep.reward)
if __name__ == '__main__':
app.run(main)
| dm_fast_mapping-master | examples/random_agent.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example human agent for interacting with DeepMind Fast Language Learning Tasks."""
from absl import app
from absl import flags
from absl import logging
import dm_fast_mapping
import numpy as np
import pygame
FLAGS = flags.FLAGS
flags.DEFINE_list(
'screen_size', [640, 480],
'Screen width/height in pixels. Scales the environment RGB observations to '
'fit the screen size.')
flags.DEFINE_string(
'docker_image_name', None,
'Name of the Docker image that contains the Fast Language Learning Tasks. '
'If None, uses the default dm_fast_mapping name')
flags.DEFINE_integer('seed', 123, 'Environment seed.')
flags.DEFINE_string('level_name', 'fast_slow/fast_map_three_objs',
'Name of task to run.')
_FRAMES_PER_SECOND = 30
_KEYS_TO_ACTION = {
pygame.K_w: {'MOVE_BACK_FORWARD': 1},
pygame.K_s: {'MOVE_BACK_FORWARD': -1},
pygame.K_a: {'STRAFE_LEFT_RIGHT': -1},
pygame.K_d: {'STRAFE_LEFT_RIGHT': 1},
pygame.K_UP: {'LOOK_DOWN_UP': -1},
pygame.K_DOWN: {'LOOK_DOWN_UP': 1},
pygame.K_LEFT: {'LOOK_LEFT_RIGHT': -1},
pygame.K_RIGHT: {'LOOK_LEFT_RIGHT': 1},
pygame.K_SPACE: {'HAND_GRIP': 1},
} # pyformat: disable
_NO_ACTION = {
'MOVE_BACK_FORWARD': 0,
'STRAFE_LEFT_RIGHT': 0,
'LOOK_LEFT_RIGHT': 0,
'LOOK_DOWN_UP': 0,
'HAND_GRIP': 0,
}
def main(_):
pygame.init()
try:
pygame.mixer.quit()
except NotImplementedError:
pass
pygame.display.set_caption('Fast Language Learning Tasks Human Agent')
if 'with_distractors' in FLAGS.level_name: # for the tasks from the HTM paper
episode_length_seconds = 450.0
else:
episode_length_seconds = 120.0
env_settings = dm_fast_mapping.EnvironmentSettings(
seed=FLAGS.seed, level_name=FLAGS.level_name,
episode_length_seconds=episode_length_seconds)
with dm_fast_mapping.load_from_docker(name=FLAGS.docker_image_name,
settings=env_settings) as env:
screen = pygame.display.set_mode(
(int(FLAGS.screen_size[0]), int(FLAGS.screen_size[1])))
rgb_spec = env.observation_spec()['RGB_INTERLEAVED']
surface = pygame.Surface((rgb_spec.shape[1], rgb_spec.shape[0]))
actions = _NO_ACTION
score = 0
clock = pygame.time.Clock()
while True:
# Do not close with CTRL-C as otherwise the docker container may be left
# running on exit.
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
return
key_actions = _KEYS_TO_ACTION.get(event.key, {})
for name, action in key_actions.items():
actions[name] += action
elif event.type == pygame.KEYUP:
key_actions = _KEYS_TO_ACTION.get(event.key, {})
for name, action in key_actions.items():
actions[name] -= action
timestep = env.step(actions)
frame = np.swapaxes(timestep.observation['RGB_INTERLEAVED'], 0, 1)
font = pygame.font.SysFont('Sans', 10)
pygame.surfarray.blit_array(surface, frame)
text = font.render(timestep.observation['TEXT'], True, (0, 0, 0))
surface.blit(text, (0, 0))
pygame.transform.smoothscale(surface, screen.get_size(), screen)
pygame.display.update()
if timestep.reward:
score += timestep.reward
logging.info('Total score: %1.1f, reward: %1.1f', score,
timestep.reward)
clock.tick(_FRAMES_PER_SECOND)
if __name__ == '__main__':
app.run(main)
| dm_fast_mapping-master | examples/human_agent.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for setuptools."""
import os
from setuptools import find_namespace_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open('mctx/__init__.py') as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `mctx/__init__.py`')
def _parse_requirements(path):
with open(os.path.join(_CURRENT_DIR, path)) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
setup(
name='mctx',
version=_get_version(),
url='https://github.com/deepmind/mctx',
license='Apache 2.0',
author='DeepMind',
description=('Monte Carlo tree search in JAX.'),
long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
long_description_content_type='text/markdown',
author_email='[email protected]',
keywords='jax planning reinforcement-learning python machine learning',
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements.txt')),
tests_require=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements', 'requirements-test.txt')),
zip_safe=False, # Required for full installation.
python_requires='>=3.7',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| mctx-main | setup.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mctx: Monte Carlo tree search in JAX."""
from mctx._src.action_selection import gumbel_muzero_interior_action_selection
from mctx._src.action_selection import gumbel_muzero_root_action_selection
from mctx._src.action_selection import GumbelMuZeroExtraData
from mctx._src.action_selection import muzero_action_selection
from mctx._src.base import ChanceRecurrentFnOutput
from mctx._src.base import DecisionRecurrentFnOutput
from mctx._src.base import InteriorActionSelectionFn
from mctx._src.base import LoopFn
from mctx._src.base import PolicyOutput
from mctx._src.base import RecurrentFn
from mctx._src.base import RecurrentFnOutput
from mctx._src.base import RecurrentState
from mctx._src.base import RootActionSelectionFn
from mctx._src.base import RootFnOutput
from mctx._src.policies import gumbel_muzero_policy
from mctx._src.policies import muzero_policy
from mctx._src.policies import stochastic_muzero_policy
from mctx._src.qtransforms import qtransform_by_min_max
from mctx._src.qtransforms import qtransform_by_parent_and_siblings
from mctx._src.qtransforms import qtransform_completed_by_mix_value
from mctx._src.search import search
from mctx._src.tree import Tree
__version__ = "0.0.3"
__all__ = (
"ChanceRecurrentFnOutput",
"DecisionRecurrentFnOutput",
"GumbelMuZeroExtraData",
"InteriorActionSelectionFn",
"LoopFn",
"PolicyOutput",
"RecurrentFn",
"RecurrentFnOutput",
"RecurrentState",
"RootActionSelectionFn",
"RootFnOutput",
"Tree",
"gumbel_muzero_interior_action_selection",
"gumbel_muzero_policy",
"gumbel_muzero_root_action_selection",
"muzero_action_selection",
"muzero_policy",
"qtransform_by_min_max",
"qtransform_by_parent_and_siblings",
"qtransform_completed_by_mix_value",
"search",
"stochastic_muzero_policy",
)
# _________________________________________
# / Please don't use symbols in `_src` they \
# \ are not part of the Mctx public API. /
# -----------------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
| mctx-main | mctx/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A data structure used to hold / inspect search data for a batch of inputs."""
from __future__ import annotations
from typing import Any, ClassVar, Generic, TypeVar
import chex
import jax
import jax.numpy as jnp
T = TypeVar("T")
@chex.dataclass(frozen=True)
class Tree(Generic[T]):
"""State of a search tree.
The `Tree` dataclass is used to hold and inspect search data for a batch of
inputs. In the fields below `B` denotes the batch dimension, `N` represents
the number of nodes in the tree, and `num_actions` is the number of discrete
actions.
node_visits: `[B, N]` the visit counts for each node.
raw_values: `[B, N]` the raw value for each node.
node_values: `[B, N]` the cumulative search value for each node.
parents: `[B, N]` the node index for the parents for each node.
action_from_parent: `[B, N]` action to take from the parent to reach each
node.
children_index: `[B, N, num_actions]` the node index of the children for each
action.
  children_prior_logits: `[B, N, num_actions]` the action prior logits of each
    node.
children_visits: `[B, N, num_actions]` the visit counts for children for
each action.
children_rewards: `[B, N, num_actions]` the immediate reward for each action.
children_discounts: `[B, N, num_actions]` the discount between the
`children_rewards` and the `children_values`.
children_values: `[B, N, num_actions]` the value of the next node after the
action.
embeddings: `[B, N, ...]` the state embeddings of each node.
root_invalid_actions: `[B, num_actions]` a mask with invalid actions at the
root. In the mask, invalid actions have ones, and valid actions have zeros.
extra_data: `[B, ...]` extra data passed to the search.
"""
node_visits: chex.Array # [B, N]
raw_values: chex.Array # [B, N]
node_values: chex.Array # [B, N]
parents: chex.Array # [B, N]
action_from_parent: chex.Array # [B, N]
children_index: chex.Array # [B, N, num_actions]
children_prior_logits: chex.Array # [B, N, num_actions]
children_visits: chex.Array # [B, N, num_actions]
children_rewards: chex.Array # [B, N, num_actions]
children_discounts: chex.Array # [B, N, num_actions]
children_values: chex.Array # [B, N, num_actions]
embeddings: Any # [B, N, ...]
root_invalid_actions: chex.Array # [B, num_actions]
extra_data: T # [B, ...]
# The following attributes are class variables (and should not be set on
# Tree instances).
ROOT_INDEX: ClassVar[int] = 0
NO_PARENT: ClassVar[int] = -1
UNVISITED: ClassVar[int] = -1
@property
def num_actions(self):
return self.children_index.shape[-1]
@property
def num_simulations(self):
return self.node_visits.shape[-1] - 1
def qvalues(self, indices):
"""Compute q-values for any node indices in the tree."""
if jnp.asarray(indices).shape:
return jax.vmap(_unbatched_qvalues)(self, indices)
else:
return _unbatched_qvalues(self, indices)
def summary(self) -> SearchSummary:
"""Extract summary statistics for the root node."""
# Get state and action values for the root nodes.
chex.assert_rank(self.node_values, 2)
value = self.node_values[:, Tree.ROOT_INDEX]
batch_size, = value.shape
root_indices = jnp.full((batch_size,), Tree.ROOT_INDEX)
qvalues = self.qvalues(root_indices)
# Extract visit counts and induced probabilities for the root nodes.
visit_counts = self.children_visits[:, Tree.ROOT_INDEX].astype(value.dtype)
total_counts = jnp.sum(visit_counts, axis=-1, keepdims=True)
visit_probs = visit_counts / jnp.maximum(total_counts, 1)
visit_probs = jnp.where(total_counts > 0, visit_probs, 1 / self.num_actions)
# Return relevant stats.
return SearchSummary( # pytype: disable=wrong-arg-types # numpy-scalars
visit_counts=visit_counts,
visit_probs=visit_probs,
value=value,
qvalues=qvalues)
def infer_batch_size(tree: Tree) -> int:
"""Recovers batch size from `Tree` data structure."""
if tree.node_values.ndim != 2:
raise ValueError("Input tree is not batched.")
chex.assert_equal_shape_prefix(jax.tree_util.tree_leaves(tree), 1)
return tree.node_values.shape[0]
# A number of aggregate statistics and predictions are extracted from the
# search data and returned to the user for further processing.
@chex.dataclass(frozen=True)
class SearchSummary:
"""Stats from MCTS search."""
visit_counts: chex.Array
visit_probs: chex.Array
value: chex.Array
qvalues: chex.Array
def _unbatched_qvalues(tree: Tree, index: int) -> int:
chex.assert_rank(tree.children_discounts, 2)
return ( # pytype: disable=bad-return-type # numpy-scalars
tree.children_rewards[index]
+ tree.children_discounts[index] * tree.children_values[index]
)
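# Note on the Q-value computation above (worked numbers added for illustration,
# not from the original file): for node index i and action a,
#   Q(i, a) = children_rewards[i, a]
#             + children_discounts[i, a] * children_values[i, a],
# so a reward of 1.0, a discount of 0.99 and a child value of 0.5 give
# 1.0 + 0.99 * 0.5 = 1.495.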
| mctx-main | mctx/_src/tree.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A collection of action selection functions."""
from typing import Optional, TypeVar
import chex
import jax
import jax.numpy as jnp
from mctx._src import base
from mctx._src import qtransforms
from mctx._src import seq_halving
from mctx._src import tree as tree_lib
def switching_action_selection_wrapper(
root_action_selection_fn: base.RootActionSelectionFn,
interior_action_selection_fn: base.InteriorActionSelectionFn
) -> base.InteriorActionSelectionFn:
"""Wraps root and interior action selection fns in a conditional statement."""
def switching_action_selection_fn(
rng_key: chex.PRNGKey,
tree: tree_lib.Tree,
node_index: base.NodeIndices,
depth: base.Depth) -> chex.Array:
return jax.lax.cond(
depth == 0,
lambda x: root_action_selection_fn(*x[:3]),
lambda x: interior_action_selection_fn(*x),
(rng_key, tree, node_index, depth))
return switching_action_selection_fn
def muzero_action_selection(
rng_key: chex.PRNGKey,
tree: tree_lib.Tree,
node_index: chex.Numeric,
depth: chex.Numeric,
*,
pb_c_init: float = 1.25,
pb_c_base: float = 19652.0,
qtransform: base.QTransform = qtransforms.qtransform_by_parent_and_siblings,
) -> chex.Array:
"""Returns the action selected for a node index.
See Appendix B in https://arxiv.org/pdf/1911.08265.pdf for more details.
Args:
rng_key: random number generator state.
tree: _unbatched_ MCTS tree state.
node_index: scalar index of the node from which to select an action.
depth: the scalar depth of the current node. The root has depth zero.
pb_c_init: constant c_1 in the PUCT formula.
pb_c_base: constant c_2 in the PUCT formula.
qtransform: a monotonic transformation to convert the Q-values to [0, 1].
Returns:
action: the action selected from the given node.
"""
visit_counts = tree.children_visits[node_index]
node_visit = tree.node_visits[node_index]
pb_c = pb_c_init + jnp.log((node_visit + pb_c_base + 1.) / pb_c_base)
prior_logits = tree.children_prior_logits[node_index]
prior_probs = jax.nn.softmax(prior_logits)
policy_score = jnp.sqrt(node_visit) * pb_c * prior_probs / (visit_counts + 1)
chex.assert_shape([node_index, node_visit], ())
chex.assert_equal_shape([prior_probs, visit_counts, policy_score])
value_score = qtransform(tree, node_index)
# Add tiny bit of randomness for tie break
node_noise_score = 1e-7 * jax.random.uniform(
rng_key, (tree.num_actions,))
to_argmax = value_score + policy_score + node_noise_score
# Masking the invalid actions at the root.
return masked_argmax(to_argmax, tree.root_invalid_actions * (depth == 0))
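# Worked numbers for the PUCT score above (illustrative, not from the original
# file): with node_visit=10, pb_c_init=1.25 and pb_c_base=19652,
# pb_c = 1.25 + log((10 + 19652 + 1) / 19652) ~= 1.2506, so an action with
# prior probability 0.5 and 2 child visits gets
# policy_score ~= sqrt(10) * 1.2506 * 0.5 / (2 + 1) ~= 0.66.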
@chex.dataclass(frozen=True)
class GumbelMuZeroExtraData:
"""Extra data for Gumbel MuZero search."""
root_gumbel: chex.Array
GumbelMuZeroExtraDataType = TypeVar( # pylint: disable=invalid-name
"GumbelMuZeroExtraDataType", bound=GumbelMuZeroExtraData)
def gumbel_muzero_root_action_selection(
rng_key: chex.PRNGKey,
tree: tree_lib.Tree[GumbelMuZeroExtraDataType],
node_index: chex.Numeric,
*,
num_simulations: chex.Numeric,
max_num_considered_actions: chex.Numeric,
qtransform: base.QTransform = qtransforms.qtransform_completed_by_mix_value,
) -> chex.Array:
"""Returns the action selected by Sequential Halving with Gumbel.
Initially, we sample `max_num_considered_actions` actions without replacement.
From these, the actions with the highest `gumbel + logits + qvalues` are
visited first.
Args:
rng_key: random number generator state.
tree: _unbatched_ MCTS tree state.
node_index: scalar index of the node from which to take an action.
num_simulations: the simulation budget.
max_num_considered_actions: the number of actions sampled without
replacement.
qtransform: a monotonic transformation for the Q-values.
Returns:
action: the action selected from the given node.
"""
del rng_key
chex.assert_shape([node_index], ())
visit_counts = tree.children_visits[node_index]
prior_logits = tree.children_prior_logits[node_index]
chex.assert_equal_shape([visit_counts, prior_logits])
completed_qvalues = qtransform(tree, node_index)
table = jnp.array(seq_halving.get_table_of_considered_visits(
max_num_considered_actions, num_simulations))
num_valid_actions = jnp.sum(
1 - tree.root_invalid_actions, axis=-1).astype(jnp.int32)
num_considered = jnp.minimum(
max_num_considered_actions, num_valid_actions)
chex.assert_shape(num_considered, ())
# At the root, the simulation_index is equal to the sum of visit counts.
simulation_index = jnp.sum(visit_counts, -1)
chex.assert_shape(simulation_index, ())
considered_visit = table[num_considered, simulation_index]
chex.assert_shape(considered_visit, ())
gumbel = tree.extra_data.root_gumbel
to_argmax = seq_halving.score_considered(
considered_visit, gumbel, prior_logits, completed_qvalues,
visit_counts)
# Masking the invalid actions at the root.
return masked_argmax(to_argmax, tree.root_invalid_actions)
def gumbel_muzero_interior_action_selection(
rng_key: chex.PRNGKey,
tree: tree_lib.Tree,
node_index: chex.Numeric,
depth: chex.Numeric,
*,
qtransform: base.QTransform = qtransforms.qtransform_completed_by_mix_value,
) -> chex.Array:
"""Selects the action with a deterministic action selection.
The action is selected based on the visit counts to produce visitation
frequencies similar to softmax(prior_logits + qvalues).
Args:
rng_key: random number generator state.
tree: _unbatched_ MCTS tree state.
node_index: scalar index of the node from which to take an action.
depth: the scalar depth of the current node. The root has depth zero.
qtransform: function to obtain completed Q-values for a node.
Returns:
action: the action selected from the given node.
"""
del rng_key, depth
chex.assert_shape([node_index], ())
visit_counts = tree.children_visits[node_index]
prior_logits = tree.children_prior_logits[node_index]
chex.assert_equal_shape([visit_counts, prior_logits])
completed_qvalues = qtransform(tree, node_index)
# The `prior_logits + completed_qvalues` provide an improved policy,
# because the missing qvalues are replaced by v_{prior_logits}(node).
to_argmax = _prepare_argmax_input(
probs=jax.nn.softmax(prior_logits + completed_qvalues),
visit_counts=visit_counts)
chex.assert_rank(to_argmax, 1)
return jnp.argmax(to_argmax, axis=-1)
def masked_argmax(
to_argmax: chex.Array,
invalid_actions: Optional[chex.Array]) -> chex.Array:
"""Returns a valid action with the highest `to_argmax`."""
if invalid_actions is not None:
chex.assert_equal_shape([to_argmax, invalid_actions])
# The usage of the -inf inside the argmax does not lead to NaN.
# Do not use -inf inside softmax, logsoftmax or cross-entropy.
to_argmax = jnp.where(invalid_actions, -jnp.inf, to_argmax)
# If all actions are invalid, the argmax returns action 0.
return jnp.argmax(to_argmax, axis=-1)
def _prepare_argmax_input(probs, visit_counts):
"""Prepares the input for the deterministic selection.
When calling argmax(_prepare_argmax_input(...)) multiple times
with updated visit_counts, the produced visitation frequencies will
approximate the probs.
For the derivation, see Section 5 "Planning at non-root nodes" in
"Policy improvement by planning with Gumbel":
https://openreview.net/forum?id=bERaNdoegnO
Args:
probs: a policy or an improved policy. Shape `[num_actions]`.
visit_counts: the existing visit counts. Shape `[num_actions]`.
Returns:
The input to an argmax. Shape `[num_actions]`.
"""
chex.assert_equal_shape([probs, visit_counts])
to_argmax = probs - visit_counts / (
1 + jnp.sum(visit_counts, keepdims=True, axis=-1))
return to_argmax
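# Illustrative check (added, not part of the original file): repeatedly taking
# argmax(_prepare_argmax_input(probs, visit_counts)) and incrementing the
# chosen count yields visit frequencies that approach `probs`; the
# probabilities below are hypothetical.
def _deterministic_selection_example(num_steps: int = 100):
  probs = jnp.array([0.6, 0.3, 0.1])
  visit_counts = jnp.zeros_like(probs)
  for _ in range(num_steps):
    action = jnp.argmax(_prepare_argmax_input(probs, visit_counts))
    visit_counts = visit_counts.at[action].add(1.0)
  return visit_counts / num_steps  # Approximately [0.6, 0.3, 0.1].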
| mctx-main | mctx/_src/action_selection.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monotonic transformations for the Q-values."""
import chex
import jax
import jax.numpy as jnp
from mctx._src import tree as tree_lib
def qtransform_by_min_max(
tree: tree_lib.Tree,
node_index: chex.Numeric,
*,
min_value: chex.Numeric,
max_value: chex.Numeric,
) -> chex.Array:
"""Returns Q-values normalized by the given `min_value` and `max_value`.
Args:
tree: _unbatched_ MCTS tree state.
node_index: scalar index of the parent node.
min_value: given minimum value. Usually the `min_value` is minimum possible
untransformed Q-value.
max_value: given maximum value. Usually the `max_value` is maximum possible
untransformed Q-value.
Returns:
Q-values normalized by `(qvalues - min_value) / (max_value - min_value)`.
The unvisited actions will have zero Q-value. Shape `[num_actions]`.
"""
chex.assert_shape(node_index, ())
qvalues = tree.qvalues(node_index)
visit_counts = tree.children_visits[node_index]
value_score = jnp.where(visit_counts > 0, qvalues, min_value)
value_score = (value_score - min_value) / ((max_value - min_value))
return value_score
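# Worked numbers for the normalization above (illustrative, not from the
# original file): with min_value=-1 and max_value=1, a visited action with
# Q-value 0.5 maps to (0.5 - (-1)) / (1 - (-1)) = 0.75, while unvisited actions
# are assigned min_value and therefore map to 0.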
def qtransform_by_parent_and_siblings(
tree: tree_lib.Tree,
node_index: chex.Numeric,
*,
epsilon: chex.Numeric = 1e-8,
) -> chex.Array:
"""Returns qvalues normalized by min, max over V(node) and qvalues.
Args:
tree: _unbatched_ MCTS tree state.
node_index: scalar index of the parent node.
epsilon: the minimum denominator for the normalization.
Returns:
Q-values normalized to be from the [0, 1] interval. The unvisited actions
will have zero Q-value. Shape `[num_actions]`.
"""
chex.assert_shape(node_index, ())
qvalues = tree.qvalues(node_index)
visit_counts = tree.children_visits[node_index]
chex.assert_rank([qvalues, visit_counts, node_index], [1, 1, 0])
node_value = tree.node_values[node_index]
safe_qvalues = jnp.where(visit_counts > 0, qvalues, node_value)
chex.assert_equal_shape([safe_qvalues, qvalues])
min_value = jnp.minimum(node_value, jnp.min(safe_qvalues, axis=-1))
max_value = jnp.maximum(node_value, jnp.max(safe_qvalues, axis=-1))
completed_by_min = jnp.where(visit_counts > 0, qvalues, min_value)
normalized = (completed_by_min - min_value) / (
jnp.maximum(max_value - min_value, epsilon))
chex.assert_equal_shape([normalized, qvalues])
return normalized
def qtransform_completed_by_mix_value(
tree: tree_lib.Tree,
node_index: chex.Numeric,
*,
value_scale: chex.Numeric = 0.1,
maxvisit_init: chex.Numeric = 50.0,
rescale_values: bool = True,
use_mixed_value: bool = True,
epsilon: chex.Numeric = 1e-8,
) -> chex.Array:
"""Returns completed qvalues.
The missing Q-values of the unvisited actions are replaced by the
mixed value, defined in Appendix D of
"Policy improvement by planning with Gumbel":
https://openreview.net/forum?id=bERaNdoegnO
The Q-values are transformed by a linear transformation:
`(maxvisit_init + max(visit_counts)) * value_scale * qvalues`.
Args:
tree: _unbatched_ MCTS tree state.
node_index: scalar index of the parent node.
value_scale: scale for the Q-values.
maxvisit_init: offset to the `max(visit_counts)` in the scaling factor.
rescale_values: if True, scale the qvalues by `1 / (max_q - min_q)`.
use_mixed_value: if True, complete the Q-values with mixed value,
otherwise complete the Q-values with the raw value.
epsilon: the minimum denominator when using `rescale_values`.
Returns:
Completed Q-values. Shape `[num_actions]`.
"""
chex.assert_shape(node_index, ())
qvalues = tree.qvalues(node_index)
visit_counts = tree.children_visits[node_index]
# Computing the mixed value and producing completed_qvalues.
raw_value = tree.raw_values[node_index]
prior_probs = jax.nn.softmax(
tree.children_prior_logits[node_index])
if use_mixed_value:
value = _compute_mixed_value(
raw_value,
qvalues=qvalues,
visit_counts=visit_counts,
prior_probs=prior_probs)
else:
value = raw_value
completed_qvalues = _complete_qvalues(
qvalues, visit_counts=visit_counts, value=value)
# Scaling the Q-values.
if rescale_values:
completed_qvalues = _rescale_qvalues(completed_qvalues, epsilon)
maxvisit = jnp.max(visit_counts, axis=-1)
visit_scale = maxvisit_init + maxvisit
return visit_scale * value_scale * completed_qvalues
def _rescale_qvalues(qvalues, epsilon):
"""Rescales the given completed Q-values to be from the [0, 1] interval."""
min_value = jnp.min(qvalues, axis=-1, keepdims=True)
max_value = jnp.max(qvalues, axis=-1, keepdims=True)
return (qvalues - min_value) / jnp.maximum(max_value - min_value, epsilon)
def _complete_qvalues(qvalues, *, visit_counts, value):
"""Returns completed Q-values, with the `value` for unvisited actions."""
chex.assert_equal_shape([qvalues, visit_counts])
chex.assert_shape(value, [])
# The missing qvalues are replaced by the value.
completed_qvalues = jnp.where(
visit_counts > 0,
qvalues,
value)
chex.assert_equal_shape([completed_qvalues, qvalues])
return completed_qvalues
def _compute_mixed_value(raw_value, qvalues, visit_counts, prior_probs):
"""Interpolates the raw_value and weighted qvalues.
Args:
raw_value: an approximate value of the state. Shape `[]`.
qvalues: Q-values for all actions. Shape `[num_actions]`. The unvisited
actions have undefined Q-value.
visit_counts: the visit counts for all actions. Shape `[num_actions]`.
prior_probs: the action probabilities, produced by the policy network for
each action. Shape `[num_actions]`.
Returns:
An estimator of the state value. Shape `[]`.
"""
sum_visit_counts = jnp.sum(visit_counts, axis=-1)
# Ensuring non-nan weighted_q, even if the visited actions have zero
# prior probability.
prior_probs = jnp.maximum(jnp.finfo(prior_probs.dtype).tiny, prior_probs)
# Summing the probabilities of the visited actions.
sum_probs = jnp.sum(jnp.where(visit_counts > 0, prior_probs, 0.0),
axis=-1)
weighted_q = jnp.sum(jnp.where(
visit_counts > 0,
prior_probs * qvalues / jnp.where(visit_counts > 0, sum_probs, 1.0),
0.0), axis=-1)
return (raw_value + sum_visit_counts * weighted_q) / (sum_visit_counts + 1)
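# --- Editorial usage sketch; not part of the original mctx source. ---
# A small, hypothetical worked example of `_compute_mixed_value`: with three
# total visits, the result is `(raw_value + 3 * weighted_q) / 4`, where
# `weighted_q` averages the Q-values of the visited actions, weighted by their
# renormalized prior probabilities.
if __name__ == '__main__':
  example_raw_value = jnp.array(0.0)
  example_qvalues = jnp.array([1.0, -1.0, 0.5])
  example_visit_counts = jnp.array([2.0, 1.0, 0.0])
  example_prior_probs = jax.nn.softmax(jnp.array([0.0, 0.0, 0.0]))
  print(_compute_mixed_value(
      example_raw_value,
      qvalues=example_qvalues,
      visit_counts=example_visit_counts,
      prior_probs=example_prior_probs))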
| mctx-main | mctx/_src/qtransforms.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| mctx-main | mctx/_src/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for Sequential Halving."""
import math
import chex
import jax.numpy as jnp
def score_considered(considered_visit, gumbel, logits, normalized_qvalues,
visit_counts):
"""Returns a score usable for an argmax."""
  # We allow a child to be visited if it is the only considered child.
low_logit = -1e9
logits = logits - jnp.max(logits, keepdims=True, axis=-1)
penalty = jnp.where(
visit_counts == considered_visit,
0, -jnp.inf)
chex.assert_equal_shape([gumbel, logits, normalized_qvalues, penalty])
return jnp.maximum(low_logit, gumbel + logits + normalized_qvalues) + penalty
def get_sequence_of_considered_visits(max_num_considered_actions,
num_simulations):
"""Returns a sequence of visit counts considered by Sequential Halving.
Sequential Halving is a "pure exploration" algorithm for bandits, introduced
in "Almost Optimal Exploration in Multi-Armed Bandits":
http://proceedings.mlr.press/v28/karnin13.pdf
  The visit counts allow implementing Sequential Halving by selecting the best
action from the actions with the currently considered visit count.
Args:
max_num_considered_actions: The maximum number of considered actions.
The `max_num_considered_actions` can be smaller than the number of
actions.
num_simulations: The total simulation budget.
Returns:
A tuple with visit counts. Length `num_simulations`.
"""
if max_num_considered_actions <= 1:
return tuple(range(num_simulations))
log2max = int(math.ceil(math.log2(max_num_considered_actions)))
sequence = []
visits = [0] * max_num_considered_actions
num_considered = max_num_considered_actions
while len(sequence) < num_simulations:
num_extra_visits = max(1, int(num_simulations / (log2max * num_considered)))
for _ in range(num_extra_visits):
sequence.extend(visits[:num_considered])
for i in range(num_considered):
visits[i] += 1
# Halving the number of considered actions.
num_considered = max(2, num_considered // 2)
return tuple(sequence[:num_simulations])
def get_table_of_considered_visits(max_num_considered_actions, num_simulations):
"""Returns a table of sequences of visit counts.
Args:
max_num_considered_actions: The maximum number of considered actions.
The `max_num_considered_actions` can be smaller than the number of
actions.
num_simulations: The total simulation budget.
Returns:
A tuple of sequences of visit counts.
Shape [max_num_considered_actions + 1, num_simulations].
"""
return tuple(
get_sequence_of_considered_visits(m, num_simulations)
for m in range(max_num_considered_actions + 1))
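# --- Editorial usage sketch; not part of the original mctx source. ---
# A quick illustration of the Sequential Halving visit schedule: with 4
# considered actions and a budget of 8 simulations, the first phase visits all
# 4 actions once and the remaining budget is spent on the best half.
if __name__ == '__main__':
  print(get_sequence_of_considered_visits(
      max_num_considered_actions=4, num_simulations=8))
  # Prints: (0, 0, 0, 0, 1, 1, 2, 2)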
| mctx-main | mctx/_src/seq_halving.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A JAX implementation of batched MCTS."""
import functools
from typing import Any, NamedTuple, Optional, Tuple, TypeVar
import chex
import jax
import jax.numpy as jnp
from mctx._src import action_selection
from mctx._src import base
from mctx._src import tree as tree_lib
Tree = tree_lib.Tree
T = TypeVar("T")
def search(
params: base.Params,
rng_key: chex.PRNGKey,
*,
root: base.RootFnOutput,
recurrent_fn: base.RecurrentFn,
root_action_selection_fn: base.RootActionSelectionFn,
interior_action_selection_fn: base.InteriorActionSelectionFn,
num_simulations: int,
max_depth: Optional[int] = None,
invalid_actions: Optional[chex.Array] = None,
extra_data: Any = None,
loop_fn: base.LoopFn = jax.lax.fori_loop) -> Tree:
"""Performs a full search and returns sampled actions.
In the shape descriptions, `B` denotes the batch dimension.
Args:
params: params to be forwarded to root and recurrent functions.
rng_key: random number generator state, the key is consumed.
root: a `(prior_logits, value, embedding)` `RootFnOutput`. The
`prior_logits` are from a policy network. The shapes are
`([B, num_actions], [B], [B, ...])`, respectively.
recurrent_fn: a callable to be called on the leaf nodes and unvisited
actions retrieved by the simulation step, which takes as args
`(params, rng_key, action, embedding)` and returns a `RecurrentFnOutput`
and the new state embedding. The `rng_key` argument is consumed.
root_action_selection_fn: function used to select an action at the root.
interior_action_selection_fn: function used to select an action during
simulation.
num_simulations: the number of simulations.
max_depth: maximum search tree depth allowed during simulation, defined as
the number of edges from the root to a leaf node.
invalid_actions: a mask with invalid actions at the root. In the
mask, invalid actions have ones, and valid actions have zeros.
Shape `[B, num_actions]`.
extra_data: extra data passed to `tree.extra_data`. Shape `[B, ...]`.
loop_fn: Function used to run the simulations. It may be required to pass
hk.fori_loop if using this function inside a Haiku module.
Returns:
    The search `Tree` containing the outcomes of the search. For example, the
    root visit counts are available via `tree.summary().visit_counts`, with
    shape `[B, num_actions]`.
"""
action_selection_fn = action_selection.switching_action_selection_wrapper(
root_action_selection_fn=root_action_selection_fn,
interior_action_selection_fn=interior_action_selection_fn
)
# Do simulation, expansion, and backward steps.
batch_size = root.value.shape[0]
batch_range = jnp.arange(batch_size)
if max_depth is None:
max_depth = num_simulations
if invalid_actions is None:
invalid_actions = jnp.zeros_like(root.prior_logits)
def body_fun(sim, loop_state):
rng_key, tree = loop_state
rng_key, simulate_key, expand_key = jax.random.split(rng_key, 3)
# simulate is vmapped and expects batched rng keys.
simulate_keys = jax.random.split(simulate_key, batch_size)
parent_index, action = simulate(
simulate_keys, tree, action_selection_fn, max_depth)
    # A node first expanded on simulation `i` will have node index `i`.
# Node 0 corresponds to the root node.
next_node_index = tree.children_index[batch_range, parent_index, action]
next_node_index = jnp.where(next_node_index == Tree.UNVISITED,
sim + 1, next_node_index)
tree = expand(
params, expand_key, tree, recurrent_fn, parent_index,
action, next_node_index)
tree = backward(tree, next_node_index)
loop_state = rng_key, tree
return loop_state
# Allocate all necessary storage.
tree = instantiate_tree_from_root(root, num_simulations,
root_invalid_actions=invalid_actions,
extra_data=extra_data)
_, tree = loop_fn(
0, num_simulations, body_fun, (rng_key, tree))
return tree
class _SimulationState(NamedTuple):
"""The state for the simulation while loop."""
rng_key: chex.PRNGKey
node_index: int
action: int
next_node_index: int
depth: int
is_continuing: bool
@functools.partial(jax.vmap, in_axes=[0, 0, None, None], out_axes=0)
def simulate(
rng_key: chex.PRNGKey,
tree: Tree,
action_selection_fn: base.InteriorActionSelectionFn,
max_depth: int) -> Tuple[chex.Array, chex.Array]:
"""Traverses the tree until reaching an unvisited action or `max_depth`.
Each simulation starts from the root and keeps selecting actions traversing
the tree until a leaf or `max_depth` is reached.
Args:
rng_key: random number generator state, the key is consumed.
tree: _unbatched_ MCTS tree state.
action_selection_fn: function used to select an action during simulation.
max_depth: maximum search tree depth allowed during simulation.
Returns:
`(parent_index, action)` tuple, where `parent_index` is the index of the
node reached at the end of the simulation, and the `action` is the action to
evaluate from the `parent_index`.
"""
def cond_fun(state):
return state.is_continuing
def body_fun(state):
# Preparing the next simulation state.
node_index = state.next_node_index
rng_key, action_selection_key = jax.random.split(state.rng_key)
action = action_selection_fn(action_selection_key, tree, node_index,
state.depth)
next_node_index = tree.children_index[node_index, action]
# The returned action will be visited.
depth = state.depth + 1
is_before_depth_cutoff = depth < max_depth
is_visited = next_node_index != Tree.UNVISITED
is_continuing = jnp.logical_and(is_visited, is_before_depth_cutoff)
return _SimulationState( # pytype: disable=wrong-arg-types # jax-types
rng_key=rng_key,
node_index=node_index,
action=action,
next_node_index=next_node_index,
depth=depth,
is_continuing=is_continuing)
node_index = jnp.array(Tree.ROOT_INDEX, dtype=jnp.int32)
depth = jnp.zeros((), dtype=tree.children_prior_logits.dtype)
initial_state = _SimulationState(
rng_key=rng_key,
node_index=tree.NO_PARENT,
action=tree.NO_PARENT,
next_node_index=node_index,
depth=depth,
is_continuing=jnp.array(True))
end_state = jax.lax.while_loop(cond_fun, body_fun, initial_state)
# Returning a node with a selected action.
  # The action can already be visited, if the max_depth is reached.
return end_state.node_index, end_state.action
def expand(
params: chex.Array,
rng_key: chex.PRNGKey,
tree: Tree[T],
recurrent_fn: base.RecurrentFn,
parent_index: chex.Array,
action: chex.Array,
next_node_index: chex.Array) -> Tree[T]:
"""Create and evaluate child nodes from given nodes and unvisited actions.
Args:
params: params to be forwarded to recurrent function.
rng_key: random number generator state.
tree: the MCTS tree state to update.
recurrent_fn: a callable to be called on the leaf nodes and unvisited
actions retrieved by the simulation step, which takes as args
`(params, rng_key, action, embedding)` and returns a `RecurrentFnOutput`
and the new state embedding. The `rng_key` argument is consumed.
parent_index: the index of the parent node, from which the action will be
expanded. Shape `[B]`.
action: the action to expand. Shape `[B]`.
next_node_index: the index of the newly expanded node. This can be the index
of an existing node, if `max_depth` is reached. Shape `[B]`.
Returns:
tree: updated MCTS tree state.
"""
batch_size = tree_lib.infer_batch_size(tree)
batch_range = jnp.arange(batch_size)
chex.assert_shape([parent_index, action, next_node_index], (batch_size,))
# Retrieve states for nodes to be evaluated.
embedding = jax.tree_util.tree_map(
lambda x: x[batch_range, parent_index], tree.embeddings)
# Evaluate and create a new node.
step, embedding = recurrent_fn(params, rng_key, action, embedding)
chex.assert_shape(step.prior_logits, [batch_size, tree.num_actions])
chex.assert_shape(step.reward, [batch_size])
chex.assert_shape(step.discount, [batch_size])
chex.assert_shape(step.value, [batch_size])
tree = update_tree_node(
tree, next_node_index, step.prior_logits, step.value, embedding)
# Return updated tree topology.
return tree.replace(
children_index=batch_update(
tree.children_index, next_node_index, parent_index, action),
children_rewards=batch_update(
tree.children_rewards, step.reward, parent_index, action),
children_discounts=batch_update(
tree.children_discounts, step.discount, parent_index, action),
parents=batch_update(tree.parents, parent_index, next_node_index),
action_from_parent=batch_update(
tree.action_from_parent, action, next_node_index))
@jax.vmap
def backward(
tree: Tree[T],
leaf_index: chex.Numeric) -> Tree[T]:
"""Goes up and updates the tree until all nodes reached the root.
Args:
tree: the MCTS tree state to update, without the batch size.
leaf_index: the node index from which to do the backward.
Returns:
Updated MCTS tree state.
"""
def cond_fun(loop_state):
_, _, index = loop_state
return index != Tree.ROOT_INDEX
def body_fun(loop_state):
# Here we update the value of our parent, so we start by reversing.
tree, leaf_value, index = loop_state
parent = tree.parents[index]
count = tree.node_visits[parent]
action = tree.action_from_parent[index]
reward = tree.children_rewards[parent, action]
leaf_value = reward + tree.children_discounts[parent, action] * leaf_value
parent_value = (
tree.node_values[parent] * count + leaf_value) / (count + 1.0)
children_values = tree.node_values[index]
children_counts = tree.children_visits[parent, action] + 1
tree = tree.replace(
node_values=update(tree.node_values, parent_value, parent),
node_visits=update(tree.node_visits, count + 1, parent),
children_values=update(
tree.children_values, children_values, parent, action),
children_visits=update(
tree.children_visits, children_counts, parent, action))
return tree, leaf_value, parent
leaf_index = jnp.asarray(leaf_index, dtype=jnp.int32)
loop_state = (tree, tree.node_values[leaf_index], leaf_index)
tree, _, _ = jax.lax.while_loop(cond_fun, body_fun, loop_state)
return tree
# Utility function to set the values of certain indices to prescribed values.
# This is vmapped to operate seamlessly on batches.
def update(x, vals, *indices):
return x.at[indices].set(vals)
batch_update = jax.vmap(update)
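# For example, with `x` of shape `[B, N]`, `batch_update(x, vals, idx)` sets
# `x[b, idx[b]] = vals[b]` for every batch element `b`.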
def update_tree_node(
tree: Tree[T],
node_index: chex.Array,
prior_logits: chex.Array,
value: chex.Array,
embedding: chex.Array) -> Tree[T]:
"""Updates the tree at node index.
Args:
    tree: `Tree` whose node is to be updated.
node_index: the index of the expanded node. Shape `[B]`.
prior_logits: the prior logits to fill in for the new node, of shape
`[B, num_actions]`.
value: the value to fill in for the new node. Shape `[B]`.
embedding: the state embeddings for the node. Shape `[B, ...]`.
Returns:
The new tree with updated nodes.
"""
batch_size = tree_lib.infer_batch_size(tree)
batch_range = jnp.arange(batch_size)
chex.assert_shape(prior_logits, (batch_size, tree.num_actions))
# When using max_depth, a leaf can be expanded multiple times.
new_visit = tree.node_visits[batch_range, node_index] + 1
updates = dict( # pylint: disable=use-dict-literal
children_prior_logits=batch_update(
tree.children_prior_logits, prior_logits, node_index),
raw_values=batch_update(
tree.raw_values, value, node_index),
node_values=batch_update(
tree.node_values, value, node_index),
node_visits=batch_update(
tree.node_visits, new_visit, node_index),
embeddings=jax.tree_util.tree_map(
lambda t, s: batch_update(t, s, node_index),
tree.embeddings, embedding))
return tree.replace(**updates)
def instantiate_tree_from_root(
root: base.RootFnOutput,
num_simulations: int,
root_invalid_actions: chex.Array,
extra_data: Any) -> Tree:
"""Initializes tree state at search root."""
chex.assert_rank(root.prior_logits, 2)
batch_size, num_actions = root.prior_logits.shape
chex.assert_shape(root.value, [batch_size])
num_nodes = num_simulations + 1
data_dtype = root.value.dtype
batch_node = (batch_size, num_nodes)
batch_node_action = (batch_size, num_nodes, num_actions)
def _zeros(x):
return jnp.zeros(batch_node + x.shape[1:], dtype=x.dtype)
# Create a new empty tree state and fill its root.
tree = Tree(
node_visits=jnp.zeros(batch_node, dtype=jnp.int32),
raw_values=jnp.zeros(batch_node, dtype=data_dtype),
node_values=jnp.zeros(batch_node, dtype=data_dtype),
parents=jnp.full(batch_node, Tree.NO_PARENT, dtype=jnp.int32),
action_from_parent=jnp.full(
batch_node, Tree.NO_PARENT, dtype=jnp.int32),
children_index=jnp.full(
batch_node_action, Tree.UNVISITED, dtype=jnp.int32),
children_prior_logits=jnp.zeros(
batch_node_action, dtype=root.prior_logits.dtype),
children_values=jnp.zeros(batch_node_action, dtype=data_dtype),
children_visits=jnp.zeros(batch_node_action, dtype=jnp.int32),
children_rewards=jnp.zeros(batch_node_action, dtype=data_dtype),
children_discounts=jnp.zeros(batch_node_action, dtype=data_dtype),
embeddings=jax.tree_util.tree_map(_zeros, root.embedding),
root_invalid_actions=root_invalid_actions,
extra_data=extra_data)
root_index = jnp.full([batch_size], Tree.ROOT_INDEX)
tree = update_tree_node(
tree, root_index, root.prior_logits, root.value, root.embedding)
return tree
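# --- Editorial usage sketch; not part of the original mctx source. ---
# A minimal smoke test of `search` on a one-step bandit with zero discount.
# It uses a uniform-random action selection instead of the PUCT / Gumbel
# selection used by the public policies, purely to exercise the machinery;
# all names prefixed with `_example_` are hypothetical.
if __name__ == '__main__':
  _example_num_actions = 3

  def _example_interior_selection(rng_key, tree, node_index, depth):
    del tree, node_index, depth
    return jax.random.randint(rng_key, (), 0, _example_num_actions)

  def _example_root_selection(rng_key, tree, node_index):
    return _example_interior_selection(rng_key, tree, node_index, depth=0)

  def _example_recurrent_fn(params, rng_key, action, embedding):
    del params, rng_key
    # Reward 1 for action 2, otherwise 0; the episode ends (discount=0).
    reward = jnp.where(action == 2, 1.0, 0.0)
    return base.RecurrentFnOutput(
        reward=reward,
        discount=jnp.zeros_like(reward),
        prior_logits=jnp.zeros([action.shape[0], _example_num_actions]),
        value=jnp.zeros_like(reward)), embedding

  _example_root = base.RootFnOutput(
      prior_logits=jnp.zeros([1, _example_num_actions]),
      value=jnp.zeros([1]),
      embedding=jnp.zeros([1, 2]))
  _example_tree = search(
      params=(),
      rng_key=jax.random.PRNGKey(0),
      root=_example_root,
      recurrent_fn=_example_recurrent_fn,
      root_action_selection_fn=_example_root_selection,
      interior_action_selection_fn=_example_interior_selection,
      num_simulations=8)
  print(_example_tree.summary().visit_counts)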
| mctx-main | mctx/_src/search.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Search policies."""
import functools
from typing import Optional, Tuple
import chex
import jax
import jax.numpy as jnp
from mctx._src import action_selection
from mctx._src import base
from mctx._src import qtransforms
from mctx._src import search
from mctx._src import seq_halving
def muzero_policy(
params: base.Params,
rng_key: chex.PRNGKey,
root: base.RootFnOutput,
recurrent_fn: base.RecurrentFn,
num_simulations: int,
invalid_actions: Optional[chex.Array] = None,
max_depth: Optional[int] = None,
loop_fn: base.LoopFn = jax.lax.fori_loop,
*,
qtransform: base.QTransform = qtransforms.qtransform_by_parent_and_siblings,
dirichlet_fraction: chex.Numeric = 0.25,
dirichlet_alpha: chex.Numeric = 0.3,
pb_c_init: chex.Numeric = 1.25,
pb_c_base: chex.Numeric = 19652,
temperature: chex.Numeric = 1.0) -> base.PolicyOutput[None]:
"""Runs MuZero search and returns the `PolicyOutput`.
In the shape descriptions, `B` denotes the batch dimension.
Args:
params: params to be forwarded to root and recurrent functions.
rng_key: random number generator state, the key is consumed.
root: a `(prior_logits, value, embedding)` `RootFnOutput`. The
`prior_logits` are from a policy network. The shapes are
`([B, num_actions], [B], [B, ...])`, respectively.
recurrent_fn: a callable to be called on the leaf nodes and unvisited
actions retrieved by the simulation step, which takes as args
`(params, rng_key, action, embedding)` and returns a `RecurrentFnOutput`
and the new state embedding. The `rng_key` argument is consumed.
num_simulations: the number of simulations.
invalid_actions: a mask with invalid actions. Invalid actions
have ones, valid actions have zeros in the mask. Shape `[B, num_actions]`.
max_depth: maximum search tree depth allowed during simulation.
loop_fn: Function used to run the simulations. It may be required to pass
hk.fori_loop if using this function inside a Haiku module.
qtransform: function to obtain completed Q-values for a node.
    dirichlet_fraction: float from 0 to 1 interpolating between using only the
      prior policy (0.0) and using only the Dirichlet noise (1.0).
dirichlet_alpha: concentration parameter to parametrize the Dirichlet
distribution.
pb_c_init: constant c_1 in the PUCT formula.
pb_c_base: constant c_2 in the PUCT formula.
temperature: temperature for acting proportionally to
`visit_counts**(1 / temperature)`.
Returns:
`PolicyOutput` containing the proposed action, action_weights and the used
search tree.
"""
rng_key, dirichlet_rng_key, search_rng_key = jax.random.split(rng_key, 3)
# Adding Dirichlet noise.
noisy_logits = _get_logits_from_probs(
_add_dirichlet_noise(
dirichlet_rng_key,
jax.nn.softmax(root.prior_logits),
dirichlet_fraction=dirichlet_fraction,
dirichlet_alpha=dirichlet_alpha))
root = root.replace(
prior_logits=_mask_invalid_actions(noisy_logits, invalid_actions))
# Running the search.
interior_action_selection_fn = functools.partial(
action_selection.muzero_action_selection,
pb_c_base=pb_c_base,
pb_c_init=pb_c_init,
qtransform=qtransform)
root_action_selection_fn = functools.partial(
interior_action_selection_fn,
depth=0)
search_tree = search.search(
params=params,
rng_key=search_rng_key,
root=root,
recurrent_fn=recurrent_fn,
root_action_selection_fn=root_action_selection_fn,
interior_action_selection_fn=interior_action_selection_fn,
num_simulations=num_simulations,
max_depth=max_depth,
invalid_actions=invalid_actions,
loop_fn=loop_fn)
# Sampling the proposed action proportionally to the visit counts.
summary = search_tree.summary()
action_weights = summary.visit_probs
action_logits = _apply_temperature(
_get_logits_from_probs(action_weights), temperature)
action = jax.random.categorical(rng_key, action_logits)
return base.PolicyOutput(
action=action,
action_weights=action_weights,
search_tree=search_tree)
def gumbel_muzero_policy(
params: base.Params,
rng_key: chex.PRNGKey,
root: base.RootFnOutput,
recurrent_fn: base.RecurrentFn,
num_simulations: int,
invalid_actions: Optional[chex.Array] = None,
max_depth: Optional[int] = None,
loop_fn: base.LoopFn = jax.lax.fori_loop,
*,
qtransform: base.QTransform = qtransforms.qtransform_completed_by_mix_value,
max_num_considered_actions: int = 16,
gumbel_scale: chex.Numeric = 1.,
) -> base.PolicyOutput[action_selection.GumbelMuZeroExtraData]:
"""Runs Gumbel MuZero search and returns the `PolicyOutput`.
This policy implements Full Gumbel MuZero from
"Policy improvement by planning with Gumbel".
https://openreview.net/forum?id=bERaNdoegnO
At the root of the search tree, actions are selected by Sequential Halving
with Gumbel. At non-root nodes (aka interior nodes), actions are selected by
the Full Gumbel MuZero deterministic action selection.
In the shape descriptions, `B` denotes the batch dimension.
Args:
params: params to be forwarded to root and recurrent functions.
rng_key: random number generator state, the key is consumed.
root: a `(prior_logits, value, embedding)` `RootFnOutput`. The
`prior_logits` are from a policy network. The shapes are
`([B, num_actions], [B], [B, ...])`, respectively.
recurrent_fn: a callable to be called on the leaf nodes and unvisited
actions retrieved by the simulation step, which takes as args
`(params, rng_key, action, embedding)` and returns a `RecurrentFnOutput`
and the new state embedding. The `rng_key` argument is consumed.
num_simulations: the number of simulations.
invalid_actions: a mask with invalid actions. Invalid actions
have ones, valid actions have zeros in the mask. Shape `[B, num_actions]`.
max_depth: maximum search tree depth allowed during simulation.
loop_fn: Function used to run the simulations. It may be required to pass
hk.fori_loop if using this function inside a Haiku module.
qtransform: function to obtain completed Q-values for a node.
max_num_considered_actions: the maximum number of actions expanded at the
root node. A smaller number of actions will be expanded if the number of
valid actions is smaller.
    gumbel_scale: scale for the Gumbel noise. Evaluation on perfect-information
games can use gumbel_scale=0.0.
Returns:
`PolicyOutput` containing the proposed action, action_weights and the used
search tree.
"""
# Masking invalid actions.
root = root.replace(
prior_logits=_mask_invalid_actions(root.prior_logits, invalid_actions))
# Generating Gumbel.
rng_key, gumbel_rng = jax.random.split(rng_key)
gumbel = gumbel_scale * jax.random.gumbel(
gumbel_rng, shape=root.prior_logits.shape, dtype=root.prior_logits.dtype)
# Searching.
extra_data = action_selection.GumbelMuZeroExtraData(root_gumbel=gumbel)
search_tree = search.search(
params=params,
rng_key=rng_key,
root=root,
recurrent_fn=recurrent_fn,
root_action_selection_fn=functools.partial(
action_selection.gumbel_muzero_root_action_selection,
num_simulations=num_simulations,
max_num_considered_actions=max_num_considered_actions,
qtransform=qtransform,
),
interior_action_selection_fn=functools.partial(
action_selection.gumbel_muzero_interior_action_selection,
qtransform=qtransform,
),
num_simulations=num_simulations,
max_depth=max_depth,
invalid_actions=invalid_actions,
extra_data=extra_data,
loop_fn=loop_fn)
summary = search_tree.summary()
# Acting with the best action from the most visited actions.
# The "best" action has the highest `gumbel + logits + q`.
# Inside the minibatch, the considered_visit can be different on states with
# a smaller number of valid actions.
considered_visit = jnp.max(summary.visit_counts, axis=-1, keepdims=True)
# The completed_qvalues include imputed values for unvisited actions.
completed_qvalues = jax.vmap(qtransform, in_axes=[0, None])( # pytype: disable=wrong-arg-types # numpy-scalars # pylint: disable=line-too-long
search_tree, search_tree.ROOT_INDEX)
to_argmax = seq_halving.score_considered(
considered_visit, gumbel, root.prior_logits, completed_qvalues,
summary.visit_counts)
action = action_selection.masked_argmax(to_argmax, invalid_actions)
# Producing action_weights usable to train the policy network.
completed_search_logits = _mask_invalid_actions(
root.prior_logits + completed_qvalues, invalid_actions)
action_weights = jax.nn.softmax(completed_search_logits)
return base.PolicyOutput(
action=action,
action_weights=action_weights,
search_tree=search_tree)
def stochastic_muzero_policy(
params: chex.ArrayTree,
rng_key: chex.PRNGKey,
root: base.RootFnOutput,
decision_recurrent_fn: base.DecisionRecurrentFn,
chance_recurrent_fn: base.ChanceRecurrentFn,
num_simulations: int,
num_actions: int,
num_chance_outcomes: int,
invalid_actions: Optional[chex.Array] = None,
max_depth: Optional[int] = None,
loop_fn: base.LoopFn = jax.lax.fori_loop,
*,
qtransform: base.QTransform = qtransforms.qtransform_by_parent_and_siblings,
dirichlet_fraction: chex.Numeric = 0.25,
dirichlet_alpha: chex.Numeric = 0.3,
pb_c_init: chex.Numeric = 1.25,
pb_c_base: chex.Numeric = 19652,
temperature: chex.Numeric = 1.0) -> base.PolicyOutput[None]:
"""Runs Stochastic MuZero search.
Implements search as described in the Stochastic MuZero paper:
(https://openreview.net/forum?id=X6D9bAHhBQ1).
In the shape descriptions, `B` denotes the batch dimension.
Args:
params: params to be forwarded to root and recurrent functions.
rng_key: random number generator state, the key is consumed.
root: a `(prior_logits, value, embedding)` `RootFnOutput`. The
`prior_logits` are from a policy network. The shapes are `([B,
num_actions], [B], [B, ...])`, respectively.
decision_recurrent_fn: a callable to be called on the leaf decision nodes
and unvisited actions retrieved by the simulation step, which takes as
args `(params, rng_key, action, state_embedding)` and returns a
`(DecisionRecurrentFnOutput, afterstate_embedding)`.
chance_recurrent_fn: a callable to be called on the leaf chance nodes and
unvisited actions retrieved by the simulation step, which takes as args
`(params, rng_key, chance_outcome, afterstate_embedding)` and returns a
`(ChanceRecurrentFnOutput, state_embedding)`.
num_simulations: the number of simulations.
num_actions: number of environment actions.
num_chance_outcomes: number of chance outcomes following an afterstate.
invalid_actions: a mask with invalid actions. Invalid actions have ones,
valid actions have zeros in the mask. Shape `[B, num_actions]`.
max_depth: maximum search tree depth allowed during simulation.
loop_fn: Function used to run the simulations. It may be required to pass
hk.fori_loop if using this function inside a Haiku module.
qtransform: function to obtain completed Q-values for a node.
    dirichlet_fraction: float from 0 to 1 interpolating between using only the
      prior policy (0.0) and using only the Dirichlet noise (1.0).
dirichlet_alpha: concentration parameter to parametrize the Dirichlet
distribution.
pb_c_init: constant c_1 in the PUCT formula.
pb_c_base: constant c_2 in the PUCT formula.
temperature: temperature for acting proportionally to `visit_counts**(1 /
temperature)`.
Returns:
`PolicyOutput` containing the proposed action, action_weights and the used
search tree.
"""
rng_key, dirichlet_rng_key, search_rng_key = jax.random.split(rng_key, 3)
# Adding Dirichlet noise.
noisy_logits = _get_logits_from_probs(
_add_dirichlet_noise(
dirichlet_rng_key,
jax.nn.softmax(root.prior_logits),
dirichlet_fraction=dirichlet_fraction,
dirichlet_alpha=dirichlet_alpha))
root = root.replace(
prior_logits=_mask_invalid_actions(noisy_logits, invalid_actions))
# construct a dummy afterstate embedding
batch_size = jax.tree_util.tree_leaves(root.embedding)[0].shape[0]
dummy_action = jnp.zeros([batch_size], dtype=jnp.int32)
_, dummy_afterstate_embedding = decision_recurrent_fn(params, rng_key,
dummy_action,
root.embedding)
root = root.replace(
      # Pad the action logits with `num_chance_outcomes` entries of -inf so
      # the last dimension is A + C.
prior_logits=jnp.concatenate([
root.prior_logits,
jnp.full([batch_size, num_chance_outcomes], fill_value=-jnp.inf)
], axis=-1),
# replace embedding with wrapper.
embedding=base.StochasticRecurrentState(
state_embedding=root.embedding,
afterstate_embedding=dummy_afterstate_embedding,
is_decision_node=jnp.ones([batch_size], dtype=bool)))
# Stochastic MuZero Change: We need to be able to tell if different nodes are
# decision or chance. This is accomplished by imposing a special structure
# on the embeddings stored in each node. Each embedding is an instance of
# StochasticRecurrentState which maintains this information.
recurrent_fn = _make_stochastic_recurrent_fn(
decision_node_fn=decision_recurrent_fn,
chance_node_fn=chance_recurrent_fn,
num_actions=num_actions,
num_chance_outcomes=num_chance_outcomes,
)
# Running the search.
interior_decision_node_selection_fn = functools.partial(
action_selection.muzero_action_selection,
pb_c_base=pb_c_base,
pb_c_init=pb_c_init,
qtransform=qtransform)
interior_action_selection_fn = _make_stochastic_action_selection_fn(
interior_decision_node_selection_fn, num_actions)
root_action_selection_fn = functools.partial(
interior_action_selection_fn, depth=0)
search_tree = search.search(
params=params,
rng_key=search_rng_key,
root=root,
recurrent_fn=recurrent_fn,
root_action_selection_fn=root_action_selection_fn,
interior_action_selection_fn=interior_action_selection_fn,
num_simulations=num_simulations,
max_depth=max_depth,
invalid_actions=invalid_actions,
loop_fn=loop_fn)
# Sampling the proposed action proportionally to the visit counts.
search_tree = _mask_tree(search_tree, num_actions, 'decision')
summary = search_tree.summary()
action_weights = summary.visit_probs
action_logits = _apply_temperature(
_get_logits_from_probs(action_weights), temperature)
action = jax.random.categorical(rng_key, action_logits)
return base.PolicyOutput(
action=action, action_weights=action_weights, search_tree=search_tree)
def _mask_invalid_actions(logits, invalid_actions):
"""Returns logits with zero mass to invalid actions."""
if invalid_actions is None:
return logits
chex.assert_equal_shape([logits, invalid_actions])
logits = logits - jnp.max(logits, axis=-1, keepdims=True)
# At the end of an episode, all actions can be invalid. A softmax would then
# produce NaNs, if using -inf for the logits. We avoid the NaNs by using
# a finite `min_logit` for the invalid actions.
min_logit = jnp.finfo(logits.dtype).min
return jnp.where(invalid_actions, min_logit, logits)
def _get_logits_from_probs(probs):
tiny = jnp.finfo(probs).tiny
return jnp.log(jnp.maximum(probs, tiny))
def _add_dirichlet_noise(rng_key, probs, *, dirichlet_alpha,
dirichlet_fraction):
"""Mixes the probs with Dirichlet noise."""
chex.assert_rank(probs, 2)
chex.assert_type([dirichlet_alpha, dirichlet_fraction], float)
batch_size, num_actions = probs.shape
noise = jax.random.dirichlet(
rng_key,
alpha=jnp.full([num_actions], fill_value=dirichlet_alpha),
shape=(batch_size,))
noisy_probs = (1 - dirichlet_fraction) * probs + dirichlet_fraction * noise
return noisy_probs
def _apply_temperature(logits, temperature):
"""Returns `logits / temperature`, supporting also temperature=0."""
# The max subtraction prevents +inf after dividing by a small temperature.
logits = logits - jnp.max(logits, keepdims=True, axis=-1)
tiny = jnp.finfo(logits.dtype).tiny
return logits / jnp.maximum(tiny, temperature)
def _make_stochastic_recurrent_fn(
decision_node_fn: base.DecisionRecurrentFn,
chance_node_fn: base.ChanceRecurrentFn,
num_actions: int,
num_chance_outcomes: int,
) -> base.RecurrentFn:
"""Make Stochastic Recurrent Fn."""
def stochastic_recurrent_fn(
params: base.Params,
rng: chex.PRNGKey,
action_or_chance: base.Action, # [B]
state: base.StochasticRecurrentState
) -> Tuple[base.RecurrentFnOutput, base.StochasticRecurrentState]:
batch_size = jax.tree_util.tree_leaves(state.state_embedding)[0].shape[0]
# Internally we assume that there are `A' = A + C` "actions";
    # action_or_chance can take on values in `{0, 1, ..., A' - 1}`.
# To interpret it as an action we can leave it as is:
action = action_or_chance - 0
# To interpret it as a chance outcome we subtract num_actions:
chance_outcome = action_or_chance - num_actions
decision_output, afterstate_embedding = decision_node_fn(
params, rng, action, state.state_embedding)
# Outputs from DecisionRecurrentFunction produce chance logits with
# dim `C`, to respect our internal convention that there are `A' = A + C`
# "actions" we pad with `A` dummy logits which are ultimately ignored:
# see `_mask_tree`.
output_if_decision_node = base.RecurrentFnOutput(
prior_logits=jnp.concatenate([
jnp.full([batch_size, num_actions], fill_value=-jnp.inf),
decision_output.chance_logits], axis=-1),
value=decision_output.afterstate_value,
reward=jnp.zeros_like(decision_output.afterstate_value),
discount=jnp.ones_like(decision_output.afterstate_value))
chance_output, state_embedding = chance_node_fn(params, rng, chance_outcome,
state.afterstate_embedding)
# Outputs from ChanceRecurrentFunction produce action logits with dim `A`,
# to respect our internal convention that there are `A' = A + C` "actions"
# we pad with `C` dummy logits which are ultimately ignored: see
# `_mask_tree`.
output_if_chance_node = base.RecurrentFnOutput(
prior_logits=jnp.concatenate([
chance_output.action_logits,
jnp.full([batch_size, num_chance_outcomes], fill_value=-jnp.inf)
], axis=-1),
value=chance_output.value,
reward=chance_output.reward,
discount=chance_output.discount)
new_state = base.StochasticRecurrentState(
state_embedding=state_embedding,
afterstate_embedding=afterstate_embedding,
is_decision_node=jnp.logical_not(state.is_decision_node))
def _broadcast_where(decision_leaf, chance_leaf):
extra_dims = [1] * (len(decision_leaf.shape) - 1)
expanded_is_decision = jnp.reshape(state.is_decision_node,
[-1] + extra_dims)
return jnp.where(
          # Ensure state.is_decision_node has an appropriate shape.
expanded_is_decision,
decision_leaf, chance_leaf)
    output = jax.tree_util.tree_map(
        _broadcast_where, output_if_decision_node, output_if_chance_node)
return output, new_state
return stochastic_recurrent_fn
def _mask_tree(tree: search.Tree, num_actions: int, mode: str) -> search.Tree:
"""Masks out parts of the tree based upon node type.
"Actions" in our tree can either be action or chance values: A' = A + C. This
utility function masks the parts of the tree containing dimensions of shape
A' to be either A or C depending upon `mode`.
Args:
tree: The tree to be masked.
num_actions: The number of environment actions A.
mode: Either "decision" or "chance".
Returns:
An appropriately masked tree.
"""
def _take_slice(x):
if mode == 'decision':
return x[..., :num_actions]
elif mode == 'chance':
return x[..., num_actions:]
else:
raise ValueError(f'Unknown mode: {mode}.')
return tree.replace(
children_index=_take_slice(tree.children_index),
children_prior_logits=_take_slice(tree.children_prior_logits),
children_visits=_take_slice(tree.children_visits),
children_rewards=_take_slice(tree.children_rewards),
children_discounts=_take_slice(tree.children_discounts),
children_values=_take_slice(tree.children_values),
root_invalid_actions=_take_slice(tree.root_invalid_actions))
def _make_stochastic_action_selection_fn(
decision_node_selection_fn: base.InteriorActionSelectionFn,
num_actions: int,
) -> base.InteriorActionSelectionFn:
"""Make Stochastic Action Selection Fn."""
# NOTE: trees are unbatched here.
def _chance_node_selection_fn(
tree: search.Tree,
node_index: chex.Array,
) -> chex.Array:
num_chance = tree.children_visits[node_index]
chance_logits = tree.children_prior_logits[node_index]
prob_chance = jax.nn.softmax(chance_logits)
argmax_chance = jnp.argmax(prob_chance / (num_chance + 1), axis=-1)
return argmax_chance
def _action_selection_fn(key: chex.PRNGKey, tree: search.Tree,
node_index: chex.Array,
depth: chex.Array) -> chex.Array:
is_decision = tree.embeddings.is_decision_node[node_index]
chance_selection = _chance_node_selection_fn(
tree=_mask_tree(tree, num_actions, 'chance'),
node_index=node_index) + num_actions
decision_selection = decision_node_selection_fn(
key, _mask_tree(tree, num_actions, 'decision'), node_index, depth)
return jax.lax.cond(is_decision, lambda: decision_selection,
lambda: chance_selection)
return _action_selection_fn
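# --- Editorial usage sketch; not part of the original mctx source. ---
# A minimal, hypothetical example of calling `gumbel_muzero_policy` on a
# single-state bandit whose episode terminates after one step (discount=0).
# The reward values and simulation budget are arbitrary.
if __name__ == '__main__':
  def _example_recurrent_fn(params, rng_key, action, embedding):
    del params, rng_key
    example_rewards = jnp.array([[1.0, 0.0, 2.0, 0.5]])
    reward = example_rewards[jnp.arange(action.shape[0]), action]
    return base.RecurrentFnOutput(
        reward=reward,
        discount=jnp.zeros_like(reward),
        prior_logits=jnp.zeros_like(example_rewards),
        value=jnp.zeros_like(reward)), embedding

  _example_root = base.RootFnOutput(
      prior_logits=jnp.zeros([1, 4]),
      value=jnp.zeros([1]),
      embedding=())
  _example_output = gumbel_muzero_policy(
      params=(),
      rng_key=jax.random.PRNGKey(0),
      root=_example_root,
      recurrent_fn=_example_recurrent_fn,
      num_simulations=8)
  print(_example_output.action, _example_output.action_weights)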
| mctx-main | mctx/_src/policies.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core types used in mctx."""
from typing import Any, Callable, Generic, TypeVar, Tuple
import chex
from mctx._src import tree
# Parameters are arbitrary nested structures of `chex.Array`.
# A nested structure is either a single object, or a collection (list, tuple,
# dictionary, etc.) of other nested structures.
Params = chex.ArrayTree
# The model used to search is expressed by a `RecurrentFn` function that takes
# `(params, rng_key, action, embedding)` and returns a `RecurrentFnOutput` and
# the new state embedding.
@chex.dataclass(frozen=True)
class RecurrentFnOutput:
"""The output of a `RecurrentFn`.
reward: `[B]` an approximate reward from the state-action transition.
discount: `[B]` the discount between the `reward` and the `value`.
prior_logits: `[B, num_actions]` the logits produced by a policy network.
value: `[B]` an approximate value of the state after the state-action
transition.
"""
reward: chex.Array
discount: chex.Array
prior_logits: chex.Array
value: chex.Array
Action = chex.Array
RecurrentState = Any
RecurrentFn = Callable[
[Params, chex.PRNGKey, Action, RecurrentState],
Tuple[RecurrentFnOutput, RecurrentState]]
@chex.dataclass(frozen=True)
class RootFnOutput:
"""The output of a representation network.
prior_logits: `[B, num_actions]` the logits produced by a policy network.
value: `[B]` an approximate value of the current state.
embedding: `[B, ...]` the inputs to the next `recurrent_fn` call.
"""
prior_logits: chex.Array
value: chex.Array
embedding: RecurrentState
# Action selection functions specify how to pick nodes to expand in the tree.
NodeIndices = chex.Array
Depth = chex.Array
RootActionSelectionFn = Callable[
[chex.PRNGKey, tree.Tree, NodeIndices], chex.Array]
InteriorActionSelectionFn = Callable[
[chex.PRNGKey, tree.Tree, NodeIndices, Depth],
chex.Array]
QTransform = Callable[[tree.Tree, chex.Array], chex.Array]
# LoopFn has the same interface as jax.lax.fori_loop.
LoopFn = Callable[
[int, int, Callable[[Any, Any], Any], Tuple[chex.PRNGKey, tree.Tree]],
Tuple[chex.PRNGKey, tree.Tree]]
T = TypeVar("T")
@chex.dataclass(frozen=True)
class PolicyOutput(Generic[T]):
"""The output of a policy.
action: `[B]` the proposed action.
action_weights: `[B, num_actions]` the targets used to train a policy network.
The action weights sum to one. Usually, the policy network is trained by
cross-entropy:
`cross_entropy(labels=stop_gradient(action_weights), logits=prior_logits)`.
search_tree: `[B, ...]` the search tree of the finished search.
"""
action: chex.Array
action_weights: chex.Array
search_tree: tree.Tree[T]
@chex.dataclass(frozen=True)
class DecisionRecurrentFnOutput:
"""Output of the function for expanding decision nodes.
Expanding a decision node takes an action and a state embedding and produces
an afterstate, which represents the state of the environment after an action
is taken but before the environment has updated its state. Accordingly, there
is no discount factor or reward for transitioning from state `s` to afterstate
`sa`.
Attributes:
chance_logits: `[B, C]` logits of `C` chance outcomes at the afterstate.
afterstate_value: `[B]` values of the afterstates `v(sa)`.
"""
chance_logits: chex.Array # [B, C]
afterstate_value: chex.Array # [B]
@chex.dataclass(frozen=True)
class ChanceRecurrentFnOutput:
"""Output of the function for expanding chance nodes.
Expanding a chance node takes a chance outcome and an afterstate embedding
and produces a state, which captures a potentially stochastic environment
transition. When this transition occurs reward and discounts are produced as
in a normal transition.
Attributes:
action_logits: `[B, A]` logits of different actions from the state.
value: `[B]` values of the states `v(s)`.
reward: `[B]` rewards at the states.
discount: `[B]` discounts at the states.
"""
action_logits: chex.Array # [B, A]
value: chex.Array # [B]
reward: chex.Array # [B]
discount: chex.Array # [B]
@chex.dataclass(frozen=True)
class StochasticRecurrentState:
"""Wrapper that enables different treatment of decision and chance nodes.
In Stochastic MuZero tree nodes can either be decision or chance nodes, these
nodes are treated differently during expansion, search and backup, and a user
could also pass differently structured embeddings for each type of node. This
wrapper enables treating chance and decision nodes differently and supports
potential differences between chance and decision node structures.
Attributes:
state_embedding: `[B ...]` an optionally meaningful state embedding.
afterstate_embedding: `[B ...]` an optionally meaningful afterstate
embedding.
is_decision_node: `[B]` whether the node is a decision or chance node. If it
is a decision node, `afterstate_embedding` is a dummy value. If it is a
chance node, `state_embedding` is a dummy value.
"""
state_embedding: chex.ArrayTree # [B, ...]
afterstate_embedding: chex.ArrayTree # [B, ...]
is_decision_node: chex.Array # [B]
RecurrentState = chex.ArrayTree
DecisionRecurrentFn = Callable[[Params, chex.PRNGKey, Action, RecurrentState],
Tuple[DecisionRecurrentFnOutput, RecurrentState]]
ChanceRecurrentFn = Callable[[Params, chex.PRNGKey, Action, RecurrentState],
Tuple[ChanceRecurrentFnOutput, RecurrentState]]
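# --- Editorial usage sketch; not part of the original mctx source. ---
# Constructing the frozen dataclasses directly, e.g. for a batch of size 2 and
# 3 actions. Because the dataclasses are frozen, `.replace(...)` returns an
# updated copy instead of mutating in place.
if __name__ == '__main__':
  import jax.numpy as jnp

  _example_output = RecurrentFnOutput(
      reward=jnp.zeros([2]),
      discount=jnp.ones([2]),
      prior_logits=jnp.zeros([2, 3]),
      value=jnp.zeros([2]))
  print(_example_output.replace(reward=jnp.ones([2])).reward)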
| mctx-main | mctx/_src/base.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `policies.py`."""
import functools
from absl.testing import absltest
import jax
import jax.numpy as jnp
import mctx
from mctx._src import policies
import numpy as np
def _make_bandit_recurrent_fn(rewards, dummy_embedding=()):
"""Returns a recurrent_fn with discount=0."""
def recurrent_fn(params, rng_key, action, embedding):
del params, rng_key, embedding
reward = rewards[jnp.arange(action.shape[0]), action]
return mctx.RecurrentFnOutput(
reward=reward,
discount=jnp.zeros_like(reward),
prior_logits=jnp.zeros_like(rewards),
value=jnp.zeros_like(reward),
), dummy_embedding
return recurrent_fn
def _make_bandit_decision_and_chance_fns(rewards, num_chance_outcomes):
def decision_recurrent_fn(params, rng_key, action, embedding):
del params, rng_key
batch_size = action.shape[0]
reward = rewards[jnp.arange(batch_size), action]
dummy_chance_logits = jnp.full([batch_size, num_chance_outcomes],
-jnp.inf).at[:, 0].set(1.0)
afterstate_embedding = (action, embedding)
return mctx.DecisionRecurrentFnOutput(
chance_logits=dummy_chance_logits,
afterstate_value=jnp.zeros_like(reward)), afterstate_embedding
def chance_recurrent_fn(params, rng_key, chance_outcome,
afterstate_embedding):
del params, rng_key, chance_outcome
afterstate_action, embedding = afterstate_embedding
batch_size = afterstate_action.shape[0]
reward = rewards[jnp.arange(batch_size), afterstate_action]
return mctx.ChanceRecurrentFnOutput(
action_logits=jnp.zeros_like(rewards),
value=jnp.zeros_like(reward),
discount=jnp.zeros_like(reward),
reward=reward), embedding
return decision_recurrent_fn, chance_recurrent_fn
def _get_deepest_leaf(tree, node_index):
"""Returns `(leaf, depth)` with maximum depth and visit count.
Args:
tree: _unbatched_ MCTS tree state.
node_index: the node of the inspected subtree.
Returns:
`(leaf, depth)` of a deepest leaf. If multiple leaves have the same depth,
the leaf with the highest visit count is returned.
"""
np.testing.assert_equal(len(tree.children_index.shape), 2)
leaf = node_index
max_found_depth = 0
for action in range(tree.children_index.shape[-1]):
next_node_index = tree.children_index[node_index, action]
if next_node_index != tree.UNVISITED:
found_leaf, found_depth = _get_deepest_leaf(tree, next_node_index)
if ((1 + found_depth, tree.node_visits[found_leaf]) >
(max_found_depth, tree.node_visits[leaf])):
leaf = found_leaf
max_found_depth = 1 + found_depth
return leaf, max_found_depth
class PoliciesTest(absltest.TestCase):
def test_apply_temperature_one(self):
"""Tests temperature=1."""
logits = jnp.arange(6, dtype=jnp.float32)
new_logits = policies._apply_temperature(logits, temperature=1.0)
np.testing.assert_allclose(logits - logits.max(), new_logits)
def test_apply_temperature_two(self):
"""Tests temperature=2."""
logits = jnp.arange(6, dtype=jnp.float32)
temperature = 2.0
new_logits = policies._apply_temperature(logits, temperature)
np.testing.assert_allclose((logits - logits.max()) / temperature,
new_logits)
def test_apply_temperature_zero(self):
"""Tests temperature=0."""
logits = jnp.arange(4, dtype=jnp.float32)
new_logits = policies._apply_temperature(logits, temperature=0.0)
np.testing.assert_allclose(
jnp.array([-2.552118e+38, -1.701412e+38, -8.507059e+37, 0.0]),
new_logits,
rtol=1e-3)
def test_apply_temperature_zero_on_large_logits(self):
"""Tests temperature=0 on large logits."""
logits = jnp.array([100.0, 3.4028235e+38, -jnp.inf, -3.4028235e+38])
new_logits = policies._apply_temperature(logits, temperature=0.0)
np.testing.assert_allclose(
jnp.array([-jnp.inf, 0.0, -jnp.inf, -jnp.inf]), new_logits)
def test_mask_invalid_actions(self):
"""Tests action masking."""
logits = jnp.array([1e6, -jnp.inf, 1e6 + 1, -100.0])
invalid_actions = jnp.array([0.0, 1.0, 0.0, 1.0])
masked_logits = policies._mask_invalid_actions(
logits, invalid_actions)
valid_probs = jax.nn.softmax(jnp.array([0.0, 1.0]))
np.testing.assert_allclose(
jnp.array([valid_probs[0], 0.0, valid_probs[1], 0.0]),
jax.nn.softmax(masked_logits))
def test_mask_all_invalid_actions(self):
"""Tests a state with no valid action."""
logits = jnp.array([-jnp.inf, -jnp.inf, -jnp.inf, -jnp.inf])
invalid_actions = jnp.array([1.0, 1.0, 1.0, 1.0])
masked_logits = policies._mask_invalid_actions(
logits, invalid_actions)
np.testing.assert_allclose(
jnp.array([0.25, 0.25, 0.25, 0.25]),
jax.nn.softmax(masked_logits))
def test_muzero_policy(self):
root = mctx.RootFnOutput(
prior_logits=jnp.array([
[-1.0, 0.0, 2.0, 3.0],
]),
value=jnp.array([0.0]),
embedding=(),
)
rewards = jnp.zeros_like(root.prior_logits)
invalid_actions = jnp.array([
[0.0, 0.0, 0.0, 1.0],
])
policy_output = mctx.muzero_policy(
params=(),
rng_key=jax.random.PRNGKey(0),
root=root,
recurrent_fn=_make_bandit_recurrent_fn(rewards),
num_simulations=1,
invalid_actions=invalid_actions,
dirichlet_fraction=0.0)
expected_action = jnp.array([2], dtype=jnp.int32)
np.testing.assert_array_equal(expected_action, policy_output.action)
expected_action_weights = jnp.array([
[0.0, 0.0, 1.0, 0.0],
])
np.testing.assert_allclose(expected_action_weights,
policy_output.action_weights)
def test_gumbel_muzero_policy(self):
root_value = jnp.array([-5.0])
root = mctx.RootFnOutput(
prior_logits=jnp.array([
[0.0, -1.0, 2.0, 3.0],
]),
value=root_value,
embedding=(),
)
rewards = jnp.array([
[20.0, 3.0, -1.0, 10.0],
])
invalid_actions = jnp.array([
[1.0, 0.0, 0.0, 1.0],
])
value_scale = 0.05
maxvisit_init = 60
num_simulations = 17
max_depth = 3
qtransform = functools.partial(
mctx.qtransform_completed_by_mix_value,
value_scale=value_scale,
maxvisit_init=maxvisit_init,
rescale_values=True)
policy_output = mctx.gumbel_muzero_policy(
params=(),
rng_key=jax.random.PRNGKey(0),
root=root,
recurrent_fn=_make_bandit_recurrent_fn(rewards),
num_simulations=num_simulations,
invalid_actions=invalid_actions,
max_depth=max_depth,
qtransform=qtransform,
gumbel_scale=1.0)
# Testing the action.
expected_action = jnp.array([1], dtype=jnp.int32)
np.testing.assert_array_equal(expected_action, policy_output.action)
# Testing the action_weights.
probs = jax.nn.softmax(jnp.where(
invalid_actions, -jnp.inf, root.prior_logits))
mix_value = 1.0 / (num_simulations + 1) * (root_value + num_simulations * (
probs[:, 1] * rewards[:, 1] + probs[:, 2] * rewards[:, 2]))
completed_qvalues = jnp.array([
[mix_value[0], rewards[0, 1], rewards[0, 2], mix_value[0]],
])
max_value = jnp.max(completed_qvalues, axis=-1, keepdims=True)
min_value = jnp.min(completed_qvalues, axis=-1, keepdims=True)
total_value_scale = (maxvisit_init + np.ceil(num_simulations / 2)
) * value_scale
rescaled_qvalues = total_value_scale * (completed_qvalues - min_value) / (
max_value - min_value)
expected_action_weights = jax.nn.softmax(
jnp.where(invalid_actions,
-jnp.inf,
root.prior_logits + rescaled_qvalues))
np.testing.assert_allclose(expected_action_weights,
policy_output.action_weights,
atol=1e-6)
# Testing the visit_counts.
summary = policy_output.search_tree.summary()
expected_visit_counts = jnp.array(
[[0.0, np.ceil(num_simulations / 2), num_simulations // 2, 0.0]])
np.testing.assert_array_equal(expected_visit_counts, summary.visit_counts)
# Testing max_depth.
leaf, max_found_depth = _get_deepest_leaf(
jax.tree_util.tree_map(lambda x: x[0], policy_output.search_tree),
policy_output.search_tree.ROOT_INDEX)
self.assertEqual(max_depth, max_found_depth)
self.assertEqual(6, policy_output.search_tree.node_visits[0, leaf])
def test_gumbel_muzero_policy_without_invalid_actions(self):
root_value = jnp.array([-5.0])
root = mctx.RootFnOutput(
prior_logits=jnp.array([
[0.0, -1.0, 2.0, 3.0],
]),
value=root_value,
embedding=(),
)
rewards = jnp.array([
[20.0, 3.0, -1.0, 10.0],
])
value_scale = 0.05
maxvisit_init = 60
num_simulations = 17
max_depth = 3
qtransform = functools.partial(
mctx.qtransform_completed_by_mix_value,
value_scale=value_scale,
maxvisit_init=maxvisit_init,
rescale_values=True)
policy_output = mctx.gumbel_muzero_policy(
params=(),
rng_key=jax.random.PRNGKey(0),
root=root,
recurrent_fn=_make_bandit_recurrent_fn(rewards),
num_simulations=num_simulations,
invalid_actions=None,
max_depth=max_depth,
qtransform=qtransform,
gumbel_scale=1.0)
# Testing the action.
expected_action = jnp.array([3], dtype=jnp.int32)
np.testing.assert_array_equal(expected_action, policy_output.action)
# Testing the action_weights.
summary = policy_output.search_tree.summary()
completed_qvalues = rewards
max_value = jnp.max(completed_qvalues, axis=-1, keepdims=True)
min_value = jnp.min(completed_qvalues, axis=-1, keepdims=True)
total_value_scale = (maxvisit_init + summary.visit_counts.max()
) * value_scale
rescaled_qvalues = total_value_scale * (completed_qvalues - min_value) / (
max_value - min_value)
expected_action_weights = jax.nn.softmax(
root.prior_logits + rescaled_qvalues)
np.testing.assert_allclose(expected_action_weights,
policy_output.action_weights,
atol=1e-6)
# Testing the visit_counts.
expected_visit_counts = jnp.array(
[[6, 2, 2, 7]])
np.testing.assert_array_equal(expected_visit_counts, summary.visit_counts)
def test_stochastic_muzero_policy(self):
"""Tests that SMZ is equivalent to MZ with a dummy chance function."""
root = mctx.RootFnOutput(
prior_logits=jnp.array([
[-1.0, 0.0, 2.0, 3.0],
[0.0, 2.0, 5.0, -4.0],
]),
value=jnp.array([1.0, 0.0]),
embedding=jnp.zeros([2, 4])
)
rewards = jnp.zeros_like(root.prior_logits)
invalid_actions = jnp.array([
[0.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 1.0, 0.0],
])
num_simulations = 10
policy_output = mctx.muzero_policy(
params=(),
rng_key=jax.random.PRNGKey(0),
root=root,
recurrent_fn=_make_bandit_recurrent_fn(
rewards,
dummy_embedding=jnp.zeros_like(root.embedding)),
num_simulations=num_simulations,
invalid_actions=invalid_actions,
dirichlet_fraction=0.0)
num_chance_outcomes = 5
decision_rec_fn, chance_rec_fn = _make_bandit_decision_and_chance_fns(
rewards, num_chance_outcomes)
stochastic_policy_output = mctx.stochastic_muzero_policy(
params=(),
rng_key=jax.random.PRNGKey(0),
root=root,
decision_recurrent_fn=decision_rec_fn,
chance_recurrent_fn=chance_rec_fn,
num_simulations=2 * num_simulations,
num_actions=4,
num_chance_outcomes=num_chance_outcomes,
invalid_actions=invalid_actions,
dirichlet_fraction=0.0)
np.testing.assert_array_equal(stochastic_policy_output.action,
policy_output.action)
np.testing.assert_allclose(stochastic_policy_output.action_weights,
policy_output.action_weights)
if __name__ == "__main__":
absltest.main()
| mctx-main | mctx/_src/tests/policies_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `qtransforms.py`."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from mctx._src import qtransforms
import numpy as np
class QtransformsTest(absltest.TestCase):
def test_mix_value(self):
"""Tests the output of _compute_mixed_value()."""
raw_value = jnp.array(-0.8)
prior_logits = jnp.array([-jnp.inf, -1.0, 2.0, -jnp.inf])
probs = jax.nn.softmax(prior_logits)
visit_counts = jnp.array([0, 4.0, 4.0, 0])
qvalues = 10.0 / 54 * jnp.array([20.0, 3.0, -1.0, 10.0])
mix_value = qtransforms._compute_mixed_value(
raw_value, qvalues, visit_counts, probs)
num_simulations = jnp.sum(visit_counts)
expected_mix_value = 1.0 / (num_simulations + 1) * (
raw_value + num_simulations *
(probs[1] * qvalues[1] + probs[2] * qvalues[2]))
np.testing.assert_allclose(expected_mix_value, mix_value)
def test_mix_value_with_zero_visits(self):
"""Tests that zero visit counts do not divide by zero."""
raw_value = jnp.array(-0.8)
prior_logits = jnp.array([-jnp.inf, -1.0, 2.0, -jnp.inf])
probs = jax.nn.softmax(prior_logits)
visit_counts = jnp.array([0, 0, 0, 0])
qvalues = jnp.zeros_like(probs)
    with jax.debug_nans(True):
mix_value = qtransforms._compute_mixed_value(
raw_value, qvalues, visit_counts, probs)
np.testing.assert_allclose(raw_value, mix_value)
if __name__ == "__main__":
absltest.main()
| mctx-main | mctx/_src/tests/qtransforms_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A unit test comparing the search tree to an expected search tree."""
# pylint: disable=use-dict-literal
import functools
import json
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import chex
import jax
import jax.numpy as jnp
import mctx
import numpy as np
def _prepare_root(batch_size, num_actions):
"""Returns a root consistent with the stored expected trees."""
rng_key = jax.random.PRNGKey(0)
# Using a different rng_key inside each batch element.
rng_keys = [rng_key]
for i in range(1, batch_size):
rng_keys.append(jax.random.fold_in(rng_key, i))
embedding = jnp.stack(rng_keys)
output = jax.vmap(
functools.partial(_produce_prediction_output, num_actions=num_actions))(
embedding)
return mctx.RootFnOutput(
prior_logits=output["policy_logits"],
value=output["value"],
embedding=embedding,
)
def _produce_prediction_output(rng_key, num_actions):
"""Producing the model output as in the stored expected trees."""
  policy_rng, value_rng, reward_rng = jax.random.split(rng_key, 3)
del rng_key
# Producing value from [-1, +1).
value = jax.random.uniform(value_rng, shape=(), minval=-1.0, maxval=1.0)
# Producing reward from [-1, +1).
reward = jax.random.uniform(reward_rng, shape=(), minval=-1.0, maxval=1.0)
return dict(
policy_logits=jax.random.normal(policy_rng, shape=[num_actions]),
value=value,
reward=reward,
)
def _prepare_recurrent_fn(num_actions, *, discount, zero_reward):
"""Returns a dynamics function consistent with the expected trees."""
def recurrent_fn(params, rng_key, action, embedding):
del params, rng_key
# The embeddings serve as rng_keys.
embedding = jax.vmap(
functools.partial(_fold_action_in, num_actions=num_actions))(embedding,
action)
output = jax.vmap(
functools.partial(_produce_prediction_output, num_actions=num_actions))(
embedding)
reward = output["reward"]
if zero_reward:
reward = jnp.zeros_like(reward)
return mctx.RecurrentFnOutput(
reward=reward,
discount=jnp.full_like(reward, discount),
prior_logits=output["policy_logits"],
value=output["value"],
), embedding
return recurrent_fn
def _fold_action_in(rng_key, action, num_actions):
"""Returns a new rng key, selected by the given action."""
chex.assert_shape(action, ())
chex.assert_type(action, jnp.int32)
sub_rngs = jax.random.split(rng_key, num_actions)
return sub_rngs[action]
def tree_to_pytree(tree: mctx.Tree, batch_i: int = 0):
"""Converts the MCTS tree to nested dicts."""
nodes = {}
nodes[0] = _create_pynode(
tree, batch_i, 0, prior=1.0, action=None, reward=None)
children_prior_probs = jax.nn.softmax(tree.children_prior_logits, axis=-1)
for node_i in range(tree.num_simulations + 1):
for a_i in range(tree.num_actions):
prior = children_prior_probs[batch_i, node_i, a_i]
# Index of children, or -1 if not expanded
child_i = int(tree.children_index[batch_i, node_i, a_i])
if child_i >= 0:
reward = tree.children_rewards[batch_i, node_i, a_i]
child = _create_pynode(
tree, batch_i, child_i, prior=prior, action=a_i, reward=reward)
nodes[child_i] = child
else:
child = _create_bare_pynode(prior=prior, action=a_i)
# pylint: disable=line-too-long
nodes[node_i]["child_stats"].append(child) # pytype: disable=attribute-error
# pylint: enable=line-too-long
return nodes[0]
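# Illustrative sketch (not part of the original file): the nested dict produced
# by `tree_to_pytree` above has this shape (values here are placeholders):
#   {'prior': 1.0, 'visit': 33, 'value_view': ..., 'raw_value_view': ...,
#    'evaluation_index': 0,
#    'child_stats': [
#        {'action': 0, 'prior': ..., 'reward': ..., 'visit': ..., ...},
#        {'action': 1, 'prior': ..., 'child_stats': []},  # Unexpanded child.
#    ]}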
def _create_pynode(tree, batch_i, node_i, prior, action, reward):
"""Returns a dict with extracted search statistics."""
node = dict(
prior=_round_float(prior),
visit=int(tree.node_visits[batch_i, node_i]),
value_view=_round_float(tree.node_values[batch_i, node_i]),
raw_value_view=_round_float(tree.raw_values[batch_i, node_i]),
child_stats=[],
evaluation_index=node_i,
)
if action is not None:
node["action"] = action
if reward is not None:
node["reward"] = _round_float(reward)
return node
def _create_bare_pynode(prior, action):
return dict(
prior=_round_float(prior),
child_stats=[],
action=action,
)
def _round_float(value, ndigits=10):
return round(float(value), ndigits)
class TreeTest(parameterized.TestCase):
# Make sure to adjust the `shard_count` parameter in the build file to match
# the number of parameter configurations passed to test_tree.
# pylint: disable=line-too-long
@parameterized.named_parameters(
("muzero_norescale",
"../mctx/_src/tests/test_data/muzero_tree.json"),
("muzero_qtransform",
"../mctx/_src/tests/test_data/muzero_qtransform_tree.json"),
("gumbel_muzero_norescale",
"../mctx/_src/tests/test_data/gumbel_muzero_tree.json"),
("gumbel_muzero_reward",
"../mctx/_src/tests/test_data/gumbel_muzero_reward_tree.json"))
# pylint: enable=line-too-long
def test_tree(self, tree_data_path):
with open(tree_data_path, "rb") as fd:
tree = json.load(fd)
reproduced = self._reproduce_tree(tree)
chex.assert_trees_all_close(tree["tree"], reproduced, atol=1e-3)
def _reproduce_tree(self, tree):
"""Reproduces the given JSON tree by running a search."""
policy_fn = dict(
gumbel_muzero=mctx.gumbel_muzero_policy,
muzero=mctx.muzero_policy,
)[tree["algorithm"]]
env_config = tree["env_config"]
root = tree["tree"]
num_actions = len(root["child_stats"])
num_simulations = root["visit"] - 1
qtransform = functools.partial(
getattr(mctx, tree["algorithm_config"].pop("qtransform")),
**tree["algorithm_config"].pop("qtransform_kwargs", {}))
batch_size = 3
# To test the independence of the batch computation, we use different
# invalid actions for the other elements of the batch. The different batch
# elements will then have different search tree depths.
invalid_actions = np.zeros([batch_size, num_actions])
invalid_actions[1, 1:] = 1
invalid_actions[2, 2:] = 1
def run_policy():
return policy_fn(
params=(),
rng_key=jax.random.PRNGKey(1),
root=_prepare_root(batch_size=batch_size, num_actions=num_actions),
recurrent_fn=_prepare_recurrent_fn(num_actions, **env_config),
num_simulations=num_simulations,
qtransform=qtransform,
invalid_actions=invalid_actions,
**tree["algorithm_config"])
policy_output = jax.jit(run_policy)()
logging.info("Done search.")
return tree_to_pytree(policy_output.search_tree)
if __name__ == "__main__":
jax.config.update("jax_numpy_rank_promotion", "raise")
absltest.main()
| mctx-main | mctx/_src/tests/tree_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Mctx."""
from absl.testing import absltest
import mctx
class MctxTest(absltest.TestCase):
"""Test mctx can be imported correctly."""
def test_import(self):
self.assertTrue(hasattr(mctx, "gumbel_muzero_policy"))
self.assertTrue(hasattr(mctx, "muzero_policy"))
self.assertTrue(hasattr(mctx, "qtransform_by_min_max"))
self.assertTrue(hasattr(mctx, "qtransform_by_parent_and_siblings"))
self.assertTrue(hasattr(mctx, "qtransform_completed_by_mix_value"))
self.assertTrue(hasattr(mctx, "PolicyOutput"))
self.assertTrue(hasattr(mctx, "RootFnOutput"))
self.assertTrue(hasattr(mctx, "RecurrentFnOutput"))
if __name__ == "__main__":
absltest.main()
| mctx-main | mctx/_src/tests/mctx_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `seq_halving.py`."""
from absl.testing import absltest
from mctx._src import seq_halving
class SeqHalvingTest(absltest.TestCase):
def _check_visits(self, expected_results, max_num_considered_actions,
num_simulations):
"""Compares the expected results to the returned considered visits."""
self.assertLen(expected_results, num_simulations)
results = seq_halving.get_sequence_of_considered_visits(
max_num_considered_actions, num_simulations)
self.assertEqual(tuple(expected_results), results)
def test_considered_min_sims(self):
# Using exactly `num_simulations = max_num_considered_actions *
# log2(max_num_considered_actions)`.
num_sims = 24
max_num_considered = 8
expected_results = [
0, 0, 0, 0, 0, 0, 0, 0, # Considering 8 actions.
1, 1, 1, 1, # Considering 4 actions.
2, 2, 2, 2, # Considering 4 actions, round 2.
3, 3, 4, 4, 5, 5, 6, 6, # Considering 2 actions.
] # pyformat: disable
self._check_visits(expected_results, max_num_considered, num_sims)
def test_considered_extra_sims(self):
# Using more simulations than `max_num_considered_actions *
# log2(max_num_considered_actions)`.
num_sims = 47
max_num_considered = 8
expected_results = [
0, 0, 0, 0, 0, 0, 0, 0, # Considering 8 actions.
1, 1, 1, 1, # Considering 4 actions.
2, 2, 2, 2, # Considering 4 actions, round 2.
3, 3, 3, 3, # Considering 4 actions, round 3.
4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10,
11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17,
] # pyformat: disable
self._check_visits(expected_results, max_num_considered, num_sims)
def test_considered_less_sims(self):
# Using a very small number of simulations.
num_sims = 2
max_num_considered = 8
expected_results = [0, 0]
self._check_visits(expected_results, max_num_considered, num_sims)
def test_considered_less_sims2(self):
# Using `num_simulations < max_num_considered_actions *
# log2(max_num_considered_actions)`.
num_sims = 13
max_num_considered = 8
expected_results = [
0, 0, 0, 0, 0, 0, 0, 0, # Considering 8 actions.
1, 1, 1, 1, # Considering 4 actions.
2,
] # pyformat: disable
self._check_visits(expected_results, max_num_considered, num_sims)
def test_considered_not_power_of_2(self):
# Using max_num_considered_actions that is not a power of 2.
num_sims = 24
max_num_considered = 7
expected_results = [
0, 0, 0, 0, 0, 0, 0, # Considering 7 actions.
1, 1, 1, 2, 2, 2, # Considering 3 actions.
3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
] # pyformat: disable
self._check_visits(expected_results, max_num_considered, num_sims)
def test_considered_action0(self):
num_sims = 16
max_num_considered = 0
expected_results = range(num_sims)
self._check_visits(expected_results, max_num_considered, num_sims)
def test_considered_action1(self):
num_sims = 16
max_num_considered = 1
expected_results = range(num_sims)
self._check_visits(expected_results, max_num_considered, num_sims)
if __name__ == "__main__":
absltest.main()
| mctx-main | mctx/_src/tests/seq_halving_test.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A demo of Graphviz visualization of a search tree."""
from typing import Optional, Sequence
from absl import app
from absl import flags
import chex
import jax
import jax.numpy as jnp
import mctx
import pygraphviz
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("num_simulations", 32, "Number of simulations.")
flags.DEFINE_integer("max_num_considered_actions", 16,
"The maximum number of actions expanded at the root.")
flags.DEFINE_integer("max_depth", None, "The maximum search depth.")
flags.DEFINE_string("output_file", "/tmp/search_tree.png",
"The output file for the visualization.")
def convert_tree_to_graph(
tree: mctx.Tree,
action_labels: Optional[Sequence[str]] = None,
batch_index: int = 0
) -> pygraphviz.AGraph:
"""Converts a search tree into a Graphviz graph.
Args:
tree: A `Tree` containing a batch of search data.
action_labels: Optional labels for edges, defaults to the action index.
batch_index: Index of the batch element to plot.
Returns:
A Graphviz graph representation of `tree`.
"""
chex.assert_rank(tree.node_values, 2)
batch_size = tree.node_values.shape[0]
if action_labels is None:
action_labels = range(tree.num_actions)
elif len(action_labels) != tree.num_actions:
raise ValueError(
f"action_labels {action_labels} has the wrong number of actions "
f"({len(action_labels)}). "
f"Expecting {tree.num_actions}.")
def node_to_str(node_i, reward=0, discount=1):
return (f"{node_i}\n"
f"Reward: {reward:.2f}\n"
f"Discount: {discount:.2f}\n"
f"Value: {tree.node_values[batch_index, node_i]:.2f}\n"
f"Visits: {tree.node_visits[batch_index, node_i]}\n")
def edge_to_str(node_i, a_i):
node_index = jnp.full([batch_size], node_i)
probs = jax.nn.softmax(tree.children_prior_logits[batch_index, node_i])
return (f"{action_labels[a_i]}\n"
f"Q: {tree.qvalues(node_index)[batch_index, a_i]:.2f}\n" # pytype: disable=unsupported-operands # always-use-return-annotations
f"p: {probs[a_i]:.2f}\n")
graph = pygraphviz.AGraph(directed=True)
# Add root
graph.add_node(0, label=node_to_str(node_i=0), color="green")
# Add all other nodes and connect them up.
for node_i in range(tree.num_simulations):
for a_i in range(tree.num_actions):
# Index of children, or -1 if not expanded
children_i = tree.children_index[batch_index, node_i, a_i]
if children_i >= 0:
graph.add_node(
children_i,
label=node_to_str(
node_i=children_i,
reward=tree.children_rewards[batch_index, node_i, a_i],
discount=tree.children_discounts[batch_index, node_i, a_i]),
color="red")
graph.add_edge(node_i, children_i, label=edge_to_str(node_i, a_i))
return graph
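# Illustrative sketch (not part of the original file): typical usage, given a
# `policy_output` from one of the mctx policies (see `main` below):
#   graph = convert_tree_to_graph(policy_output.search_tree)
#   graph.draw("/tmp/search_tree.png", prog="dot")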
def _run_demo(rng_key: chex.PRNGKey):
"""Runs a search algorithm on a toy environment."""
# We will define a deterministic toy environment.
# The deterministic `transition_matrix` has shape `[num_states, num_actions]`.
# The `transition_matrix[s, a]` holds the next state.
transition_matrix = jnp.array([
[1, 2, 3, 4],
[0, 5, 0, 0],
[0, 0, 0, 6],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
], dtype=jnp.int32)
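  # For example, `transition_matrix[0, 1] == 2`: taking action 1 in state 0
  # moves the environment to state 2.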
# The `rewards` have shape `[num_states, num_actions]`. The `rewards[s, a]`
# holds the reward for that (s, a) pair.
rewards = jnp.array([
[1, -1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[10, 0, 20, 0],
], dtype=jnp.float32)
num_states = rewards.shape[0]
# The discount for each (s, a) pair.
discounts = jnp.where(transition_matrix > 0, 1.0, 0.0)
# Using optimistic initial values to encourage exploration.
values = jnp.full([num_states], 15.0)
# The prior policies for each state.
all_prior_logits = jnp.zeros_like(rewards)
root, recurrent_fn = _make_batched_env_model(
# Using batch_size=2 to test the batched search.
batch_size=2,
transition_matrix=transition_matrix,
rewards=rewards,
discounts=discounts,
values=values,
prior_logits=all_prior_logits)
# Running the search.
policy_output = mctx.gumbel_muzero_policy(
params=(),
rng_key=rng_key,
root=root,
recurrent_fn=recurrent_fn,
num_simulations=FLAGS.num_simulations,
max_depth=FLAGS.max_depth,
max_num_considered_actions=FLAGS.max_num_considered_actions,
)
return policy_output
def _make_batched_env_model(
batch_size: int,
*,
transition_matrix: chex.Array,
rewards: chex.Array,
discounts: chex.Array,
values: chex.Array,
prior_logits: chex.Array):
"""Returns a batched `(root, recurrent_fn)`."""
chex.assert_equal_shape([transition_matrix, rewards, discounts,
prior_logits])
num_states, num_actions = transition_matrix.shape
chex.assert_shape(values, [num_states])
# We will start the search at state zero.
root_state = 0
root = mctx.RootFnOutput(
prior_logits=jnp.full([batch_size, num_actions],
prior_logits[root_state]),
value=jnp.full([batch_size], values[root_state]),
# The embedding will hold the state index.
embedding=jnp.zeros([batch_size], dtype=jnp.int32),
)
def recurrent_fn(params, rng_key, action, embedding):
del params, rng_key
chex.assert_shape(action, [batch_size])
chex.assert_shape(embedding, [batch_size])
recurrent_fn_output = mctx.RecurrentFnOutput(
reward=rewards[embedding, action],
discount=discounts[embedding, action],
prior_logits=prior_logits[embedding],
value=values[embedding])
next_embedding = transition_matrix[embedding, action]
return recurrent_fn_output, next_embedding
return root, recurrent_fn
def main(_):
rng_key = jax.random.PRNGKey(FLAGS.seed)
jitted_run_demo = jax.jit(_run_demo)
print("Starting search.")
policy_output = jitted_run_demo(rng_key)
batch_index = 0
selected_action = policy_output.action[batch_index]
q_value = policy_output.search_tree.summary().qvalues[
batch_index, selected_action]
print("Selected action:", selected_action)
# To estimate the value of the root state, use the Q-value of the selected
# action. The Q-value is not affected by the exploration at the root node.
print("Selected action Q-value:", q_value)
graph = convert_tree_to_graph(policy_output.search_tree)
print("Saving tree diagram to:", FLAGS.output_file)
graph.draw(FLAGS.output_file, prog="dot")
if __name__ == "__main__":
app.run(main)
| mctx-main | examples/visualization_demo.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A demonstration of the policy improvement by planning with Gumbel."""
import functools
from typing import Tuple
from absl import app
from absl import flags
import chex
import jax
import jax.numpy as jnp
import mctx
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("batch_size", 256, "Batch size.")
flags.DEFINE_integer("num_actions", 82, "Number of actions.")
flags.DEFINE_integer("num_simulations", 4, "Number of simulations.")
flags.DEFINE_integer("max_num_considered_actions", 16,
"The maximum number of actions expanded at the root.")
flags.DEFINE_integer("num_runs", 1, "Number of runs on random data.")
@chex.dataclass(frozen=True)
class DemoOutput:
prior_policy_value: chex.Array
prior_policy_action_value: chex.Array
selected_action_value: chex.Array
action_weights_policy_value: chex.Array
def _run_demo(rng_key: chex.PRNGKey) -> Tuple[chex.PRNGKey, DemoOutput]:
"""Runs a search algorithm on random data."""
batch_size = FLAGS.batch_size
rng_key, logits_rng, q_rng, search_rng = jax.random.split(rng_key, 4)
# We will demonstrate the algorithm on random prior_logits.
# Normally, the prior_logits would be produced by a policy network.
prior_logits = jax.random.normal(
logits_rng, shape=[batch_size, FLAGS.num_actions])
# Defining a bandit with random Q-values. Only the Q-values of the visited
# actions will be revealed to the search algorithm.
qvalues = jax.random.uniform(q_rng, shape=prior_logits.shape)
# If we know the value under the prior policy, we can use the value to
# complete the missing Q-values. The completed Q-values will produce an
# improved policy in `policy_output.action_weights`.
raw_value = jnp.sum(jax.nn.softmax(prior_logits) * qvalues, axis=-1)
use_mixed_value = False
# The root output would be the output of MuZero representation network.
root = mctx.RootFnOutput(
prior_logits=prior_logits,
value=raw_value,
# The embedding is used only to implement the MuZero model.
embedding=jnp.zeros([batch_size]),
)
# The recurrent_fn would be provided by MuZero dynamics network.
recurrent_fn = _make_bandit_recurrent_fn(qvalues)
# Running the search.
policy_output = mctx.gumbel_muzero_policy(
params=(),
rng_key=search_rng,
root=root,
recurrent_fn=recurrent_fn,
num_simulations=FLAGS.num_simulations,
max_num_considered_actions=FLAGS.max_num_considered_actions,
qtransform=functools.partial(
mctx.qtransform_completed_by_mix_value,
use_mixed_value=use_mixed_value),
)
# Collecting the Q-value of the selected action.
selected_action_value = qvalues[jnp.arange(batch_size), policy_output.action]
# We will compare the selected action to the action selected by the
# prior policy, while using the same Gumbel random numbers.
gumbel = policy_output.search_tree.extra_data.root_gumbel
prior_policy_action = jnp.argmax(gumbel + prior_logits, axis=-1)
prior_policy_action_value = qvalues[jnp.arange(batch_size),
prior_policy_action]
# Computing the policy value under the new action_weights.
action_weights_policy_value = jnp.sum(
policy_output.action_weights * qvalues, axis=-1)
output = DemoOutput(
prior_policy_value=raw_value,
prior_policy_action_value=prior_policy_action_value,
selected_action_value=selected_action_value,
action_weights_policy_value=action_weights_policy_value,
)
return rng_key, output
def _make_bandit_recurrent_fn(qvalues):
"""Returns a recurrent_fn for a determistic bandit."""
def recurrent_fn(params, rng_key, action, embedding):
del params, rng_key
# For the bandit, the reward will be non-zero only at the root.
reward = jnp.where(embedding == 0,
qvalues[jnp.arange(action.shape[0]), action],
0.0)
# On a single-player environment, use discount from [0, 1].
# On a zero-sum self-play environment, use discount=-1.
discount = jnp.ones_like(reward)
recurrent_fn_output = mctx.RecurrentFnOutput(
reward=reward,
discount=discount,
prior_logits=jnp.zeros_like(qvalues),
value=jnp.zeros_like(reward))
next_embedding = embedding + 1
return recurrent_fn_output, next_embedding
return recurrent_fn
def main(_):
rng_key = jax.random.PRNGKey(FLAGS.seed)
jitted_run_demo = jax.jit(_run_demo)
for _ in range(FLAGS.num_runs):
rng_key, output = jitted_run_demo(rng_key)
# Printing the obtained increase of the policy value.
# The obtained increase should be non-negative.
action_value_improvement = (
output.selected_action_value - output.prior_policy_action_value)
weights_value_improvement = (
output.action_weights_policy_value - output.prior_policy_value)
print("action value improvement: %.3f (min=%.3f)" %
(action_value_improvement.mean(), action_value_improvement.min()))
print("action_weights value improvement: %.3f (min=%.3f)" %
(weights_value_improvement.mean(), weights_value_improvement.min()))
if __name__ == "__main__":
app.run(main)
| mctx-main | examples/policy_improvement_demo.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for length generalization tasks."""
import abc
from typing import TypedDict
import chex
import jax.nn as jnn
import jax.numpy as jnp
Batch = TypedDict('Batch', {'input': chex.Array, 'output': chex.Array})
class GeneralizationTask(abc.ABC):
"""A task for the generalization project.
Exposes a sample_batch method, and some details about input/output sizes,
losses and accuracies.
"""
@abc.abstractmethod
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> Batch:
"""Returns a batch of inputs/outputs."""
def pointwise_loss_fn(
self, output: chex.Array, target: chex.Array
) -> chex.Array:
"""Returns the pointwise loss between an output and a target."""
return -target * jnn.log_softmax(output)
def accuracy_fn(self, output: chex.Array, target: chex.Array) -> chex.Array:
"""Returns the accuracy between an output and a target."""
return (jnp.argmax(output, axis=-1) == jnp.argmax(target, axis=-1)).astype(
jnp.float32
)
def accuracy_mask(self, target: chex.Array) -> chex.Array:
"""Returns a mask to compute the accuracies, to remove the superfluous ones."""
# Target is a shape of shape (B, T, C) where C is the number of classes.
# We want a mask per input (B, T), so we take this shape.
return jnp.ones(target.shape[:-1])
@property
@abc.abstractmethod
def input_size(self) -> int:
"""Returns the size of the input of the models trained on this task."""
@property
@abc.abstractmethod
def output_size(self) -> int:
"""Returns the size of the output of the models trained on this task."""
def output_length(self, input_length: int) -> int:
"""Returns the length of the output, given an input length."""
del input_length
return 1
| randomized_positional_encodings-main | tasks/task.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Add two binary numbers."""
import random
from typing import Sequence
import chex
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
from randomized_positional_encodings.tasks import task
def numbers_to_variable_length_binary(
numbers: Sequence[int],
lengths: Sequence[int],
little_endian: bool = True,
) -> list[list[int]]:
"""Returns the binary notation of a certain length for a sequence of numbers.
Args:
numbers: The numbers to be converted to binary.
lengths: The lengths of the binary representations (every number uses its
own length). This argument has no effect if the binary representation is
longer than the specified length.
little_endian: Whether to use little- or big-endian notation.
"""
binary_strings = [f'{num:b}'.zfill(len) for num, len in zip(numbers, lengths)]
if little_endian:
binary_strings = [bin[::-1] for bin in binary_strings]
return [list(map(int, bin)) for bin in binary_strings]
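# Illustrative sketch (not part of the original file): a worked example of the
# little-endian conversion implemented above; `_example_binary_conversion` is a
# hypothetical helper added for clarity only.
def _example_binary_conversion():
  # 6 is '110' in binary; zero-filled to 5 digits this is '00110', and reversed
  # (little-endian) it becomes [0, 1, 1, 0, 0].
  assert numbers_to_variable_length_binary([6], [5]) == [[0, 1, 1, 0, 0]]
  # With `little_endian=False` the big-endian digits are kept: [0, 0, 1, 1, 0].
  assert numbers_to_variable_length_binary(
      [6], [5], little_endian=False) == [[0, 0, 1, 1, 0]]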
def numbers_to_fixed_length_binary(
numbers: Sequence[int],
length: int,
little_endian: bool = True,
) -> list[list[int]]:
"""Returns the binary notation of a certain length for a sequence of numbers.
Args:
numbers: The numbers to be converted to binary.
length: The length of the binary representations (all numbers use the same
length). This argument has no effect if the binary representation is
longer than the specified length.
little_endian: Whether to use little- or big-endian notation.
"""
return numbers_to_variable_length_binary(
numbers=numbers,
lengths=[length] * len(numbers),
little_endian=little_endian,
)
def expression_from_numbers(
numbers_n: Sequence[list[int]],
numbers_m: Sequence[list[int]],
) -> list[list[int]]:
"""Returns an expression with a placeholder value to denote the operation."""
return [n + [2] + m for n, m in zip(numbers_n, numbers_m)]
class BinaryAddition(task.GeneralizationTask):
"""A task with the goal of summing two numbers in binary (little-endian).
The input is a string of the form `first_number+second_number` in
(little-endian) binary notation (e.g., `01001+011`). The goal of the agent is
to output the result, also in (little-endian) binary form (i.e., in the
  example `18 + 6 = 24 = 00011`). The end of the sum is denoted with a
  termination token, and the output is padded with 0s to one more than the
  input length (i.e., the output has values in `{0, 1, 2}`).
  Examples:
    001 + 01101 = 0101120000 (4 + 22 = 26)
    1001 + 000001 = 100101200000 (9 + 32 = 41)
"""
def _sample_expressions_and_results(
self,
batch_size: int,
length: int,
) -> tuple[Sequence[list[int]], Sequence[list[int]]]:
"""Samples pairs of numbers and sums them in (little-endian) binary.
    We use Python's bignums, which support arbitrary-precision integers, to
    perform addition of two potentially very large values (each roughly of size
    `2 ** (length // 2)`).
Args:
batch_size: The number of expressions and results to sample.
length: The length of the input expression containing the two numbers and
the separation token.
Returns:
The expression and the sum of the two numbers. The expression has the
format: `[first_number, 2, second_number]`, where the numbers are in
(little-endian) binary notation. The sum is also in (little-endian) binary
notation, without leading (i.e., ending) zeros.
"""
# If `length <= 2`, we just sample a binary value and return it (without
# leading zeros in little-endian notation).
if length <= 2:
      # Since `length <= 2`, we can use `np.random` without overflow errors.
numbers = np.random.randint(0, 2**length - 1, size=(batch_size))
expressions = numbers_to_fixed_length_binary(numbers, length)
results = numbers_to_fixed_length_binary(numbers, 0)
return expressions, results
# We only use `length - 1` tokens for the two values to account for the `+`.
length_n = np.random.randint(1, length - 1, size=(batch_size,))
length_m = length - 1 - length_n
integer_n = [random.randint(1, 2 ** int(len_n) - 1) for len_n in length_n]
integer_m = [random.randint(1, 2 ** int(len_m) - 1) for len_m in length_m]
binary_n = numbers_to_variable_length_binary(integer_n, length_n)
binary_m = numbers_to_variable_length_binary(integer_m, length_m)
expressions = expression_from_numbers(binary_n, binary_m)
integer_sum = list(map(sum, zip(integer_n, integer_m)))
results = numbers_to_fixed_length_binary(integer_sum, length=0)
return expressions, results
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of binary additions and their results."""
del rng
expressions, results = self._sample_expressions_and_results(
batch_size=batch_size, length=length
)
# Append the termination token to the result and pad the result with zeros
# to match the output length (accounting for the termination token).
results = [res + [2] + [0] * (length - len(res)) for res in results]
expressions = jnp.array(expressions, dtype=jnp.int32)
results = jnp.array(results, dtype=jnp.int32)
return {
'input': jnn.one_hot(expressions, self.input_size),
'output': jnn.one_hot(results, self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 3
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 3
def output_length(self, input_length: int) -> int:
return input_length + 1
def accuracy_mask(self, target: chex.Array) -> chex.Array:
"""Computes a mask that ignores everything after the termination token.
Args:
target: Target tokens of shape `(batch_size, output_length, output_size)`.
Returns:
The mask of shape `(batch_size, output_length)`.
"""
batch_size, length, _ = target.shape
termination_indices = jnp.argmax(
jnp.argmax(target, axis=-1),
axis=-1,
keepdims=True,
)
indices = jnp.tile(jnp.arange(length), (batch_size, 1))
return indices <= termination_indices
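# Illustrative sketch (not part of the original file): the shape of a sampled
# batch; `_example_binary_addition_batch` is a hypothetical helper. The rng
# argument is unused by this task, so `None` is passed here.
def _example_binary_addition_batch():
  batch = BinaryAddition().sample_batch(None, batch_size=4, length=8)
  # Inputs are one-hot over {0, 1, 2}; outputs have one extra position for the
  # termination token, so their length is `length + 1`.
  assert batch['input'].shape == (4, 8, 3)
  assert batch['output'].shape == (4, 9, 3)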
| randomized_positional_encodings-main | tasks/cs/binary_addition.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multiply two binary numbers."""
import random
from typing import Sequence
import chex
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
from randomized_positional_encodings.tasks import task
from randomized_positional_encodings.tasks.cs import binary_addition
class BinaryMultiplication(task.GeneralizationTask):
"""A task with the goal of multiplying two numbers in binary (little-endian).
  The input is a string of the form `first_number*second_number` in
  (little-endian) binary notation (e.g., `01001*011`). The goal of the agent is
  to output the result, also in (little-endian) binary form (i.e., in the
  example `18 * 6 = 108 = 0011011`). The output is padded with 0s to match the
input length, and the end of the product is denoted with a termination token
(i.e., the output has values in `{0, 1, 2}`).
Examples:
001 * 01101 = 000110120 (4 * 22 = 88)
1001 * 000001 = 00000100120 (9 * 32 = 288)
"""
def _sample_expressions_and_results(
self,
batch_size: int,
length: int,
) -> tuple[Sequence[list[int]], Sequence[list[int]]]:
"""Samples pairs of numbers and multiplies them in (little-endian) binary.
    We use Python's bignums, which support arbitrary-precision integers, to
    perform multiplication of two potentially very large values (each roughly
    of size `2 ** (length // 2)`).
Args:
batch_size: The number of expressions and results to sample.
length: The length of the input expression containing the two numbers and
the separation token.
Returns:
The expression and the product of the two numbers. The expression has the
format: `[first_number, 2, second_number]`, where the numbers are in
(little-endian) binary notation. The product is also in (little-endian)
binary notation, without leading (i.e., ending) zeros.
"""
# If `length <= 2`, we just sample a binary sequence for the expression and
# arbitrarily set the result to a fixed value (`[]` for `length == 1` and
# `[0]` for `length == 2`) to maintain the invariant that the result has
    # length at most `length - 1`.
    if length <= 2:
      # Since `length <= 2`, we can use `np.random` without overflow errors.
numbers = np.random.randint(0, 2**length - 1, size=(batch_size))
expressions = binary_addition.numbers_to_fixed_length_binary(
numbers, length
)
return expressions, [[0] * (length - 1)] * batch_size
# We only use `length - 1` tokens for the two values to account for the `*`.
length_n = np.random.randint(1, length - 1, size=(batch_size,))
length_m = length - 1 - length_n
integer_n = [random.randint(1, 2 ** int(len_n) - 1) for len_n in length_n]
integer_m = [random.randint(1, 2 ** int(len_m) - 1) for len_m in length_m]
binary_n = binary_addition.numbers_to_variable_length_binary(
integer_n, length_n
)
binary_m = binary_addition.numbers_to_variable_length_binary(
integer_m, length_m
)
expressions = binary_addition.expression_from_numbers(binary_n, binary_m)
integer_prod = [int_n * int_m for int_n, int_m in zip(integer_n, integer_m)]
results = binary_addition.numbers_to_fixed_length_binary(
integer_prod, length=0
)
return expressions, results
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of binary multiplications and their results."""
del rng
expressions, results = self._sample_expressions_and_results(
batch_size=batch_size, length=length
)
# Append the termination token to the result and pad the result with zeros
# to match the output length (accounting for the termination token). The
# binary representation of the result will have at most length
# `#(first_number) + #(second_number)`, where #() denotes the number of
# digits of the binary notation. Since we use the token `2` to separate the
# two numbers in the expression, the result will have length at most
# `length - 1`, and thus by appending the termination token above it will
# have length at most `length`, as desired.
results = [res + [2] + [0] * (length - 1 - len(res)) for res in results]
expressions = jnp.array(expressions, dtype=jnp.int32)
results = jnp.array(results, dtype=jnp.int32)
return {
'input': jnn.one_hot(expressions, self.input_size),
'output': jnn.one_hot(results, self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 3
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 3
def output_length(self, input_length: int) -> int:
return input_length
def accuracy_mask(self, target: chex.Array) -> chex.Array:
"""Computes a mask that ignores everything after the termination token.
Args:
target: Target tokens of shape `(batch_size, output_length, output_size)`.
Returns:
The mask of shape `(batch_size, output_length)`.
"""
batch_size, length, _ = target.shape
termination_indices = jnp.argmax(
jnp.argmax(target, axis=-1),
axis=-1,
keepdims=True,
)
indices = jnp.tile(jnp.arange(length), (batch_size, 1))
return indices <= termination_indices
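# Illustrative sketch (not part of the original file): the behavior of
# `accuracy_mask` above; `_example_accuracy_mask` is a hypothetical helper.
def _example_accuracy_mask():
  # One-hot encode the token sequence [1, 0, 2, 0]: the termination token `2`
  # is at index 2, so positions 0..2 are scored and position 3 is ignored.
  target = jnn.one_hot(jnp.array([[1, 0, 2, 0]]), num_classes=3)
  mask = BinaryMultiplication().accuracy_mask(target)
  assert mask.tolist() == [[True, True, True, False]]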
| randomized_positional_encodings-main | tasks/cs/binary_multiplication.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sort tokens from a fixed alphabet (i.e., bucket sort)."""
import functools
import chex
import jax
from jax import nn as jnn
from jax import numpy as jnp
from jax import random as jrandom
from randomized_positional_encodings.tasks import task
class BucketSort(task.GeneralizationTask):
"""A task with the goal of sorting tokens from a fixed alphabet.
The input string is composed of tokens from a fixed-size alphabet, i.e.,
`{0, 1, ..., vocab_size - 1}`, and the goal is to return the sorted string (in
lexicographically increasing order).
Examples:
10204112 -> 00111224 (with `vocab_size = 5`)
1110001 -> 0001111 (with `vocab_size = 2`)
"""
def __init__(self, *args, vocab_size: int = 5, **kwargs) -> None:
"""Initializes the task.
Args:
*args: The args for the base task class.
vocab_size: The size of the alphabet.
**kwargs: The kwargs for the base task class.
"""
super().__init__(*args, **kwargs)
self._vocab_size = vocab_size
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of strings and tokens sorted by (inc.) occurrence."""
strings = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=self._vocab_size
)
sorted_strings = jnp.sort(strings, axis=-1)
return {
'input': jnn.one_hot(strings, num_classes=self.input_size),
'output': jnn.one_hot(sorted_strings, num_classes=self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._vocab_size
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._vocab_size
def output_length(self, input_length: int) -> int:
"""Returns the output length for a given input length."""
return input_length
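# Illustrative sketch (not part of the original file): sampling a batch and
# checking that outputs are sorted inputs; `_example_bucket_sort_batch` is a
# hypothetical helper added for clarity only.
def _example_bucket_sort_batch():
  batch = BucketSort(vocab_size=5).sample_batch(
      jrandom.PRNGKey(0), batch_size=2, length=8)
  strings = jnp.argmax(batch['input'], axis=-1)
  sorted_strings = jnp.argmax(batch['output'], axis=-1)
  # The output is the input sorted in increasing order, token by token.
  assert (jnp.sort(strings, axis=-1) == sorted_strings).all()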
| randomized_positional_encodings-main | tasks/cs/bucket_sort.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute the floor of the square root of a binary number."""
import math
import random
import chex
import jax.nn as jnn
import jax.numpy as jnp
from randomized_positional_encodings.tasks import task
from randomized_positional_encodings.tasks.cs import binary_addition
class ComputeSqrt(task.GeneralizationTask):
"""A task with the goal of computing the square root of a binary number.
The input is a number in binary (big-endian), and the output is the floor of
the square root of this number, also in binary.
  Note that the output length, i.e., the length of the square root in binary,
  is always ceil(input_length / 2) (because log(sqrt(x)) = 1/2 log(x)).
Examples:
100101 = 37 -> square root is 6.08... -> floor(6.08) = 6 -> 101
111 = 7 -> square root is 2.64 -> floor(2.64) = 2 -> 10
"""
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of binary numbers and their square roots, in binary."""
del rng
numbers = [random.randint(1, 2**length - 1) for _ in range(batch_size)]
binary_numbers = binary_addition.numbers_to_fixed_length_binary(
numbers, length=length, little_endian=False
)
sqrts = list(map(math.isqrt, numbers))
binary_sqrts = binary_addition.numbers_to_fixed_length_binary(
sqrts, length=self.output_length(length), little_endian=False
)
binary_numbers = jnp.array(binary_numbers, jnp.int32)
binary_sqrts = jnp.array(binary_sqrts, jnp.int32)
inputs = jnn.one_hot(binary_numbers, self.input_size)
output = jnn.one_hot(binary_sqrts, self.output_size)
return {'input': inputs, 'output': output}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 2
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 2
def output_length(self, input_length: int) -> int:
return math.ceil(input_length / 2)
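# Illustrative sketch (not part of the original file): the output length of
# this task; `_example_compute_sqrt_lengths` is a hypothetical helper. The rng
# argument is unused by this task, so `None` is passed here.
def _example_compute_sqrt_lengths():
  task_instance = ComputeSqrt()
  # floor(sqrt(x)) has roughly half as many binary digits as x, hence the
  # ceil(input_length / 2) output length.
  assert task_instance.output_length(6) == 3
  assert task_instance.output_length(7) == 4
  batch = task_instance.sample_batch(None, batch_size=2, length=6)
  assert batch['input'].shape == (2, 6, 2)
  assert batch['output'].shape == (2, 3, 2)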
| randomized_positional_encodings-main | tasks/cs/compute_sqrt.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Duplicate a string."""
import functools
import chex
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from randomized_positional_encodings.tasks import task
class DuplicateString(task.GeneralizationTask):
"""A task with the goal of duplicating a string.
The input is a string s_1 ... s_n composed of symbols from a finite set S. The
  output is the same string repeated twice without any separator, i.e.:
s_1 ... s_n s_1 ... s_n
Examples:
101 -> 101 101
111111 -> 111111 111111
  In the paper, we use only binary strings (i.e., S = {0, 1}).
Note that the sampling is jittable so this task is fast.
"""
def __init__(self, vocab_size: int, *args, duplication: int = 2, **kwargs):
"""Initializes the remember_string task.
Args:
vocab_size: The size of the alphabet.
*args: Args for the base task class.
duplication: Number of times the string should be duplicated.
**kwargs: Kwargs for the base task class.
"""
super().__init__(*args, **kwargs)
self._vocab_size = vocab_size
self._duplication = duplication
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of strings and their copies."""
strings = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=self._vocab_size
)
one_hot_strings = jnn.one_hot(strings, num_classes=self._vocab_size)
output = jnp.concatenate([one_hot_strings] * self._duplication, axis=1)
return {"input": one_hot_strings, "output": output}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._vocab_size
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._vocab_size
def output_length(self, input_length: int) -> int:
"""Returns the output length for a given input length."""
return self._duplication * input_length
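# Illustrative sketch (not part of the original file): the output is the input
# concatenated with itself; `_example_duplicate_string_batch` is a hypothetical
# helper added for clarity only.
def _example_duplicate_string_batch():
  batch = DuplicateString(vocab_size=2).sample_batch(
      jrandom.PRNGKey(0), batch_size=2, length=3)
  tokens_in = jnp.argmax(batch["input"], axis=-1)
  tokens_out = jnp.argmax(batch["output"], axis=-1)
  assert (tokens_out == jnp.concatenate([tokens_in, tokens_in], axis=1)).all()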
| randomized_positional_encodings-main | tasks/cs/duplicate_string.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sort a string by the parity of the indices (odd indices first)."""
import functools
import chex
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from randomized_positional_encodings.tasks import task
class OddsFirst(task.GeneralizationTask):
"""A task with the goal of outputting a string's tokens at odd indices first.
The input is a string s_1 ... s_n composed of symbols from a finite set S. The
  output is the same string, but with the values at odd indices placed first:
  s_1 s_3 s_5 ... s_2 s_4 s_6 ...
Examples:
00110101 -> 0100 0111
110 -> 10 1
  In the paper, we use only binary strings (i.e., S = {0, 1}).
Note that the sampling is jittable so this task is fast.
"""
def __init__(self, vocab_size: int, *args, **kwargs):
"""Initializes the odds_first task.
Args:
vocab_size: The size of the alphabet.
*args: Args for the base task class.
**kwargs: Kwargs for the base task class.
"""
super().__init__(*args, **kwargs)
self._vocab_size = vocab_size
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of strings and their outputs."""
strings = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=self._vocab_size
)
one_hot_strings = jnn.one_hot(strings, num_classes=self._vocab_size)
output = jnp.concatenate(
[one_hot_strings[:, 1::2], one_hot_strings[:, ::2]], axis=1
)
return {"input": one_hot_strings, "output": output}
@property
def input_size(self) -> int:
"""Returns the input size for the model."""
return self._vocab_size
@property
def output_size(self) -> int:
"""Returns the output size for the model."""
return self._vocab_size
def output_length(self, input_length: int) -> int:
"""Returns the output length for the model."""
return input_length
| randomized_positional_encodings-main | tasks/cs/odds_first.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Predict the missing symbol in a duplicated string."""
import functools
import chex
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from randomized_positional_encodings.tasks import task
class MissingDuplicateString(task.GeneralizationTask):
"""A task with the goal of finding the missing symbol in a duplicated string.
Given a binary string that is presented twice with exactly one element omitted
(denoted by the placeholder token `2`), predict the value of that element.
Thus, an agent trying to solve this task needs to recognize the underlying
duplicated string to be able to produce the correct output.
If the length is odd, the duplicated strings of length `length // 2` are
padded with the empty token `3`.
  Examples:
    01100210 -> 1 (the substring is 0110, so the missing value is 1)
    1011213 -> 0 (the substring is 101, so the missing value is 0)
"""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of strings and the expected class."""
# For `length == 1`, we cannot meaningfully define substrings of length
# `length // 2`, so we arbitrarily set the inputs and outputs to `1`.
if length == 1:
return {
'input': jnn.one_hot(
jnp.ones((batch_size, length)), num_classes=self.input_size
),
'output': jnn.one_hot(
jnp.ones((batch_size,)), num_classes=self.output_size
),
}
strings_rng, indices_rng = jrandom.split(rng)
strings = jrandom.randint(
strings_rng, shape=(batch_size, length // 2), minval=0, maxval=2
)
duplicated_strings = jnp.concatenate((strings, strings), axis=-1)
indices = jrandom.randint(
indices_rng,
shape=(batch_size,),
minval=0,
maxval=duplicated_strings.shape[1],
)
output = jax.vmap(lambda x, y: x[y])(duplicated_strings, indices)
masked_strings = jax.vmap(lambda x, y: x.at[y].set(2))(
duplicated_strings, indices
)
# If `length` is odd, we pad the strings with the empty token `3` at the end
# to ensure that the final input length is equal to `length` given the two
# substrings of length `length // 2`.
padding = jnp.full((batch_size, length % 2), fill_value=3)
padded_strings = jnp.concatenate((masked_strings, padding), axis=-1)
return {
'input': jnn.one_hot(padded_strings, num_classes=self.input_size),
'output': jnn.one_hot(output, num_classes=self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 4
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 2
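# Illustrative sketch (not part of the original file): the structure of a
# sampled batch; `_example_missing_duplicate_batch` is a hypothetical helper.
def _example_missing_duplicate_batch():
  batch = MissingDuplicateString().sample_batch(
      jrandom.PRNGKey(0), batch_size=2, length=7)
  tokens = jnp.argmax(batch['input'], axis=-1)
  # Each row is a length-3 substring repeated twice, with one element replaced
  # by the placeholder `2`, plus a trailing padding token `3` (odd length).
  assert tokens.shape == (2, 7)
  assert (tokens[:, -1] == 3).all()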
| randomized_positional_encodings-main | tasks/cs/missing_duplicate_string.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modular arithmetic with brackets."""
import collections
from typing import Sequence
import chex
import jax.nn as jnn
import numpy as np
import tqdm
import tree
from randomized_positional_encodings.tasks import task
def generate_one_expression_and_result(
modulus: int, length: int, mult: bool = False
) -> tuple[str, int]:
"""Returns a modular arithmetic expression with brackets, and its result.
The values in the expression are in {0, 1, ..., modulus-1}. The allowed
operations are either {+, -} (mult=False) or {+, -, *} (mult=True).
Args:
modulus: The modulus to use for the expression.
length: The length of the expression.
mult: Whether to include the multiplication operator in the expressions.
Raises:
ValueError if length < 1.
"""
# Generates a terminal (digit).
def gen_terminal():
terminal = np.random.randint(low=0, high=modulus)
return str(terminal), terminal
# If length is less than 1, issue an error.
if length < 1:
raise ValueError(f"Can't generate expressions of length < 1. Got {length}.")
# If length is less than 5, generate a digit d, -d, (d), or (-d).
if length == 1:
return gen_terminal()
elif length == 2:
term_str, term_val = gen_terminal()
return '-' + term_str, -term_val % modulus
elif length == 3:
term_str, term_val = gen_terminal()
return '(' + term_str + ')', term_val
elif length == 4:
term_str, term_val = gen_terminal()
return '(-' + term_str + ')', -term_val % modulus
# If length is >= 5, sample an operator with brackets.
# First split the length into a left and right part.
left_length = np.random.randint(low=1, high=length - 3)
right_length = length - (left_length + 3)
left_str, left_val = generate_one_expression_and_result(
modulus, left_length, mult=mult
)
right_str, right_val = generate_one_expression_and_result(
modulus, right_length, mult=mult
)
# Now sample an operator and return.
maxop = 3 if mult else 2
op = np.random.randint(low=0, high=maxop)
if op == 0:
return (
'(' + left_str + '+' + right_str + ')',
(left_val + right_val) % modulus,
)
elif op == 1:
return (
'(' + left_str + '-' + right_str + ')',
(left_val - right_val) % modulus,
)
else:
return (
'(' + left_str + '*' + right_str + ')',
(left_val * right_val) % modulus,
)
def generate_raw_dataset(
n: int,
lengths: Sequence[int],
modulus: int,
mult: bool = False,
with_tqdm: bool = False,
) -> dict[int, dict[str, np.ndarray]]:
"""Generates a dataset of maths expressions with brackets, and their results.
Args:
n: The number of datapoints in the dataset.
lengths: The lengths of the sequences to generate. n is evenly distributed
over these lengths.
modulus: Modulus used to compute the expressions.
mult: Whether to include the multiplication operator in the expressions.
with_tqdm: As the computation might be long, whether to add a tqdm progress
bar or not.
Returns:
    A dict whose keys are the passed lengths and whose values are dicts with
    keys 'expressions' and 'results', mapping to the data numpy arrays.
"""
alphabet_to_int = {
'+': modulus,
'-': modulus + 1,
'*': modulus + 2,
'(': modulus + 3,
')': modulus + 4,
'x': modulus + 5,
'=': modulus + 6,
}
for x in range(modulus):
alphabet_to_int[str(x)] = x
sequences = collections.defaultdict(
lambda: { # pylint: disable=g-long-lambda
'expressions': [],
'results': [],
}
)
range_lengths = tqdm.tqdm(lengths) if with_tqdm else lengths
for length in range_lengths:
for _ in range(n // len(lengths)):
seq, label = generate_one_expression_and_result(modulus, length, mult)
seq = [alphabet_to_int[x] for x in seq]
sequences[length]['expressions'].append(seq)
sequences[length]['results'].append(label)
sequences = tree.traverse(
lambda l: np.array(l, dtype=np.int32) if isinstance(l, list) else l,
sequences,
top_down=False,
)
return dict(sequences)
class ModularArithmeticBrackets(task.GeneralizationTask):
"""A task with the goal of reducing an arithmetic expression with brackets."""
def __init__(self, modulus: int, *args, mult: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self._modulus = modulus
self._mult = mult
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of inputs/outputs."""
np.random.seed(rng[0])
batch = generate_raw_dataset(
batch_size, lengths=[length], modulus=self._modulus, mult=self._mult
)[length]
inputs = jnn.one_hot(batch['expressions'], self.input_size)
output = jnn.one_hot(batch['results'], self.output_size)
return {'input': inputs, 'output': output}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._modulus + 6
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._modulus
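# Example usage: a minimal sketch. The generators rely on the global numpy
# seed, so we set it explicitly for reproducibility; `modulus=5` and
# `mult=True` mirror the preset in `experiments/constants.py`.
def _example_modular_arithmetic_brackets():
  """Generates a single expression and a tiny raw dataset."""
  np.random.seed(0)
  expression, result = generate_one_expression_and_result(
      modulus=5, length=7, mult=True
  )
  # `expression` is a bracketed expression string of length 7 and `result` is
  # its value modulo 5.
  data = generate_raw_dataset(n=4, lengths=[7], modulus=5, mult=True)
  # `data[7]['expressions']` has shape (4, 7) with token ids, and
  # `data[7]['results']` has shape (4,) with labels in {0, ..., 4}.
  return expression, result, data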
| randomized_positional_encodings-main | tasks/dcf/modular_arithmetic_brackets.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute the reverse of an input string."""
import functools
import chex
import jax
import jax.numpy as jnp
from randomized_positional_encodings.tasks import task
from randomized_positional_encodings.tasks.cs import duplicate_string
class ReverseString(duplicate_string.DuplicateString):
"""A task with the goal of reversing a given string.
The input is a string s_1 ... s_n composed of symbols from a finite set S. The
  output is the string, reversed, i.e., s_n ... s_1.
Examples:
011010 -> 010110
123021 -> 120321
  In the paper, we use only binary strings (i.e., S = {0, 1}).
Note that the sampling is jittable so this task is fast.
"""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of strings and their reversed version."""
batch = super().sample_batch(rng, batch_size, length)
batch['output'] = jnp.flip(batch['input'], axis=1)
return batch
def output_length(self, input_length: int) -> int:
"""Returns the output length for a given input length."""
return input_length
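# Example usage: a minimal sketch, assuming `vocab_size=2` as configured in
# `experiments/constants.py` and the keyword-argument call pattern used in
# `experiments/training.py`.
def _example_reverse_string() -> task.Batch:
  """Samples a batch whose outputs are the inputs flipped along time."""
  reverse_task = ReverseString(vocab_size=2)
  batch = reverse_task.sample_batch(
      jax.random.PRNGKey(0), batch_size=4, length=6
  )
  # `output` is `input` flipped along the time axis; with vocab_size=2 both
  # entries are expected to have shape (4, 6, 2).
  return batch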
| randomized_positional_encodings-main | tasks/dcf/reverse_string.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Solve for the value of an unknown variable in an equation."""
import collections
from typing import Sequence
import chex
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
import tqdm
import tree
from randomized_positional_encodings.tasks import task
from randomized_positional_encodings.tasks.dcf import modular_arithmetic_brackets as mab
def generate_equation_and_solution(
modulus: int, length: int, mult: bool = False
) -> tuple[str, int]:
"""Returns a modular arithmetic equation with brackets, and its solution.
The values are in {0, 1, ..., modulus-1}, and the unknown
value is x. The allowed operations are either {+, -} (mult=False) or
{+, -, *} (mult=True).
Warning: if mult=True, x might have multiple valid solutions.
Args:
modulus: The modulus to use for the expression.
length: The length of the expression.
mult: Whether to include the multiplication operator in the expressions.
Raises:
ValueError if the length is < 3.
"""
# Generate the expression.
expr, val = mab.generate_one_expression_and_result(
modulus, length - 2, mult=mult
)
# Replace random digit with 'x'.
idx = np.random.randint(low=0, high=len(expr))
digits = [str(n) for n in range(modulus)]
while expr[idx] not in digits:
idx = (idx + 1) % (length - 2)
solution = int(expr[idx])
equation = expr[:idx] + 'x' + expr[idx + 1 :] + '=' + str(val)
return equation, solution
def generate_raw_dataset(
n: int,
lengths: Sequence[int],
modulus: int,
mult: bool = False,
with_tqdm: bool = False,
) -> dict[int, dict[str, np.ndarray]]:
"""Generates a dataset of equations and their solutions.
Args:
n: The number of datapoints in the dataset.
lengths: The lengths of the sequences to generate. n is evenly distributed
over these lengths.
modulus: Modulus used to compute the expressions.
mult: Whether to include the multiplication operator in the expressions.
with_tqdm: As the computation might be long, whether to add a tqdm progress
bar or not.
Returns:
    A dict whose keys are the passed lengths and whose values are dicts with
    keys 'equations' and 'solutions', mapping to the data numpy arrays.
"""
alphabet_to_int = {
'+': modulus,
'-': modulus + 1,
'(': modulus + 2,
')': modulus + 3,
'x': modulus + 4,
'=': modulus + 5,
}
for x in range(modulus):
alphabet_to_int[str(x)] = x
sequences = collections.defaultdict(
lambda: { # pylint: disable=g-long-lambda
'equations': [],
'solutions': [],
}
)
range_lengths = tqdm.tqdm(lengths) if with_tqdm else lengths
for length in range_lengths:
for _ in range(n // len(lengths)):
seq, label = generate_equation_and_solution(modulus, length, mult=mult)
seq = [alphabet_to_int[x] for x in seq]
sequences[length]['equations'].append(seq)
sequences[length]['solutions'].append(label)
# Convert the list of numbers we have to arrays at the leaves.
sequences = tree.traverse(
lambda l: np.array(l, dtype=np.int32) if isinstance(l, list) else l,
sequences,
top_down=False,
)
return dict(sequences)
class SolveEquation(task.GeneralizationTask):
  """A task with the goal of solving a modular equation for an unknown."""
def __init__(self, modulus: int, *args, **kwargs):
super().__init__(*args, **kwargs)
self._modulus = modulus
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of inputs/outputs."""
np.random.seed(rng[0])
if length < 3:
return {
'input': jnn.one_hot(
jnp.zeros((batch_size, length)), num_classes=self.input_size
),
'output': jnn.one_hot(
jnp.zeros((batch_size,)), num_classes=self.output_size
),
}
batch = generate_raw_dataset(
batch_size, lengths=[length], modulus=self._modulus
)[length]
inputs = jnn.one_hot(batch['equations'], self.input_size)
output = jnn.one_hot(batch['solutions'], self.output_size)
return {'input': inputs, 'output': output}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._modulus + 6
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._modulus
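# Example usage: a minimal sketch. The generator relies on the global numpy
# seed, which we set explicitly; `modulus=5` mirrors the preset in
# `experiments/constants.py`.
def _example_solve_equation() -> tuple[str, int]:
  """Generates a single equation with one unknown and returns its solution."""
  np.random.seed(0)
  equation, solution = generate_equation_and_solution(modulus=5, length=9)
  # `equation` is a bracketed expression of length 7 in which one digit has
  # been replaced by 'x', followed by '=' and the expression's value, so the
  # full string has length 9. `solution` is the digit that 'x' replaced.
  return equation, solution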
| randomized_positional_encodings-main | tasks/dcf/solve_equation.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Manipulate an input stack, using the input actions."""
import chex
import jax.nn as jnn
import jax.numpy as jnp
import numpy as np
from randomized_positional_encodings.tasks import task
class StackManipulation(task.GeneralizationTask):
"""A task with the goal of following instructions and returning the end stack.
The input is composed of a stack of 0s and 1s followed by a sequence of
instructions POP/PUSH 0/PUSH 1 (represented by 2s/3s/4s). The input stack is
given bottom-to-top, and the agent needs to execute the instructions given
(left-to-rigth) and output the final stack top-to-bottom (i.e., as if it were
popping the final stack). If a POP action is to be called on an empty stack,
the action is ignored. The output is padded with 0s to match the input length
  + 1 (to accommodate the termination token), and the end of the final stack
is denoted with the termination symbol 2 (i.e., the output has values in {0,
1, 2}).
Examples:
0 1 1 0 PUSH 1 POP POP
initial 0 1 1 0 (the stack is received bottom-to-top)
PUSH 1 0 1 1 0 1
POP 0 1 1 0
POP 0 1 1
-> 1 1 0 2 0 0 0 0 (the stack is returned top-to-bottom)
1 1 0 POP POP POP
initial 1 1 0
POP 1 1
POP 1
POP
-> 2 0 0 0 0 0 0 0 (the stack is empty and padded with zeros)
"""
def _sample_expression_and_result(
self, length: int
) -> tuple[np.ndarray, list[int]]:
"""Returns an expression with stack instructions, and the result stack."""
if length == 1:
value = np.random.randint(low=0, high=2, size=(1,))
return value, list(value)
# Initialize the stack content and the actions (POP/PUSH).
stack_length = np.random.randint(low=1, high=length)
stack = np.random.randint(low=0, high=2, size=(stack_length,))
actions = np.random.randint(low=2, high=5, size=(length - stack_length,))
# Apply the actions on the stack.
current_stack = list(stack)
for action in actions:
if action == 2: # POP
if current_stack:
current_stack.pop()
elif action in [3, 4]: # PUSH a 0 (case 3) or a 1 (case 4)
current_stack.append(action - 3)
return np.concatenate([stack, actions]), current_stack[::-1]
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of strings and the expected class."""
np.random.seed(rng[0])
expressions, results = [], []
for _ in range(batch_size):
expression, result = self._sample_expression_and_result(length)
expressions.append(expression)
# Append the termination token to the result.
result += [self.output_size - 1]
# Pad the result with zeros to match the input length (accounting for the
# termination token).
result += [0] * (length + 1 - len(result))
results.append(result)
expressions = jnp.array(expressions)
results = jnp.array(results)
inputs = jnn.one_hot(expressions, self.input_size)
output = jnn.one_hot(results, self.output_size)
return {'input': inputs, 'output': output}
@property
def input_size(self) -> int:
"""Returns the input size for the models.
The value is 5 because we have two possible tokens in the stack (0, 1), plus
three tokens to describe the PUSH 0, PUSH 1, and POP actions.
"""
return 5
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 3
def output_length(self, input_length: int) -> int:
"""Returns the output length of the task."""
return input_length + 1
def accuracy_mask(self, target: chex.Array) -> chex.Array:
"""Computes mask that ignores everything after the termination tokens.
Args:
target: Target tokens of shape `(batch_size, output_length, output_size)`.
Returns:
The mask of shape `(batch_size, output_length)`.
"""
batch_size, length, _ = target.shape
termination_indices = jnp.argmax(
jnp.argmax(target, axis=-1),
axis=-1,
keepdims=True,
)
indices = jnp.tile(jnp.arange(length), (batch_size, 1))
return indices <= termination_indices
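# Example usage: a minimal sketch. Since `sample_batch` only uses `rng[0]` to
# seed numpy, a raw uint32 array stands in for the PRNG key here; the
# no-argument construction mirrors `experiments/constants.py`.
def _example_stack_manipulation() -> task.Batch:
  """Samples a small batch of stack programs and their resulting stacks."""
  stack_task = StackManipulation()
  batch = stack_task.sample_batch(
      np.array([0, 0], dtype=np.uint32), batch_size=4, length=8
  )
  # batch['input'] has shape (4, 8, 5): stack values and PUSH/POP actions.
  # batch['output'] has shape (4, 9, 3): the final stack (top-to-bottom), the
  # termination token 2, and zero padding up to length + 1.
  return batch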
| randomized_positional_encodings-main | tasks/dcf/stack_manipulation.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute whether the number of 01's and 10's is even."""
import functools
import chex
import jax
from jax import nn as jnn
from jax import numpy as jnp
from jax import random as jrandom
from randomized_positional_encodings.tasks import task
class EvenPairs(task.GeneralizationTask):
"""A task with the goal of checking whether the number of 01s and 10s is even.
  The input is a binary string, composed of 0s and 1s. If the combined number
  of '01' and '10' pairs is even, the class is 0; otherwise, it is 1.
Examples:
001110 -> 1 '10' and 1 '01' -> class 0
0101001 -> 2 '10' and 3 '01' -> class 1
Note the sampling is jittable so this task is fast.
"""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of strings and the expected class."""
strings = jrandom.randint(
rng,
shape=(batch_size, length),
minval=0,
maxval=2,
)
one_hot_strings = jnn.one_hot(strings, num_classes=2)
unequal_pairs = jnp.logical_xor(strings[:, :-1], strings[:, 1:])
odd_unequal_pairs = jnp.sum(unequal_pairs, axis=-1) % 2
return {
'input': one_hot_strings,
'output': jnn.one_hot(odd_unequal_pairs, num_classes=self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 2
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 2
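# Example usage: a minimal sketch following the keyword-argument call pattern
# used in `experiments/training.py`.
def _example_even_pairs() -> task.Batch:
  """Samples a batch of binary strings and their even/odd pair-count class."""
  even_pairs_task = EvenPairs()
  batch = even_pairs_task.sample_batch(
      jrandom.PRNGKey(0), batch_size=4, length=6
  )
  # batch['input'] has shape (4, 6, 2) and batch['output'] has shape (4, 2).
  return batch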
| randomized_positional_encodings-main | tasks/regular/even_pairs.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute whether the number of 1s in a string is even."""
import functools
import chex
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from randomized_positional_encodings.tasks import task
class ParityCheck(task.GeneralizationTask):
"""A task with the goal of counting the number of '1' in a string, modulo 2.
The input is a string, composed of 0s and 1s. If the result is even, the class
is 0, otherwise it's 1.
Examples:
1010100 -> 3 1s (odd) -> class 1
01111 -> 4 1s (even) -> class 0
Note that the sampling is jittable so this task is fast.
"""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of strings and the expected class."""
strings = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=2
)
n_b = jnp.sum(strings, axis=1) % 2
n_b = jnn.one_hot(n_b, num_classes=2)
one_hot_strings = jnn.one_hot(strings, num_classes=2)
return {"input": one_hot_strings, "output": n_b}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 2
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return 2
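# Example usage: a minimal sketch that also checks the labels against the
# definition (class 0 iff the number of 1s is even).
def _example_parity_check() -> task.Batch:
  """Samples a batch and verifies the parity labels."""
  parity_task = ParityCheck()
  batch = parity_task.sample_batch(jrandom.PRNGKey(0), batch_size=4, length=7)
  strings = jnp.argmax(batch['input'], axis=-1)  # Recover the raw 0/1 strings.
  labels = jnp.argmax(batch['output'], axis=-1)  # 0 if #1s is even, else 1.
  assert bool(jnp.all(jnp.sum(strings, axis=1) % 2 == labels))
  return batch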
| randomized_positional_encodings-main | tasks/regular/parity_check.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute the final state after randomly walking on a circle."""
import functools
import chex
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from randomized_positional_encodings.tasks import task
class CycleNavigation(task.GeneralizationTask):
"""A task with the goal of computing the final state on a circle.
The input is a string of actions, composed of 0s, 1s or -1s. The actions give
directions to take on a finite length circle (0 is for stay, 1 is for right,
-1 is for left). The goal is to give the final position on the circle after
all the actions have been taken. The agent starts at position 0.
  By default, the length of the circle is 5.
Examples:
1 -1 0 -1 -1 -> -2 = class 3
1 1 1 -1 -> 2 = class 2
Note that the sampling is jittable so it is fast.
"""
@property
def _cycle_length(self) -> int:
"""Returns the cycle length, number of possible states."""
return 5
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self, rng: chex.PRNGKey, batch_size: int, length: int
) -> task.Batch:
"""Returns a batch of strings and the expected class."""
actions = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=3
)
final_states = jnp.sum(actions - 1, axis=1) % self._cycle_length
final_states = jnn.one_hot(final_states, num_classes=self.output_size)
one_hot_strings = jnn.one_hot(actions, num_classes=self.input_size)
return {"input": one_hot_strings, "output": final_states}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return 3
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._cycle_length
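# Example usage: a minimal sketch that checks the labels against the
# definition, where the action tokens {0, 1, 2} encode the moves {-1, 0, +1}
# on a circle of length 5.
def _example_cycle_navigation() -> task.Batch:
  """Samples a batch and verifies the final-state labels."""
  cycle_task = CycleNavigation()
  batch = cycle_task.sample_batch(jrandom.PRNGKey(0), batch_size=4, length=5)
  actions = jnp.argmax(batch['input'], axis=-1)  # Tokens in {0, 1, 2}.
  labels = jnp.argmax(batch['output'], axis=-1)
  assert bool(jnp.all(jnp.sum(actions - 1, axis=1) % 5 == labels))
  return batch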
| randomized_positional_encodings-main | tasks/regular/cycle_navigation.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modular arithmetic without brackets.
Note that this task generates samples using a jittable function and is therefore
much faster than its 'brackets' counterpart, which requires simulating the full
context-free (CF) grammar and is thus not jittable.
"""
import functools
from typing import Optional, Sequence
import chex
import jax
import jax.nn as jnn
import jax.numpy as jnp
import jax.random as jrandom
from randomized_positional_encodings.tasks import task
OP_BY_CHARACTER = {'+': 0, '-': 1, '*': 2, '_': 3}
def _replace_subtractions(expression: chex.Array, modulus: int) -> chex.Array:
"""Replaces subtractions in an expression by additions with the inverse.
e.g. the expression [1, -, 3] results in [1, +, -3].
Args:
expression: Encoded expression (a 1D array of integers) in which to replace
subtractions.
modulus: The modulus to use for the modular arithmetic.
Returns:
The expression with all subtractions replaced by additions with the inverse.
"""
if expression.size < 2:
return expression
mask = expression == modulus + OP_BY_CHARACTER['-']
subtract_replaced = jnp.where(
mask, modulus + OP_BY_CHARACTER['+'], expression
)
return subtract_replaced.at[2:].multiply(1 - 2 * mask[1:-1])
def _perform_multiplications(
expression: chex.Array, modulus: int
) -> chex.Array:
"""Performs all multiplications in an expression containing only + and *.
This is done at fixed length and the result is zero-padded to achieve this.
Since the result of performing multiplications is an expression containing
only + operators, the operators are dropped from the output. For example, the
expression [1, +, 3, *, 4] results in [1, 12, 0].
Args:
expression: Encoded expression in which to perform multiplications.
modulus: The modulus to use for the modular arithmetic.
Returns:
An array with the results of the multiplications (potentially zero-padded).
"""
term_ids = jnp.cumsum(expression == modulus + OP_BY_CHARACTER['+'])[::2]
# Segment_prod can only be jit-compiled with a fixed number of segments.
# Therefore, we have to set to the maximum number of terms possible and
# mask out superfluous segment results with zeros afterwards.
maximum_term_number = expression.shape[0] // 2 + 1
products = jax.ops.segment_prod(
expression[::2],
term_ids,
num_segments=maximum_term_number,
indices_are_sorted=True,
)
valid_segment_mask = jnp.arange(maximum_term_number) <= term_ids[-1]
return products * valid_segment_mask
def _replace_blanks(expression: chex.Array, modulus: int) -> chex.Array:
"""Replaces blank symbols in expression with either `+` or `0`.
Depending on whether the blank symbol is at the position of an operator or a
residual, the blank symbol is replaced with a `+` operator or a `0`.
Args:
expression: Encoded expression in which to replace blank symbols.
modulus: The modulus to use for the modular arithmetic.
Returns:
An array with blank symbols replaced by either `+` or `0`.
"""
mask = expression == OP_BY_CHARACTER['_'] + modulus
operator_mask = mask.at[::2].set(False) # pytype: disable=attribute-error # numpy-scalars
residual_mask = mask.at[1::2].set(False) # pytype: disable=attribute-error # numpy-scalars
blanks_replaced = jnp.where(
operator_mask, OP_BY_CHARACTER['+'] + modulus, expression
)
blanks_replaced = jnp.where(residual_mask, 0, blanks_replaced)
return blanks_replaced
def _evaluate_expression(expression: chex.Array, modulus: int) -> chex.Array:
"""Returns the result of evaluating a modular arithmetic expression."""
expression = _replace_blanks(expression, modulus)
expression = _replace_subtractions(expression, modulus)
additive_terms = _perform_multiplications(expression, modulus)
return jnp.sum(additive_terms) % modulus
class ModularArithmetic(task.GeneralizationTask):
"""A task with the goal of reducing a simple arithmetic expression.
The input is a string, composed of numbers (in {0, ..., modulus-1}), and
operators (in {+, -, *}). The output is the reduced value of this expression,
which is also in {0, ..., modulus-1}.
Examples (modulo 5):
1 + 2 * 3 = 2
1 - 1 - 1 = 4
0 * 1 + 4 * 3 - 2 = 0
Note that the input strings are always of odd length.
"""
def __init__(
self,
modulus: int,
*args,
operators: Optional[Sequence[str]] = None,
**kwargs
):
"""Initializes the modular arithmetic task.
Args:
modulus: The modulus used for the computation.
*args: Args for the base task class.
operators: Operators to be used in the sequences. By default it's None,
meaning all operators available are used.
**kwargs: Kwargs for the base task class.
"""
super().__init__(*args, **kwargs)
self._modulus = modulus
if operators is None:
operators = ('+', '*', '-')
self._operators = [OP_BY_CHARACTER[op] for op in operators]
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> task.Batch:
"""Returns a batch of modular arithmetic expressions and their labels.
Args:
rng: The jax random number generator.
batch_size: The size of the batch returned.
length: The length of the sequence. As this length must be odd for the
modular arithmetic dataset, if it's not, we force it to be by
        subtracting one from the length passed.
"""
    # Subtract one from the length if it's not odd already.
if length % 2 != 1:
length -= 1
batch = jnp.empty((batch_size, length), dtype=int)
rng1, rng2 = jax.random.split(rng)
remainders = jax.random.randint(
rng1, (batch_size, length // 2 + 1), 0, self._modulus
)
ops = self._modulus + jnp.array(self._operators)
operations = jrandom.choice(rng2, ops, (batch_size, length // 2))
batch = batch.at[:, ::2].set(remainders)
expressions = batch.at[:, 1::2].set(operations)
evaluate = functools.partial(_evaluate_expression, modulus=self._modulus)
labels = jax.vmap(evaluate)(expressions)
labels = jnn.one_hot(labels, self._modulus)
one_hot_expressions = jnn.one_hot(
expressions, self._modulus + len(OP_BY_CHARACTER)
)
return {'input': one_hot_expressions, 'output': labels}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._modulus + len(OP_BY_CHARACTER)
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._modulus
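# Example usage: a minimal worked example that evaluates the encoded form of
# the docstring expression `1 + 2 * 3` modulo 5. With modulus 5, digits map to
# themselves and '+' and '*' map to 5 + OP_BY_CHARACTER[op], i.e. 5 and 7.
def _example_modular_arithmetic() -> int:
  """Evaluates an encoded expression and returns its value modulo 5."""
  encoded = jnp.array([1, 5, 2, 7, 3])  # Encodes the expression '1 + 2 * 3'.
  value = int(_evaluate_expression(encoded, modulus=5))
  assert value == 2  # (1 + 2 * 3) % 5 == 2, matching the class docstring.
  return value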
| randomized_positional_encodings-main | tasks/regular/modular_arithmetic.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Curricula over sequence lengths used to evaluate length generalization.
Allows sampling different sequence lengths during training. For instance,
one might want to start with length=1 and regularly increase the length by 1
every 50k steps.
"""
import abc
from collections.abc import Collection
import random
import numpy as np
class Curriculum(abc.ABC):
"""Curriculum to sample lengths."""
@abc.abstractmethod
def sample_sequence_length(self, step: int) -> int:
"""Samples a sequence length from the current distribution."""
class FixedCurriculum(Curriculum):
"""A fixed curriculum, always sampling the same sequence length."""
def __init__(self, sequence_length: int):
"""Initializes.
Args:
sequence_length: The sequence length to sample.
"""
super().__init__()
self._sequence_length = sequence_length
def sample_sequence_length(self, step: int) -> int:
"""Returns a fixed sequence length."""
del step
return self._sequence_length
class UniformCurriculum(Curriculum):
"""A uniform curriculum, sampling different sequence lengths."""
def __init__(self, values: Collection[int]):
"""Initializes.
Args:
values: The sequence lengths to sample.
"""
super().__init__()
self._values = tuple(values)
def sample_sequence_length(self, step: int) -> int:
"""Returns a sequence length sampled from a uniform distribution."""
del step
return random.choice(self._values)
class ReverseExponentialCurriculum(Curriculum):
"""A reverse exponential curriculum, sampling different sequence lengths."""
  def __init__(self, values: Collection[int], tau: float):
"""Initializes.
Args:
values: The sequence lengths to sample.
tau: The exponential rate to use.
"""
super().__init__()
self._values = tuple(values)
self._tau = tau
def sample_sequence_length(self, step: int) -> int:
"""Returns a length sampled from a reverse exponential distribution."""
del step
probs = self._tau ** np.array(self._values)
probs = np.array(probs, dtype=np.float32)
probs = probs / np.sum(probs)
return np.random.choice(self._values, p=probs)
class RegularIncreaseCurriculum(Curriculum):
"""Curriculum for sequence lengths with a regular increase."""
def __init__(
self,
initial_sequence_length: int,
increase_frequency: int,
increase_amount: int,
sample_all_length: bool,
):
"""Initializes.
Args:
initial_sequence_length: The value of the sequence length at the beginning
of the curriculum.
increase_frequency: How often we increase the possible sequence length.
increase_amount: The amount of the increase in length.
      sample_all_length: Whether to sample uniformly from all lengths up to the
        current one or to just return the current one.
"""
super().__init__()
self._initial_sequence_length = initial_sequence_length
self._increase_frequency = increase_frequency
self._increase_amount = increase_amount
self._sample_all_length = sample_all_length
def sample_sequence_length(self, step: int) -> int:
"""Returns a sequence length from the curriculum with the current step."""
if not self._sample_all_length:
return self._initial_sequence_length + self._increase_amount * (
step // self._increase_frequency
)
return (
self._initial_sequence_length
+ self._increase_amount
* np.random.randint(0, step // self._increase_frequency + 1)
)
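# Example usage: a minimal sketch of two curricula. With an increase of 1 every
# 50k steps, the length returned by the regular-increase curriculum at step
# 100k is 1 + 2 = 3.
def _example_curricula() -> tuple[int, int]:
  """Samples lengths from a fixed and a regularly increasing curriculum."""
  fixed = FixedCurriculum(sequence_length=40)
  increasing = RegularIncreaseCurriculum(
      initial_sequence_length=1,
      increase_frequency=50_000,
      increase_amount=1,
      sample_all_length=False,
  )
  length_at_start = fixed.sample_sequence_length(step=0)  # Always 40.
  length_at_100k = increasing.sample_sequence_length(step=100_000)  # 3.
  return length_at_start, length_at_100k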
| randomized_positional_encodings-main | experiments/curriculum.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for our length generalization experiments."""
import functools
from randomized_positional_encodings.experiments import curriculum as curriculum_lib
from randomized_positional_encodings.models import transformer
from randomized_positional_encodings.tasks.cs import binary_addition
from randomized_positional_encodings.tasks.cs import binary_multiplication
from randomized_positional_encodings.tasks.cs import bucket_sort
from randomized_positional_encodings.tasks.cs import compute_sqrt
from randomized_positional_encodings.tasks.cs import duplicate_string
from randomized_positional_encodings.tasks.cs import missing_duplicate_string
from randomized_positional_encodings.tasks.cs import odds_first
from randomized_positional_encodings.tasks.dcf import modular_arithmetic_brackets
from randomized_positional_encodings.tasks.dcf import reverse_string
from randomized_positional_encodings.tasks.dcf import solve_equation
from randomized_positional_encodings.tasks.dcf import stack_manipulation
from randomized_positional_encodings.tasks.regular import cycle_navigation
from randomized_positional_encodings.tasks.regular import even_pairs
from randomized_positional_encodings.tasks.regular import modular_arithmetic
from randomized_positional_encodings.tasks.regular import parity_check
MODEL_BUILDERS = {
'transformer_encoder': functools.partial(
transformer.make_transformer,
transformer_module=transformer.TransformerEncoder, # pytype: disable=module-attr
),
}
CURRICULUM_BUILDERS = {
'fixed': curriculum_lib.FixedCurriculum,
'regular_increase': curriculum_lib.RegularIncreaseCurriculum,
'reverse_exponential': curriculum_lib.ReverseExponentialCurriculum,
'uniform': curriculum_lib.UniformCurriculum,
}
TASK_BUILDERS = {
'even_pairs': even_pairs.EvenPairs,
'modular_arithmetic': functools.partial(
modular_arithmetic.ModularArithmetic, modulus=5
),
'parity_check': parity_check.ParityCheck,
'cycle_navigation': cycle_navigation.CycleNavigation,
'stack_manipulation': stack_manipulation.StackManipulation,
'reverse_string': functools.partial(
reverse_string.ReverseString, vocab_size=2
),
'modular_arithmetic_brackets': functools.partial(
modular_arithmetic_brackets.ModularArithmeticBrackets,
modulus=5,
mult=True,
),
'solve_equation': functools.partial(
solve_equation.SolveEquation, modulus=5
),
'duplicate_string': functools.partial(
duplicate_string.DuplicateString, vocab_size=2
),
'missing_duplicate_string': missing_duplicate_string.MissingDuplicateString,
'odds_first': functools.partial(odds_first.OddsFirst, vocab_size=2),
'binary_addition': binary_addition.BinaryAddition,
'binary_multiplication': binary_multiplication.BinaryMultiplication,
'compute_sqrt': compute_sqrt.ComputeSqrt,
'bucket_sort': functools.partial(bucket_sort.BucketSort, vocab_size=5),
}
TASK_LEVELS = {
'even_pairs': 'regular',
'modular_arithmetic': 'regular',
'parity_check': 'regular',
'cycle_navigation': 'regular',
'stack_manipulation': 'dcf',
'reverse_string': 'dcf',
'modular_arithmetic_brackets': 'dcf',
'solve_equation': 'dcf',
'duplicate_string': 'cs',
'missing_duplicate_string': 'cs',
'odds_first': 'cs',
'binary_addition': 'cs',
'binary_multiplication': 'cs',
'compute_sqrt': 'cs',
'bucket_sort': 'cs',
}
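# Example usage: a minimal sketch showing how experiment code resolves builders
# from these registries by name (`experiments/example.py` follows the same
# pattern).
def _example_lookup():
  """Builds a task and a curriculum from the registries above."""
  arithmetic_task = TASK_BUILDERS['modular_arithmetic']()  # Preset modulus=5.
  uniform_curriculum = CURRICULUM_BUILDERS['uniform'](
      values=list(range(1, 41))
  )
  level = TASK_LEVELS['modular_arithmetic']  # -> 'regular'
  return arithmetic_task, uniform_curriculum, level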
| randomized_positional_encodings-main | experiments/constants.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation of a network on sequences of different lengths."""
import dataclasses
import random
from typing import Any, Callable, Mapping
from absl import logging
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tqdm
_Batch = Mapping[str, jnp.ndarray]
@dataclasses.dataclass
class EvaluationParams:
"""The parameters used for range evaluation of networks."""
model: hk.Transformed
params: chex.ArrayTree
accuracy_fn: Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray]
sample_batch: Callable[[chex.Array, int, int], _Batch]
max_test_length: int
total_batch_size: int
sub_batch_size: int
is_autoregressive: bool = False
def range_evaluation(
eval_params: EvaluationParams,
use_tqdm: bool = False,
) -> list[Mapping[str, Any]]:
  """Evaluates the model on longer, never-seen strings and logs the results.
Args:
eval_params: The evaluation parameters, see above.
use_tqdm: Whether to use a progress bar with tqdm.
Returns:
The list of dicts containing the accuracies.
"""
model = eval_params.model
params = eval_params.params
random.seed(1)
np.random.seed(1)
rng_seq = hk.PRNGSequence(1)
if eval_params.is_autoregressive:
apply_fn = jax.jit(model.apply, static_argnames=('sample',))
else:
apply_fn = jax.jit(model.apply)
results = []
lengths = range(1, eval_params.max_test_length + 1)
if use_tqdm:
lengths = tqdm.tqdm(lengths)
for length in lengths:
    # We need to clear the cache of the jitted function to avoid memory
    # overflow, since we compile one version per sequence length, which can be
    # a lot.
apply_fn.clear_cache()
sub_accuracies = []
for _ in range(eval_params.total_batch_size // eval_params.sub_batch_size):
batch = eval_params.sample_batch(
next(rng_seq), eval_params.sub_batch_size, length
)
if eval_params.is_autoregressive:
outputs = apply_fn(
params,
next(rng_seq),
batch['input'],
jnp.empty_like(batch['output']),
sample=True,
)
else:
outputs = apply_fn(params, next(rng_seq), batch['input'])
sub_accuracies.append(
float(np.mean(eval_params.accuracy_fn(outputs, batch['output'])))
)
log_data = {
'length': length,
'accuracy': np.mean(sub_accuracies),
}
logging.info(log_data)
results.append(log_data)
return results
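# Example usage: a self-contained toy sketch of how `EvaluationParams` is wired
# together. The copy task and single linear layer below are illustrative
# stand-ins, not the repository's real tasks or models.
def _example_range_evaluation() -> list[Mapping[str, Any]]:
  """Runs the range evaluation for a randomly initialized toy model."""
  def sample_batch(rng: chex.Array, batch_size: int, length: int) -> _Batch:
    tokens = jax.random.randint(rng, (batch_size, length), 0, 2)
    one_hot = jax.nn.one_hot(tokens, 2)
    return {'input': one_hot, 'output': one_hot}  # A trivial copy task.
  def accuracy_fn(output: jnp.ndarray, target: jnp.ndarray) -> jnp.ndarray:
    return jnp.mean(
        jnp.argmax(output, axis=-1) == jnp.argmax(target, axis=-1), axis=-1
    )
  model = hk.transform(lambda x: hk.Linear(2)(x))
  dummy = sample_batch(jax.random.PRNGKey(0), batch_size=4, length=3)
  params = model.init(jax.random.PRNGKey(1), dummy['input'])
  eval_params = EvaluationParams(
      model=model,
      params=params,
      accuracy_fn=accuracy_fn,
      sample_batch=sample_batch,
      max_test_length=4,
      total_batch_size=8,
      sub_batch_size=4,
  )
  return range_evaluation(eval_params)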
| randomized_positional_encodings-main | experiments/range_evaluation.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example script to train and evaluate a network."""
from absl import app
from absl import flags
import haiku as hk
import jax.numpy as jnp
import numpy as np
from randomized_positional_encodings.experiments import constants
from randomized_positional_encodings.experiments import curriculum as curriculum_lib
from randomized_positional_encodings.experiments import training
from randomized_positional_encodings.experiments import utils
_BATCH_SIZE = flags.DEFINE_integer(
'batch_size',
default=128,
help='Training batch size.',
lower_bound=1,
)
_SEQUENCE_LENGTH = flags.DEFINE_integer(
'sequence_length',
default=40,
help='Maximum training sequence length.',
lower_bound=1,
)
_TASK = flags.DEFINE_string(
'task',
default='missing_duplicate_string',
help='Length generalization task (see `constants.py` for other tasks).',
)
_ARCHITECTURE = flags.DEFINE_string(
'architecture',
default='transformer_encoder',
help='Model architecture (see `constants.py` for other architectures).',
)
_IS_AUTOREGRESSIVE = flags.DEFINE_boolean(
'is_autoregressive',
default=False,
help='Whether to use autoregressive sampling or not.',
)
_COMPUTATION_STEPS_MULT = flags.DEFINE_integer(
'computation_steps_mult',
default=0,
help=(
'The amount of computation tokens to append to the input tape (defined'
' as a multiple of the input length)'
),
lower_bound=0,
)
# The architecture parameters depend on the architecture, so we cannot define
# them via flags. See `constants.py` for the required values.
_ARCHITECTURE_PARAMS = {
'num_layers': 5,
'embedding_dim': 64,
'dropout_prob': 0.1,
'positional_encodings': 'NOISY_RELATIVE',
'positional_encodings_params': {'noise_max_length': 2048},
}
def main(_) -> None:
# Create the task.
curriculum = curriculum_lib.UniformCurriculum(
values=list(range(1, _SEQUENCE_LENGTH.value + 1))
)
task = constants.TASK_BUILDERS[_TASK.value]()
# Create the model.
single_output = task.output_length(10) == 1
model = constants.MODEL_BUILDERS[_ARCHITECTURE.value](
output_size=task.output_size,
return_all_outputs=True,
**_ARCHITECTURE_PARAMS,
)
if _IS_AUTOREGRESSIVE.value:
if 'transformer' not in _ARCHITECTURE.value:
model = utils.make_model_with_targets_as_input(
model, _COMPUTATION_STEPS_MULT.value
)
model = utils.add_sampling_to_autoregressive_model(model, single_output)
else:
model = utils.make_model_with_empty_targets(
model, task, _COMPUTATION_STEPS_MULT.value, single_output
)
model = hk.transform(model)
# Create the loss and accuracy based on the pointwise ones.
def loss_fn(output, target):
loss = jnp.mean(jnp.sum(task.pointwise_loss_fn(output, target), axis=-1))
return loss, {}
def accuracy_fn(output, target):
mask = task.accuracy_mask(target)
return jnp.sum(mask * task.accuracy_fn(output, target)) / jnp.sum(mask)
# Create the final training parameters.
training_params = training.ClassicTrainingParams(
seed=0,
model_init_seed=0,
training_steps=10_000,
log_frequency=100,
length_curriculum=curriculum,
batch_size=_BATCH_SIZE.value,
task=task,
model=model,
loss_fn=loss_fn,
learning_rate=1e-3,
l2_weight=0.0,
accuracy_fn=accuracy_fn,
compute_full_range_test=True,
max_range_test_length=100,
range_test_total_batch_size=512,
range_test_sub_batch_size=64,
is_autoregressive=_IS_AUTOREGRESSIVE.value,
)
training_worker = training.TrainingWorker(training_params, use_tqdm=True)
_, eval_results, _ = training_worker.run()
# Gather results and print final score.
accuracies = [r['accuracy'] for r in eval_results]
score = np.mean(accuracies[_SEQUENCE_LENGTH.value + 1 :])
print(f'Score: {score}')
if __name__ == '__main__':
app.run(main)
| randomized_positional_encodings-main | experiments/example.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utility functions for training and evaluation."""
import inspect
from typing import Any, Callable
import chex
import haiku as hk
from jax import nn as jnn
from jax import numpy as jnp
from randomized_positional_encodings.tasks import task
COMPUTATION_EMPTY_TOKEN = 0
OUTPUT_EMPTY_TOKEN = 1
def make_model_with_empty_targets(
model: Callable[[chex.Array], chex.Array],
generalization_task: task.GeneralizationTask,
computation_steps_mult: int = 0,
single_output: bool = False,
) -> Callable[[chex.Array], chex.Array]:
"""Returns a wrapped model that pads the inputs to match the output length.
For a given input tape `input_tape` of vocabulary size `vocab_size`, the
wrapped model will process a tape of the format
[`input_tape`, `empty_tape`], where the empty tape token is `vocab_size + 1`.
The `empty_tape` has the same length as the task output.
Args:
model: A model function that converts inputs to outputs.
generalization_task: The task that we train on.
computation_steps_mult: The amount of empty cells to append to the input
tape. This variable is a multiplier and the actual number of cells is
`computation_steps_mult * input_length`.
single_output: Whether to return the squeezed tensor of values.
"""
def new_model(x: chex.Array) -> chex.Array:
batch_size, input_length, input_size = x.shape
output_length = generalization_task.output_length(input_length)
extra_dims_onehot = 1 + int(computation_steps_mult > 0)
final_input_size = input_size + extra_dims_onehot
# Add trailing zeros to account for new final_input_size.
extra_zeros_x = jnp.zeros(
(batch_size, input_length, final_input_size - input_size)
)
x = jnp.concatenate([x, extra_zeros_x], axis=-1)
computation_tape = jnp.full(
(batch_size, computation_steps_mult * input_length),
fill_value=input_size + COMPUTATION_EMPTY_TOKEN,
)
computation_tape = jnn.one_hot(
computation_tape, num_classes=final_input_size
)
output_tokens = jnp.full(
(batch_size, output_length),
fill_value=input_size
+ OUTPUT_EMPTY_TOKEN
- int(computation_steps_mult == 0),
)
output_tokens = jnn.one_hot(output_tokens, num_classes=final_input_size)
final_input = jnp.concatenate([x, computation_tape, output_tokens], axis=1)
if 'input_length' in inspect.getfullargspec(model).args:
output = model(final_input, input_length=input_length) # pytype: disable=wrong-keyword-args
else:
output = model(final_input)
output = output[:, -output_length:]
if single_output:
output = jnp.squeeze(output, axis=1)
return output
return new_model
def make_model_with_targets_as_input(
model: Callable[[chex.Array], chex.Array], computation_steps_mult: int = 0
) -> Callable[[chex.Array, chex.Array], chex.Array]:
"""Returns a wrapped model that takes the targets as inputs.
This function is useful for the autoregressive case where we pass the targets
as inputs to the model. The final input looks like:
[inputs, computation_tokens, output_token, targets]
Args:
model: A haiku model that takes 'x' as input.
computation_steps_mult: The amount of computation tokens to append to the
input tape. This variable is a multiplier and the actual number of cell is
computation_steps_mult * input_length.
"""
def new_model(x: chex.Array, y: chex.Array) -> chex.Array:
"""Returns an output from the inputs and targets.
Args:
x: One-hot input vectors, shape (B, T, input_size).
y: One-hot target output vectors, shape (B, T, output_size).
"""
batch_size, input_length, input_size = x.shape
_, output_length, output_size = y.shape
extra_dims_onehot = 1 + int(computation_steps_mult > 0)
final_input_size = max(input_size, output_size) + extra_dims_onehot
# Add trailing zeros to account for new final_input_size.
extra_zeros_x = jnp.zeros(
(batch_size, input_length, final_input_size - input_size)
)
x = jnp.concatenate([x, extra_zeros_x], axis=-1)
extra_zeros_y = jnp.zeros(
(batch_size, output_length, final_input_size - output_size)
)
y = jnp.concatenate([y, extra_zeros_y], axis=-1)
computation_tape = jnp.full(
(batch_size, computation_steps_mult * input_length),
fill_value=input_size + COMPUTATION_EMPTY_TOKEN,
)
computation_tape = jnn.one_hot(
computation_tape, num_classes=final_input_size
)
output_token = jnp.full(
(batch_size, 1),
fill_value=input_size
+ OUTPUT_EMPTY_TOKEN
- int(computation_steps_mult == 0),
)
output_token = jnn.one_hot(output_token, num_classes=final_input_size)
final_input = jnp.concatenate(
[x, computation_tape, output_token, y], axis=1
)
if 'input_length' in inspect.getfullargspec(model).args:
output = model(final_input, input_length=input_length) # pytype: disable=wrong-keyword-args
else:
output = model(final_input)
return output[:, -output_length - 1 : -1]
return new_model
def add_sampling_to_autoregressive_model(
model: Callable[[chex.Array, chex.Array], chex.Array],
single_output: bool = False,
) -> Callable[[chex.Array, chex.Array, bool], chex.Array]:
"""Adds a 'sample' argument to the model, to use autoregressive sampling."""
def new_model_with_sampling(
x: chex.Array,
y: chex.Array,
sample: bool,
) -> chex.Array:
"""Returns an autoregressive model if `sample == True and output_size > 1`.
Args:
x: The input sequences of shape (b, t, i), where i is the input size.
y: The target sequences of shape (b, t, o), where o is the output size.
sample: Whether to evaluate the model using autoregressive decoding.
"""
output_length = 1 if len(y.shape) == 2 else y.shape[1]
output_size = y.shape[-1]
if not sample or output_length == 1:
output = model(x, y)
else:
def evaluate_model_autoregressively(
idx: int,
predictions: chex.Array,
) -> chex.Array:
"""Iteratively evaluates the model based on the previous predictions.
Args:
idx: The index of the target sequence that should be evaluated.
predictions: The logits for the predictions up to but not including
the index `idx`.
Returns:
The `predictions` array modified only at position `idx` where the
logits for index `idx` have been inserted.
"""
one_hot_predictions = jnn.one_hot(
jnp.argmax(predictions, axis=-1),
num_classes=output_size,
)
logits = model(x, one_hot_predictions)
return predictions.at[:, idx].set(logits[:, idx])
output = hk.fori_loop(
lower=0,
upper=output_length,
body_fun=evaluate_model_autoregressively,
init_val=jnp.empty_like(y),
)
if single_output:
output = jnp.squeeze(output, axis=1)
return output
return new_model_with_sampling
def update_tree_with_new_containers(
tree: Any, update_dict: dict[str, Any]
) -> None:
"""Updates a dataclass tree in place, adding new containers.
This method is useful for the nested library to add fields to a tree, for
which containers have not been created.
For instance, if A is a dataclass with attribute architecture_params, and we
want to add the value architecture_params.rnn_model.size, we need to create
the container 'rnn_model' inside architecture_params.
Args:
tree: An object with attribute (typically a dataclass).
update_dict: A dict of nested updates. See example above.
"""
for key in update_dict:
subkeys = key.split('.')
if len(subkeys) >= 2:
# Example: architecture.params.size
for i in range(0, len(subkeys) - 2):
getattr(tree, subkeys[i])[subkeys[i + 1]] = {}
| randomized_positional_encodings-main | experiments/utils.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training loop for length generalization experiments."""
import dataclasses
import functools
import random
from typing import Any, Callable, Mapping, Optional
from absl import logging
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tqdm
from randomized_positional_encodings.experiments import curriculum as curriculum_lib
from randomized_positional_encodings.experiments import range_evaluation
from randomized_positional_encodings.tasks import task as task_lib
_Batch = Mapping[str, jnp.ndarray]
_LossMetrics = Optional[Mapping[str, jnp.ndarray]]
_LossFn = Callable[[chex.Array, chex.Array], tuple[float, _LossMetrics]]
_AccuracyFn = Callable[[chex.Array, chex.Array], float]
_ModelApplyFn = Callable[..., chex.Array]
_MAX_RNGS_RESERVE = 50000
@dataclasses.dataclass
class ClassicTrainingParams:
"""Parameters needed to train classical architectures."""
seed: int # Used to sample during forward pass (e.g. from final logits).
model_init_seed: int # Used to initialize model parameters.
training_steps: int
log_frequency: int
task: task_lib.GeneralizationTask
length_curriculum: curriculum_lib.Curriculum
batch_size: int
model: hk.Transformed
loss_fn: Callable[[jnp.ndarray, jnp.ndarray], tuple[float, _LossMetrics]]
learning_rate: float
l2_weight: float
test_model: Optional[hk.Transformed] = None
max_grad_norm: float = 1.0
is_autoregressive: bool = False
compute_full_range_test: bool = False
range_test_total_batch_size: int = 512
range_test_sub_batch_size: int = 64
max_range_test_length: int = 100
accuracy_fn: Optional[Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray]] = (
None
)
def _apply_loss_and_metrics_fn(
params: hk.Params,
rng_key: chex.PRNGKey,
batch: _Batch,
model_apply_fn: _ModelApplyFn,
loss_fn: _LossFn,
accuracy_fn: _AccuracyFn,
is_autoregressive: bool = False,
) -> tuple[float, tuple[_LossMetrics, float]]:
"""Computes the model output and applies the loss function.
Depending on whether a model is autoregressive or not, it will have a
different number of input parameters (i.e., autoregressive models also require
the targets as an input).
Args:
params: The model parameters.
rng_key: The prng key to use for random number generation.
batch: The data (consists of both inputs and outputs).
model_apply_fn: The model function that converts inputs into outputs.
loss_fn: A function that computes the loss for a batch of logits and labels.
accuracy_fn: A function that computes the accuracy for a batch of logits and
labels.
is_autoregressive: Whether the model is autoregressive or not.
Returns:
The loss of the model for the batch of data, extra loss metrics and the
accuracy, if accuracy_fn is not None.
"""
if is_autoregressive:
outputs = model_apply_fn(
params, rng_key, batch["input"], batch["output"], sample=False
)
else:
outputs = model_apply_fn(params, rng_key, batch["input"])
loss, loss_metrics = loss_fn(outputs, batch["output"])
if accuracy_fn is not None:
accuracy = accuracy_fn(outputs, batch["output"])
else:
accuracy = None
return loss, (loss_metrics, accuracy)
@functools.partial(
jax.jit,
static_argnames=(
"model_apply_fn",
"loss_fn",
"accuracy_fn",
"optimizer",
"is_autoregressive",
),
)
def _update_parameters(
params: hk.Params,
rng_key: chex.PRNGKey,
batch: _Batch,
model_apply_fn: _ModelApplyFn,
loss_fn: _LossFn,
accuracy_fn: _AccuracyFn,
optimizer: optax.GradientTransformation,
opt_state: optax.OptState,
is_autoregressive: bool = False,
) -> tuple[hk.Params, optax.OptState, tuple[float, _LossMetrics, float]]:
"""Applies a single SGD update step to the model parameters.
Args:
params: The model parameters.
rng_key: The prng key to use for random number generation.
batch: The data (consists of both inputs and outputs).
model_apply_fn: The model function that converts inputs into outputs.
loss_fn: A function that computes the loss for a batch of logits and labels.
accuracy_fn: A function that computes the accuracy for a batch of logits and
labels.
optimizer: The optimizer that computes the updates from the gradients of the
`loss_fn` with respect to the `params` and the previous `opt_state`.
opt_state: The optimizer state, e.g., momentum for each variable when using
Adam.
is_autoregressive: Whether the model is autoregressive or not.
Returns:
The updated parameters, the new optimizer state, and the loss, loss metrics
and accuracy.
"""
(loss, (metrics, accuracy)), grads = jax.value_and_grad(
_apply_loss_and_metrics_fn, has_aux=True
)(
params,
rng_key,
batch,
model_apply_fn,
loss_fn,
accuracy_fn,
is_autoregressive,
)
updates, new_opt_state = optimizer.update(grads, opt_state, params)
new_params = optax.apply_updates(params, updates)
return new_params, new_opt_state, (loss, metrics, accuracy)
class TrainingWorker:
"""Training worker."""
def __init__(
self, training_params: ClassicTrainingParams, use_tqdm: bool = False
):
"""Initializes the worker.
Args:
training_params: The training parameters.
use_tqdm: Whether to add a progress bar to stdout.
"""
self._training_params = training_params
self._use_tqdm = use_tqdm
self._params = None
self._step = 0
def step_for_evaluator(self) -> int:
return self._step
def run(
self,
) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]], chex.ArrayTree]:
"""Trains the model with the provided config.
Returns:
Results (various training and validation metrics), module parameters
and router parameters.
"""
logging.info("Starting training!")
training_params = self._training_params
rngs_reserve = min(_MAX_RNGS_RESERVE, training_params.training_steps)
random.seed(training_params.seed)
np.random.seed(training_params.seed)
rng_seq = hk.PRNGSequence(training_params.seed)
rng_seq.reserve(rngs_reserve)
step = 0
results = []
model = training_params.model
task = training_params.task
length_curriculum = training_params.length_curriculum
if training_params.l2_weight is None or training_params.l2_weight == 0:
optimizer = optax.adam(training_params.learning_rate)
else:
optimizer = optax.adamw(
training_params.learning_rate, weight_decay=training_params.l2_weight
)
optimizer = optax.chain(
optax.clip_by_global_norm(training_params.max_grad_norm), optimizer
)
dummy_batch = task.sample_batch(
next(rng_seq), length=10, batch_size=training_params.batch_size
)
model_init_rng_key = jax.random.PRNGKey(training_params.model_init_seed)
if training_params.is_autoregressive:
params = model.init(
model_init_rng_key,
dummy_batch["input"],
dummy_batch["output"],
sample=False,
)
else:
params = model.init(model_init_rng_key, dummy_batch["input"])
opt_state = optimizer.init(params)
self._params, self._step = params, 0
steps = range(training_params.training_steps + 1)
if self._use_tqdm:
steps = tqdm.tqdm(steps)
for step in steps:
# Randomness handled by either python.random or numpy.
length = length_curriculum.sample_sequence_length(step)
# Randomness handled by either jax, python.random or numpy.
train_batch = task.sample_batch(
next(rng_seq), length=length, batch_size=training_params.batch_size
)
params, opt_state, (train_loss, train_metrics, train_accuracy) = (
_update_parameters(
params=params,
rng_key=next(rng_seq),
batch=train_batch,
model_apply_fn=model.apply,
loss_fn=training_params.loss_fn,
accuracy_fn=training_params.accuracy_fn,
optimizer=optimizer,
opt_state=opt_state,
is_autoregressive=training_params.is_autoregressive,
)
)
self._params, self._step = params, step
log_freq = training_params.log_frequency
if (log_freq > 0) and (step % log_freq == 0):
log_data = {
"step": step,
"train_loss": float(train_loss),
}
if training_params.accuracy_fn is not None:
log_data["train_accuracy"] = float(train_accuracy)
for key, value in train_metrics.items():
log_data[".".join(["train_metrics", key])] = np.array(value)
logging.info(log_data)
results.append(log_data)
      # We need to access this private attribute since the default reserve size
      # cannot be edited yet.
if not rng_seq._subkeys: # pylint: disable=protected-access
rng_seq.reserve(rngs_reserve)
eval_results = list()
if training_params.compute_full_range_test:
eval_params = range_evaluation.EvaluationParams(
model=training_params.test_model or model,
params=params,
accuracy_fn=training_params.accuracy_fn,
sample_batch=task.sample_batch,
max_test_length=training_params.max_range_test_length,
total_batch_size=training_params.range_test_total_batch_size,
sub_batch_size=training_params.range_test_sub_batch_size,
is_autoregressive=training_params.is_autoregressive,
)
eval_results = range_evaluation.range_evaluation(
eval_params,
use_tqdm=True,
)
return results, eval_results, params
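# A minimal usage sketch (not part of the original module): the task,
# curriculum, model, and loss function below are hypothetical placeholders that
# would normally come from the experiment configuration.
#
#   training_params = ClassicTrainingParams(
#       seed=0,
#       model_init_seed=0,
#       training_steps=10_000,
#       log_frequency=100,
#       task=my_task,                        # hypothetical GeneralizationTask
#       length_curriculum=my_curriculum,     # hypothetical Curriculum
#       batch_size=128,
#       model=hk.transform(my_forward_fn),   # hypothetical hk.Transformed model
#       loss_fn=my_loss_fn,
#       learning_rate=1e-4,
#       l2_weight=0.0,
#   )
#   worker = TrainingWorker(training_params, use_tqdm=True)
#   results, eval_results, final_params = worker.run()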
| randomized_positional_encodings-main | experiments/training.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for the transformer architectures."""
import chex
import haiku as hk
import jax.numpy as jnp
def layer_norm(x: chex.Array) -> chex.Array:
return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)
def chunk_sequences(sequences: chex.Array, chunk_length: int) -> chex.Array:
"""Chunks an array of sequences, on the second (time) dimension.
Args:
sequences: An array of sequences, of shape (B, T, F).
chunk_length: The length of each chunk.
  Returns:
    An array of shape (B * T // chunk_length, chunk_length, F).
  Raises:
    ValueError if T is larger than chunk_length and not a multiple of it.
"""
chex.assert_rank(sequences, 3)
batch_size, history_len, num_features = sequences.shape
if history_len < chunk_length:
context_length = history_len
elif history_len % chunk_length == 0:
context_length = chunk_length
else:
raise ValueError(
        'The history length should be a multiple of the context length. Got'
f' history_length={history_len} and'
f' context_length={chunk_length}'
)
history_batch_size = history_len // context_length
return jnp.reshape(
sequences,
(batch_size * history_batch_size, context_length, num_features),
)
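# Worked example (values chosen for illustration): chunking folds the chunk
# dimension into the batch dimension.
#
#   x = jnp.zeros((2, 6, 4))                   # (B=2, T=6, F=4)
#   chunk_sequences(x, chunk_length=3).shape   # (4, 3, 4), i.e. (B * T//C, C, F)
#   chunk_sequences(x, chunk_length=8).shape   # (2, 6, 4), since T < C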
def compute_sliding_window_mask(
sequence_length: int, attention_window: int
) -> chex.Array:
"""Returns a k-diagonal mask for a sliding window.
Args:
sequence_length: The length of the sequence, which will determine the shape
of the output.
attention_window: The size of the sliding window.
Returns:
A symmetric matrix of shape (sequence_length, sequence_length),
attention_window-diagonal, with ones on the diagonal and on all the
upper/lower diagonals up to attention_window // 2.
Raises:
ValueError if attention_window is <= 0.
"""
if attention_window <= 0:
raise ValueError(
f'The attention window should be > 0. Got {attention_window}.'
)
if attention_window == 1:
return jnp.eye(sequence_length, sequence_length)
attention_mask = jnp.sum(
jnp.stack(
[
jnp.eye(sequence_length, sequence_length, k=k, dtype=jnp.int32)
for k in range(1, attention_window // 2 + 1)
]
),
axis=0,
)
attention_mask = attention_mask + jnp.transpose(attention_mask)
attention_mask += jnp.eye(sequence_length, sequence_length)
return attention_mask
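# Worked example (shown for illustration only): with sequence_length=5 and
# attention_window=3, each query attends to itself and one token on each side.
#
#   compute_sliding_window_mask(5, 3)
#   # [[1, 1, 0, 0, 0],
#   #  [1, 1, 1, 0, 0],
#   #  [0, 1, 1, 1, 0],
#   #  [0, 0, 1, 1, 1],
#   #  [0, 0, 0, 1, 1]]
#
# An even window behaves like the next odd one: attention_window=4 keeps two
# diagonals on each side, exactly like attention_window=5.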
| randomized_positional_encodings-main | models/transformer_utils.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer model."""
import dataclasses
from typing import Any, Callable, Optional, Union
from absl import logging
import chex
import haiku as hk
import jax
import jax.nn as jnn
import jax.numpy as jnp
from randomized_positional_encodings.models import positional_encodings as pos_encs_lib
from randomized_positional_encodings.models import transformer_utils
@chex.dataclass
class TransformerConfig:
"""Hyperparameters used in the Transformer architectures."""
# The dimension of the first embedding.
embedding_dim: int = 64
# The number of multi-head attention layers.
num_layers: int = 5
# The number of heads per layer.
num_heads: int = 8
# The number of hidden neurons per head. If None, it is set to be equal to
# `embedding_dim // num_heads`.
num_hiddens_per_head: Optional[int] = None
# The probability that each element is discarded by the dropout modules.
# None means dropout is not used at all.
dropout_prob: Optional[float] = 0.1
# The parameter initialization scale for the embeddings.
emb_init_scale: float = 0.02
# Whether to use the embeddings rather than raw inputs.
use_embeddings: bool = True
# Whether to use lookup-embeddings, in which case the inputs must be ints.
use_lookup_embeddings: bool = False
# Input vocabulary size, not needed if use_lookup_embeddings is False.
input_vocab_size: Optional[int] = None
# Whether to share embeddings between the Encoder and the Decoder.
share_embeddings: bool = False
# The size of the sliding attention window. See MultiHeadDotProductAttention.
attention_window: Optional[int] = None
# The positional encoding used with default sin/cos (Vaswani et al., 2017).
positional_encodings: pos_encs_lib.PositionalEncodings = dataclasses.field(
default_factory=lambda: pos_encs_lib.PositionalEncodings.SIN_COS
)
# The parameters for the positional encodings, default sin/cos.
positional_encodings_params: pos_encs_lib.PositionalEncodingsParams = (
dataclasses.field(default_factory=pos_encs_lib.SinCosParams)
)
# How much larger the hidden layer of the feedforward network should be
# compared to the `embedding_dim`.
widening_factor: int = 4
# Which activation function to use.
activation_fn: Callable[[jax.Array], jax.Array] = jnn.relu
# Add mask to make causal predictions. All the decoders use causal masking by
  # default; this option is only used in the encoder. This is quite unusual but
# can still be useful in some rare cases.
encoder_causal_masking: bool = False
# Which token to use for the beginning of the string. None means an array
# full of zeros will be used.
bos_token: Optional[int] = None
# Used by the chunked transformer.
chunk_context_length: Optional[int] = None
def __post_init__(self) -> None:
"""Runs after the config has been created."""
if self.num_hiddens_per_head is None:
self.num_hiddens_per_head = self.embedding_dim // self.num_heads
if self.positional_encodings is None:
self.positional_encodings = pos_encs_lib.PositionalEncodings.SIN_COS
self.positional_encodings_params = pos_encs_lib.SinCosParams()
elif self.positional_encodings_params is None:
raise ValueError('No parameters for positional encodings are passed.')
elif not isinstance(
self.positional_encodings, pos_encs_lib.PositionalEncodings
) or not isinstance(
self.positional_encodings_params, pos_encs_lib.PositionalEncodingsParams
):
raise ValueError(
"The positional encodings passed are not of the right type. You're"
' probably passing strings rather than actual objects.'
)
class MultiHeadDotProductAttention(hk.Module):
"""Multi-head dot-product attention (Vaswani et al., 2017)."""
def __init__(
self,
num_heads: int,
num_hiddens_per_head: int,
positional_encodings: Optional[pos_encs_lib.PositionalEncodings] = None,
positional_encodings_params: Optional[
pos_encs_lib.PositionalEncodingsParams
] = None,
attention_window: Optional[int] = None,
name: Optional[str] = None,
) -> None:
"""Initializes the attention module.
Args:
num_heads: Number of heads to use.
num_hiddens_per_head: Number of hidden neurons per head.
positional_encodings: Which positional encodings to use in the attention.
None means no positional encodings are applied to keys or queries.
positional_encodings_params: Parameters for the positional encodings.
attention_window: Size of the attention sliding window. None means no
sliding window is used (or equivalently, window=full_attention_length).
        We attend only to attention_window tokens around a given query token,
        both before AND after the query token. If attention_window is even, we
        use the value +1.
name: Name of the module.
"""
super().__init__(name=name)
self._num_heads = num_heads
self._num_hiddens_per_head = num_hiddens_per_head
self._positional_encodings = positional_encodings
self._attention_window = attention_window
self._positional_encodings_params = (
positional_encodings_params # pytype: disable=annotation-type-mismatch
)
def __call__(
self,
inputs_q: chex.Array,
inputs_kv: chex.Array,
mask: Optional[chex.Array] = None,
causal: bool = False,
) -> chex.Array:
"""Returns the output of the multi-head attention."""
batch_size, sequence_length, embedding_size = inputs_q.shape
num_hiddens = self._num_hiddens_per_head * self._num_heads
q = hk.Linear(num_hiddens, with_bias=False)(inputs_q)
k = hk.Linear(num_hiddens, with_bias=False)(inputs_kv)
v = hk.Linear(num_hiddens, with_bias=False)(inputs_kv)
# The second (sequence) dimension is undefined since it can differ between
    # queries and keys/values when decoding. We also check that the inputs have
    # the same batch size, since the reshape below does not guarantee a failure
    # if they are different.
chex.assert_equal_shape_prefix([inputs_q, inputs_kv], prefix_len=1)
new_shape = (batch_size, -1, self._num_heads, self._num_hiddens_per_head)
q = jnp.reshape(q, new_shape)
k = jnp.reshape(k, new_shape)
v = jnp.reshape(v, new_shape)
# Let b=batch_size, t=seq_len, h=num_heads, and d=num_hiddens_per_head.
if self._positional_encodings == pos_encs_lib.PositionalEncodings.RELATIVE:
# We type hint the params to match the if statement, for pytype.
self._positional_encodings_params: pos_encs_lib.RelativeParams
attention = pos_encs_lib.compute_attention_with_relative_encodings(
q, k, self._positional_encodings_params.max_time, causal=causal
)
elif (
self._positional_encodings
== pos_encs_lib.PositionalEncodings.NOISY_RELATIVE
):
if causal:
raise NotImplementedError(
'Noisy positional encodings not implemented for causal attention.'
)
# We type hint the params to match the if statement, for pytype.
self._positional_encodings_params: pos_encs_lib.NoisyRelativeParams
attention = pos_encs_lib.compute_attention_with_noisy_relative_encodings(
q,
k,
max_time=self._positional_encodings_params.max_time,
noise_max_length=self._positional_encodings_params.noise_max_length,
randomize_both_sides=self._positional_encodings_params.randomize_both_sides,
causal=causal,
)
else:
if self._positional_encodings == pos_encs_lib.PositionalEncodings.ROTARY:
q = pos_encs_lib.apply_rotary_encoding(
q, position=jnp.arange(q.shape[1])[None, :]
)
k = pos_encs_lib.apply_rotary_encoding(
k, position=jnp.arange(k.shape[1])[None, :]
)
elif (
self._positional_encodings
== pos_encs_lib.PositionalEncodings.NOISY_ROTARY
):
# We type hint the params to match the if statement, for pytype.
self._positional_encodings_params: pos_encs_lib.NoisyRotaryParams
noise_max_length = self._positional_encodings_params.noise_max_length
# WARNING: This only works with self-attention, ie q.shape==k.shape.
rng = hk.next_rng_key()
q = pos_encs_lib.apply_rotary_encoding(
q,
position=jnp.arange(noise_max_length)[None, :],
noisy=True,
rng=rng,
)
k = pos_encs_lib.apply_rotary_encoding(
k,
position=jnp.arange(noise_max_length)[None, :],
noisy=True,
rng=rng,
)
attention = jnp.einsum('bthd,bThd->bhtT', q, k)
attention *= 1.0 / jnp.sqrt(self._num_hiddens_per_head)
# ALiBi encodings are not scaled with the 1 / sqrt(d_k) factor.
if self._positional_encodings == pos_encs_lib.PositionalEncodings.ALIBI:
attention += pos_encs_lib.compute_alibi_encodings_biases(
attention.shape[1:]
)
if (
self._positional_encodings
== pos_encs_lib.PositionalEncodings.NOISY_ALIBI
):
# We type hint the params to match the if statement, for pytype.
self._positional_encodings_params: pos_encs_lib.NoisyAlibiParams
attention += pos_encs_lib.compute_noisy_alibi_encodings_biases(
attention.shape[1:],
noise_max_length=self._positional_encodings_params.noise_max_length,
randomize_both_sides=self._positional_encodings_params.randomize_both_sides,
)
if self._attention_window is not None:
# We compute the sliding attention by just applying a mask on the values
# that are outside our window.
attention_mask = transformer_utils.compute_sliding_window_mask(
sequence_length, self._attention_window
)
attention = jnp.where(
attention_mask, attention, jnp.finfo(jnp.float32).min
)
if mask is not None:
attention = jnp.where(mask, attention, jnp.finfo(jnp.float32).min)
normalized_attention = jnn.softmax(attention)
output = jnp.einsum('bhtT,bThd->bthd', normalized_attention, v)
output = jnp.reshape(output, (batch_size, sequence_length, num_hiddens))
return hk.Linear(embedding_size, with_bias=False)(output)
class TransformerInit(hk.Module):
"""Helper class to avoid repeating the same __init__."""
def __init__(self, config: TransformerConfig):
"""Initializes the module."""
super().__init__()
self._config = config
if self._config.use_lookup_embeddings and self._config.bos_token is None:
raise ValueError("Can't use lookup embeddings with a zero bos_token.")
class TransformerEmbedder(TransformerInit):
"""A module to embed sequences and add positional encodings if needed."""
def embed_sequences(self, sequences: chex.Array) -> chex.Array:
"""Returns embedded sequences, following a linear operation or hk.Embed."""
embs_init = hk.initializers.TruncatedNormal(
stddev=self._config.emb_init_scale
)
if self._config.use_lookup_embeddings:
embeddings_layer = hk.Embed(
vocab_size=self._config.input_vocab_size,
embed_dim=self._config.embedding_dim,
lookup_style=hk.EmbedLookupStyle.ARRAY_INDEX,
w_init=embs_init,
)
integer_sequences = jnp.argmax(sequences, axis=-1)
embeddings = embeddings_layer(integer_sequences)
else:
embeddings_layer = hk.Linear(
self._config.embedding_dim,
with_bias=False,
w_init=embs_init,
)
embeddings = embeddings_layer(sequences)
embeddings *= jnp.sqrt(self._config.embedding_dim)
return embeddings
def add_positional_encodings(self, embeddings: chex.Array) -> chex.Array:
"""Returns new embeddings, which have been added positional encodings.
The shape of the returned array is (B, T, E), where E is the dimension of
the embeddings (if any are used, otherwise E = F).
Args:
embeddings: A batch of embeddings, of shape (B, T, F).
"""
chex.assert_rank(embeddings, 3)
_, sequence_length, embedding_size = embeddings.shape
pos_enc_params = self._config.positional_encodings_params
if (
self._config.positional_encodings
== pos_encs_lib.PositionalEncodings.SIN_COS
):
pos_enc_params: pos_encs_lib.SinCosParams
pos_encodings = pos_encs_lib.sinusoid_position_encoding(
sequence_length=sequence_length,
hidden_size=embedding_size,
max_timescale=pos_enc_params.max_time,
)
h = embeddings + pos_encodings
if self._config.dropout_prob is not None:
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
elif (
self._config.positional_encodings
== pos_encs_lib.PositionalEncodings.NOISY_SIN_COS
):
pos_enc_params: pos_encs_lib.NoisySinCosParams
if pos_enc_params.noise_max_length > pos_enc_params.max_time:
logging.warning(
(
'noise_max_length=%i is larger than max_time=%i, some '
'positional encodings will be equal.'
),
pos_enc_params.noise_max_length,
pos_enc_params.max_time,
)
pos_encodings = pos_encs_lib.sinusoid_position_encoding(
sequence_length=pos_enc_params.noise_max_length,
hidden_size=embedding_size,
max_timescale=pos_enc_params.max_time,
)
pos_encodings = jnp.array(pos_encodings)
pos_encodings = pos_encs_lib.noisy_fixed_positional_encodings(
pos_encodings, sequence_length
)
h = embeddings + pos_encodings
if self._config.dropout_prob is not None:
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
elif (
self._config.positional_encodings
== pos_encs_lib.PositionalEncodings.LEARNT
):
pos_enc_params: pos_encs_lib.LearntParams
pos_encodings = jnp.arange(sequence_length)
pos_encodings = hk.Embed(
vocab_size=pos_enc_params.max_sequence_length,
embed_dim=embedding_size,
)(pos_encodings)
h = embeddings + pos_encodings
if self._config.dropout_prob is not None:
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
elif (
self._config.positional_encodings
== pos_encs_lib.PositionalEncodings.NOISY_LEARNT
):
pos_enc_params: pos_encs_lib.NoisyLearntParams
pos_encodings = jnp.arange(pos_enc_params.noise_max_length)
pos_encodings = hk.Embed(
vocab_size=pos_enc_params.noise_max_length, embed_dim=embedding_size
)(pos_encodings)
pos_encodings = pos_encs_lib.noisy_fixed_positional_encodings(
pos_encodings, sequence_length
)
h = embeddings + pos_encodings
if self._config.dropout_prob is not None:
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
else:
h = embeddings
return h
class TransformerEncoder(TransformerInit):
"""Transformer Encoder (Vaswani et al., 2017)."""
def __call__(self, inputs: jnp.ndarray) -> chex.Array:
"""Returns the transformer encoder output, shape [B, T, E]."""
batch_size, sequence_length = inputs.shape[:2]
# Embeds the inputs, adds positional encodings.
embedder = TransformerEmbedder(self._config)
embeddings = embedder.embed_sequences(inputs)
h = embedder.add_positional_encodings(embeddings)
# The causal mask is shared across heads.
if self._config.encoder_causal_masking:
causal_mask = jnp.tril(
jnp.ones((batch_size, 1, sequence_length, sequence_length))
)
else:
causal_mask = None
for _ in range(self._config.num_layers):
attention = MultiHeadDotProductAttention(
num_heads=self._config.num_heads,
num_hiddens_per_head=self._config.num_hiddens_per_head,
positional_encodings=self._config.positional_encodings,
positional_encodings_params=self._config.positional_encodings_params,
attention_window=self._config.attention_window,
)(
inputs_q=h,
inputs_kv=h,
mask=causal_mask,
causal=self._config.encoder_causal_masking,
)
if self._config.dropout_prob is not None:
attention = hk.dropout(
hk.next_rng_key(), self._config.dropout_prob, attention
)
attention = transformer_utils.layer_norm(h + attention)
# Position-wise feedforward network.
h = hk.Linear(self._config.embedding_dim * self._config.widening_factor)(
attention
)
h = self._config.activation_fn(h)
h = hk.Linear(self._config.embedding_dim)(h)
if self._config.dropout_prob is not None:
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
h = transformer_utils.layer_norm(h + attention)
return h
class ChunkedTransformerEncoder(TransformerInit):
"""A Transformer encoder that can handle large histories via chunks.
We chunk the inputs, moving from a shape (B, T, F) to a shape (B, T/C, C, F),
where C is the length of the chunk. Note that T must be a multiple of C for it
to work. The chunks are then passed independently to the encoder, and all the
outputs are then concatenated together, to return a shape (B, T, E), where E
is the embedding_dim of the TransformerEncoder, see class above.
"""
def __call__(self, inputs: chex.Array) -> jnp.ndarray:
"""Calls the chunked transformer encoder."""
batch_size, history_len = inputs.shape[:2]
inputs = transformer_utils.chunk_sequences(
inputs, chunk_length=self._config.chunk_context_length
)
outputs = TransformerEncoder(self._config)(inputs=inputs)
return jnp.reshape(outputs, (batch_size, history_len, outputs.shape[-1]))
CallableTransformer = Union[
ChunkedTransformerEncoder,
TransformerEncoder,
]
def make_transformer(
output_size: int,
transformer_module: type[CallableTransformer],
return_all_outputs: bool = False,
**transformer_kwargs,
) -> Any:
"""Returns a transformer predict function."""
if 'positional_encodings' in transformer_kwargs:
if isinstance(transformer_kwargs['positional_encodings'], str):
transformer_kwargs['positional_encodings_params'] = (
pos_encs_lib.POS_ENC_PARAMS_TABLE[
transformer_kwargs['positional_encodings']
](**transformer_kwargs['positional_encodings_params'])
)
transformer_kwargs['positional_encodings'] = pos_encs_lib.POS_ENC_TABLE[
transformer_kwargs['positional_encodings']
]
config = TransformerConfig(**transformer_kwargs)
def transformer(*args, **kwargs) -> chex.Array:
output = transformer_module(config=config)(*args, **kwargs)
if not return_all_outputs:
output = output[:, -1, :]
return hk.Linear(output_size)(output)
return transformer
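# A hedged usage sketch (not part of the original file): the hyperparameters
# below are arbitrary example values, and `dummy_inputs` is a hypothetical
# batch of shape (batch_size, sequence_length, num_features).
#
#   forward = make_transformer(
#       output_size=2,
#       transformer_module=TransformerEncoder,
#       embedding_dim=64,
#       num_layers=2,
#       num_heads=4,
#       positional_encodings='NOISY_SIN_COS',
#       positional_encodings_params={'noise_max_length': 2048},
#   )
#   model = hk.transform(forward)
#   params = model.init(jax.random.PRNGKey(0), dummy_inputs)
#   logits = model.apply(params, jax.random.PRNGKey(1), dummy_inputs)  # (B, 2)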
| randomized_positional_encodings-main | models/transformer.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Positional encodings, used in `transformer.py`."""
import enum
import functools
import math
from typing import Any, Optional, Union
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jax.random as jrandom
import numpy as np
class PositionalEncodings(enum.Enum):
"""Enum for all the positional encodings implemented."""
NONE = 0
SIN_COS = 1
ALIBI = 2
RELATIVE = 3
ROTARY = 4
LEARNT = 5
NOISY_SIN_COS = 6
NOISY_RELATIVE = 7
NOISY_LEARNT = 8
NOISY_ROTARY = 9
NOISY_ALIBI = 10
@chex.dataclass
class SinCosParams:
"""Parameters for the classical sin/cos positional encoding."""
# The maximum wavelength used.
max_time: int = 10_000
# We will use this same class for Rotary and Relative.
RotaryParams = SinCosParams
RelativeParams = SinCosParams
@chex.dataclass
class LearntParams:
"""Parameters for the classical sin/cos positional encoding."""
# The size of the embedding matrix to use.
max_sequence_length: int
@chex.dataclass
class NoisySinCosParams:
"""Parameters for the noisy sin/cos positional encoding."""
# The maximum length to sample.
noise_max_length: int
# The maximum wavelength used.
max_time: int = 10_000
@chex.dataclass
class NoisyRelativeParams:
"""Parameters for the noisy relative positional encoding."""
# The maximum length to sample.
noise_max_length: int
# Either randomize the right side and keep the same encodings for the left
# part, keeping the symmetry, or randomize each side independently.
randomize_both_sides: bool = False
# The maximum wavelength used.
max_time: int = 10_000
@chex.dataclass
class NoisyLearntParams:
"""Parameters for the noisy relative positional encoding."""
# The maximum length to sample.
noise_max_length: int
@chex.dataclass
class NoisyAlibiParams:
"""Parameters for the noisy alibi positional encoding."""
# The maximum length to sample.
noise_max_length: int
# Either randomize the right side and keep the same encodings for the left
# part, maintaining symmetry, or randomize each side independently.
randomize_both_sides: bool = False
@chex.dataclass
class NoisyRotaryParams:
"""Parameters for the noisy rotary positional encoding."""
# The maximum length to sample.
noise_max_length: int
PositionalEncodingsParams = Union[
SinCosParams,
RelativeParams,
RotaryParams,
LearntParams,
NoisySinCosParams,
NoisyAlibiParams,
NoisyRelativeParams,
NoisyRotaryParams,
NoisyLearntParams,
]
POS_ENC_TABLE = {
'NONE': PositionalEncodings.NONE,
'SIN_COS': PositionalEncodings.SIN_COS,
'ALIBI': PositionalEncodings.ALIBI,
'RELATIVE': PositionalEncodings.RELATIVE,
'ROTARY': PositionalEncodings.ROTARY,
'LEARNT': PositionalEncodings.LEARNT,
'NOISY_SIN_COS': PositionalEncodings.NOISY_SIN_COS,
'NOISY_ALIBI': PositionalEncodings.NOISY_ALIBI,
'NOISY_RELATIVE': PositionalEncodings.NOISY_RELATIVE,
'NOISY_ROTARY': PositionalEncodings.NOISY_ROTARY,
'NOISY_LEARNT': PositionalEncodings.NOISY_LEARNT,
}
POS_ENC_PARAMS_TABLE = {
'NONE': SinCosParams,
'SIN_COS': SinCosParams,
'ALIBI': SinCosParams,
'RELATIVE': RelativeParams,
'ROTARY': RotaryParams,
'LEARNT': LearntParams,
'NOISY_SIN_COS': NoisySinCosParams,
'NOISY_ALIBI': NoisyAlibiParams,
'NOISY_RELATIVE': NoisyRelativeParams,
'NOISY_ROTARY': NoisyRotaryParams,
'NOISY_LEARNT': NoisyLearntParams,
}
def sinusoid_position_encoding(
sequence_length: int,
hidden_size: int,
max_timescale: float = 1e4,
add_negative_side: bool = False,
) -> np.ndarray:
"""Creates sinusoidal encodings from the original transformer paper.
The returned values are, for all i < D/2:
array[pos, i] = sin(pos / (max_timescale^(2*i / D)))
array[pos, D/2 + i] = cos(pos / (max_timescale^(2*i / D)))
Args:
sequence_length: Sequence length.
hidden_size: Dimension of the positional encoding vectors, D. Should be
even.
max_timescale: Maximum timescale for the frequency.
add_negative_side: Whether to also include the positional encodings for
negative positions.
Returns:
An array of shape [L, D] if add_negative_side is False, else [2 * L, D].
"""
if hidden_size % 2 != 0:
raise ValueError(
'The feature dimension should be even for sin/cos positional encodings.'
)
freqs = np.arange(0, hidden_size, 2)
inv_freq = max_timescale ** (-freqs / hidden_size)
pos_seq = np.arange(
start=-sequence_length if add_negative_side else 0, stop=sequence_length
)
sinusoid_inp = np.einsum('i,j->ij', pos_seq, inv_freq)
return np.concatenate([np.sin(sinusoid_inp), np.cos(sinusoid_inp)], axis=-1)
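# Small illustration (values chosen for exposition): one row per position, with
# the first half of the features holding the sin terms and the second half the
# cos terms.
#
#   sinusoid_position_encoding(sequence_length=32, hidden_size=16).shape
#   # (32, 16)
#   sinusoid_position_encoding(32, 16, add_negative_side=True).shape
#   # (64, 16)  -- positions -32, ..., 31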
def noisy_fixed_positional_encodings(
fixed_positional_encodings: chex.Array,
sequence_length: int,
rng: Optional[chex.PRNGKey] = None,
) -> chex.Array:
"""Generates noisy positional encodings from fixed positional encodings.
Randomly samples and orders sequence_length positional encodings from a wider
range [0, noise_max_length) rather than just [0, sequence_length).
  The user provides `fixed_positional_encodings`, which should span the entire
  range [0, noise_max_length).
Args:
fixed_positional_encodings: A tensor of shape (noise_max_length,
      embedding_size). This is what the encodings will be sampled from.
sequence_length: The length of the output sequence.
rng: Optional rng to use rather than hk.next_rng_key().
Returns:
A tensor of size [sequence_length, embedding_size].
"""
noise_max_length, _ = fixed_positional_encodings.shape
indexes = jrandom.choice(
rng if rng is not None else hk.next_rng_key(),
jnp.arange(noise_max_length),
shape=(sequence_length,),
replace=False,
)
indexes = jnp.sort(indexes)
encodings = fixed_positional_encodings[indexes]
return encodings
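# A hedged example (shapes chosen for illustration): sample 10 of 128 available
# sin/cos encodings, keeping them sorted by position.
#
#   full = jnp.asarray(sinusoid_position_encoding(128, 16))  # (128, 16)
#   noisy = noisy_fixed_positional_encodings(
#       full, sequence_length=10, rng=jrandom.PRNGKey(0))    # (10, 16)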
def _rel_shift_inner(logits: jax.Array, attention_length: int) -> jax.Array:
"""Shifts the relative logits.
  This is more general than the original Transformer-XL implementation as
inputs may also see the future. (The implementation does not rely on a
causal mask removing the upper-right triangle.)
Given attention length 3 and inputs:
[[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2]]
The shifted output is:
[[0, 1, 2],
[-1, 0, 1],
[-2, -1, 0]]
Args:
logits: input tensor of shape [T_q, T_v + T_q]
attention_length: T_v `int` length of the attention, should be equal to
memory size + sequence length.
Returns:
A shifted version of the input of size [T_q, T_v]. In each row, a window of
      size T_v elements is kept. The window starts at the rightmost position for
      the first row and then shifts one step to the left for each subsequent row.
"""
if logits.ndim != 2:
raise ValueError('`logits` needs to be an array of dimension 2.')
tq, total_len = logits.shape
assert total_len == tq + attention_length
  logits = jnp.reshape(logits, [total_len, tq])
logits = jax.lax.slice(logits, (1, 0), logits.shape) # logits[1:]
logits = jnp.reshape(logits, [tq, total_len - 1])
# Equiv to logits[:, :attention_length].
logits = jax.lax.slice(logits, (0, 0), (tq, attention_length))
return logits
def _rel_shift_causal(logits: jax.Array) -> jax.Array:
"""Shifts the relative logits, assuming causal attention.
Given inputs:
[[-4, -3, -2, -1],
[-4, -3, -2, -1]]
The shifted (and, later, masked) output is:
[[-3, -2, -1, 0],
[-4, -3, -2, -1]]
Args:
logits: input tensor of shape [T_q, T_v]
Returns:
A shifted version of the input of size [T_q, T_v].
"""
t1, t2 = logits.shape
# We prepend zeros on the final timescale dimension.
to_pad = jnp.zeros_like(logits[..., :1])
x = jnp.concatenate((to_pad, logits), axis=-1)
# Reshape trick to shift input.
x = jnp.reshape(x, [t2 + 1, t1])
# Remove extra time dimension and re-shape.
x = jax.lax.slice(x, [1] + [0] * (x.ndim - 1), x.shape)
return jnp.reshape(x, [t1, t2])
def relative_shift(
logits: jax.Array, attention_length: int, causal: bool = False
) -> jax.Array:
if causal:
fn = _rel_shift_causal
else:
fn = lambda t: _rel_shift_inner(t, attention_length)
return jax.vmap(jax.vmap(fn))(logits)
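# Shape sketch (illustrative values): `relative_shift` maps per-head relative
# logits of shape (b, h, t, T + t) in the non-causal case, or (b, h, t, T) in
# the causal case, to attention logits of shape (b, h, t, T).
#
#   rel = jnp.zeros((2, 4, 8, 16))    # b=2, h=4, t=8, T=8
#   relative_shift(rel, attention_length=8).shape                   # (2, 4, 8, 8)
#   rel_causal = jnp.zeros((2, 4, 8, 8))
#   relative_shift(rel_causal, attention_length=8, causal=True).shape
#   # (2, 4, 8, 8)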
def apply_rotary_encoding(
x: jnp.ndarray,
position: jnp.ndarray,
max_time: int = 10_000,
noisy: bool = False,
rng: Optional[chex.PRNGKey] = None,
) -> jnp.ndarray:
"""Applies RoPE positional encodings for the input.
Paper: https://arxiv.org/abs/2104.09864
Args:
x: The input tensor on which RoPE will be applied. Usually it is either some
queries q or some keys k.
position: The positions to use. Usually it's an arange of the maximum
length.
max_time: Constant used to scale position by in the encodings.
noisy: Whether to use the noisy version.
rng: The rng key to use if the noisy version is used.
Returns:
A tensor with the same shape as x.
"""
# Expand dims for positions to support inputs of shapes BTC or BTHC.
freq_seq = jnp.arange(x.shape[-1] // 2, dtype=jnp.float32)
freq_seq = freq_seq / (x.shape[-1] // 2)
inv_freq = max_time**-freq_seq
inv_freq = jnp.repeat(inv_freq, 2, 0)
# Produce position inputs to periodic functions.
t = position[:, :, None, None] * inv_freq[None, None, None, :]
if noisy:
t = noisy_fixed_positional_encodings(t[0, :, 0], x.shape[1], rng=rng)
t = t[None, :, None, :]
x_rot = jnp.einsum('bthd,dD->bthD', x, _rope_kernel(x.shape[-1], x.dtype))
return x * jnp.cos(t).astype(x.dtype) + jnp.sin(t).astype(x.dtype) * x_rot
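# A hedged usage sketch (shapes are illustrative): RoPE is applied separately
# to queries and keys of shape (batch, time, heads, head_dim), with positions
# broadcast over the batch dimension.
#
#   q = jnp.zeros((2, 16, 4, 32))
#   pos = jnp.arange(16)[None, :]                   # shape (1, T)
#   q_rot = apply_rotary_encoding(q, position=pos)  # shape (2, 16, 4, 32)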
def _rope_kernel(n: int, dtype: Any) -> np.ndarray:
"""Reorders the embedding dimension of an array, to make rotation easier."""
# We implement the equivalent of
# even_dims, odd_dims, = x[..., ::2], x[..., 1::2]
# return jnp.stack((-odd_dims, even_dims), axis=-1).reshape(x.shape)
# with a custom kernel for einsum. This allows the computation to execute
# on the MXU instead of producing a slow gather.
assert n % 2 == 0, n
kernel = np.zeros((n, n), dtype)
for i in range(n):
# Swap each neighbouring pair of values.
if i % 2 == 0:
kernel[i, i + 1] = 1
else:
kernel[i, i - 1] = -1
return kernel
def compute_attention_with_relative_encodings(
queries: chex.Array,
keys: chex.Array,
max_time: int = 10_000,
causal: bool = False,
) -> chex.Array:
"""Returns attention with relative positional encodings.
This code strictly follows what is described in the TransformerXL paper.
https://arxiv.org/pdf/1901.02860.pdf
Args:
queries: The queries used for attention. Shape (b, t, h, d).
keys: The keys used for attention. Shape (b, T, h, d).
max_time: Constant used to scale position by in the sin/cos encodings.
causal: Whether to use causal attention when shifting the relative logits.
Returns:
The attention logits. Shape (b, h, t, T).
"""
batch_size, k_seq_len, num_heads, num_hiddens = keys.shape
hiddens = num_hiddens * num_heads
# First compute the content logits.
content_bias = hk.get_parameter(
name='relpos_contentbias',
shape=[num_heads, num_hiddens],
init=hk.initializers.RandomNormal(stddev=0.02),
)
content_logits = jnp.einsum('bthd,bThd->bhtT', queries + content_bias, keys)
positional_encodings = sinusoid_position_encoding(
sequence_length=k_seq_len,
hidden_size=hiddens,
max_timescale=max_time,
add_negative_side=not causal,
)
positional_encodings = jnp.broadcast_to(
positional_encodings, (batch_size,) + positional_encodings.shape
)
relative_keys = hk.Linear(hiddens, with_bias=False)(positional_encodings)
relative_keys = jnp.reshape(
relative_keys, positional_encodings.shape[:-1] + (num_heads, num_hiddens)
)
# Then compute the relative part.
relative_bias = hk.get_parameter(
name='relpos_relativebias',
shape=[num_heads, num_hiddens],
init=hk.initializers.RandomNormal(stddev=0.02),
)
relative_logits = jnp.einsum(
'bthd,bThd->bhtT', queries + relative_bias, relative_keys
)
# We shift the relative logits instead of the positional encoding matrix as
# described in Appendix B of the paper (https://arxiv.org/pdf/1901.02860.pdf).
relative_logits = relative_shift(
relative_logits, attention_length=content_logits.shape[-1], causal=causal
)
assert content_logits.shape == relative_logits.shape
return content_logits + relative_logits
def compute_attention_with_noisy_relative_encodings(
queries: chex.Array,
keys: chex.Array,
noise_max_length: int,
randomize_both_sides: bool = False,
max_time: int = 10_000,
causal: bool = False,
) -> chex.Array:
"""Returns attention with *noisy* relative positional encodings.
This code follows what is described in the TransformerXL paper.
https://arxiv.org/pdf/1901.02860.pdf
However, in this version, the base positional encodings R (which are then
shifted), are randomly sampled and ordered from a wider range than the
sequence length.
Args:
queries: The queries used for attention. Shape (b, t, h, d).
keys: The keys used for attention. Shape (b, T, h, d).
noise_max_length: The maximum length used to sample the encodings.
randomize_both_sides: Whether to sample the encodings on the left and on the
right of the current token, or just sample from the left and take the
inverted ones for the right part.
max_time: Constant used to scale position by in the sin/cos encodings.
causal: Whether to use causal attention when shifting the relative logits.
Returns:
The attention logits. Shape (b, h, t, T).
"""
batch_size, k_seq_len, num_heads, num_hiddens = keys.shape
hiddens = num_hiddens * num_heads
# First compute the content logits.
content_bias = hk.get_parameter(
name='relpos_contentbias',
shape=[num_heads, num_hiddens],
init=hk.initializers.RandomNormal(stddev=0.02),
)
content_logits = jnp.einsum('bthd,bThd->bhtT', queries + content_bias, keys)
# Select random indexes.
# The indexes are in the range
# [-noise_max_length + 1, noise_max_length - 1]
right_indexes = jrandom.choice(
hk.next_rng_key(),
jnp.arange(1, noise_max_length),
shape=(k_seq_len - 1,),
replace=False,
)
right_indexes = jnp.sort(right_indexes)
if randomize_both_sides:
left_indexes = jrandom.choice(
hk.next_rng_key(),
jnp.arange(start=-noise_max_length + 1, stop=0),
shape=(k_seq_len,),
replace=False,
)
left_indexes = jnp.sort(left_indexes)
else:
left_indexes = -right_indexes[::-1]
# The leftmost index is required by position_embedding.relative_shift.
left_indexes = jnp.concatenate([jnp.zeros((1,)), left_indexes])
zero_index = jnp.zeros((1,))
indexes = jnp.concatenate([left_indexes, zero_index, right_indexes])
# We shift the indexes to the range [0, 2*noise_max_length-1], since this
# will be the range of the sin/cos. In this array, the value at index
# noise_max_length is the sin/cos encoding at position 0, which is exactly
# what we want: when doing relative attention, the token should have a fixed
# encoding of position 0 for its own position.
indexes += noise_max_length
indexes = jnp.array(indexes, dtype=jnp.int32)
positional_encodings = sinusoid_position_encoding(
sequence_length=noise_max_length,
hidden_size=hiddens,
max_timescale=max_time,
)
positional_encodings = jnp.array(positional_encodings, dtype=jnp.float32)
positional_encodings = positional_encodings[indexes]
positional_encodings = jnp.broadcast_to(
positional_encodings, (batch_size,) + positional_encodings.shape
)
relative_keys = hk.Linear(hiddens, with_bias=False)(positional_encodings)
relative_keys = jnp.reshape(
relative_keys, positional_encodings.shape[:-1] + (num_heads, num_hiddens)
)
# Then compute the relative part.
relative_bias = hk.get_parameter(
name='relpos_relativebias',
shape=[num_heads, num_hiddens],
init=hk.initializers.RandomNormal(stddev=0.02),
)
relative_logits = jnp.einsum(
'bthd,bThd->bhtT', queries + relative_bias, relative_keys
)
# We shift the relative logits instead of the positional encoding matrix as
# described in Appendix B of the paper (https://arxiv.org/pdf/1901.02860.pdf).
relative_logits = relative_shift(
relative_logits, attention_length=content_logits.shape[-1], causal=causal
)
assert content_logits.shape == relative_logits.shape
return content_logits + relative_logits
def _get_alibi_slopes(num_heads: int) -> list[float]:
"""Returns the slopes for the different attention heads.
While this does not exactly match the description of the [ALiBi
paper](https://arxiv.org/pdf/2108.12409.pdf), it corresponds to the [official
implementation](https://github.com/ofirpress/attention_with_linear_biases/blob/a06526fbfe557f9148e414b8569dcb97c7b182ba/fairseq/models/transformer.py#L742).
Args:
num_heads: The number of attention heads to create slopes for.
"""
def get_slopes_power_of_2(n):
start = 2 ** (-(2 ** -(math.log2(n) - 3)))
ratio = start
return [start * ratio**i for i in range(n)]
if math.log2(num_heads).is_integer():
return get_slopes_power_of_2(num_heads)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
return (
get_slopes_power_of_2(closest_power_of_2)
+ _get_alibi_slopes(2 * closest_power_of_2)[0::2][
: num_heads - closest_power_of_2
]
)
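# Worked example (values follow from the formula above): with 8 heads the
# slopes form the geometric sequence 1/2, 1/4, ..., 1/256.
#
#   _get_alibi_slopes(8)
#   # [0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625, 0.0078125, 0.00390625]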
def compute_alibi_encodings_biases(
attention_shape: tuple[int, ...]
) -> chex.Array:
"""Returns the biases following the ALiBi method.
This code strictly follows what is described in the ALiBi paper.
https://arxiv.org/pdf/2108.12409.pdf
Args:
attention_shape: The attention logits shape, without batch size, (h, t, T).
Returns:
The alibi biases, same shape as the input logits shape.
"""
num_heads, q_seq_len, k_seq_len = attention_shape
# Since we do not use causal masking, the upper triangle of the matrix has to
# be nonzero. Therefore, we set it equal to the lower triangle, but we also
# add a constant factor of 0.5 to the lower triangle, to (arbitrarily) break
# the symmetry (otherwise, the model cannot distinguish left and right).
alibi = np.zeros((q_seq_len, k_seq_len))
alibi -= sum(np.tri(*alibi.shape, k=-i) for i in range(1, q_seq_len))
alibi -= sum(np.tri(*alibi.T.shape, k=-i).T for i in range(1, k_seq_len))
alibi += 0.5 * np.tri(*alibi.shape, k=-1)
return alibi * jnp.array(_get_alibi_slopes(num_heads))[:, None, None]
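# Worked example (computed by hand from the code above, shown for
# illustration): with q_seq_len = k_seq_len = 3, the per-head bias pattern
# before scaling by the head slopes is
#
#   [[ 0. , -1. , -2. ],
#    [-0.5,  0. , -1. ],
#    [-1.5, -0.5,  0. ]]
#
# i.e. minus the distance to the query position, with the lower (left) triangle
# offset by +0.5 to break the left/right symmetry.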
def compute_noisy_alibi_encodings_biases(
attention_shape: tuple[int, ...],
noise_max_length: int,
randomize_both_sides: bool = False,
) -> chex.Array:
"""Returns the biases following the ALiBi method.
This code strictly follows what is described in the [ALiBi
paper](https://arxiv.org/pdf/2108.12409.pdf).
However, in this version, the biases are randomly sampled and ordered from a
wider range than the sequence length.
Args:
attention_shape: The attention logits shape, without batch size, (h, t, T).
noise_max_length: The maximum length used to sample the encodings.
randomize_both_sides: Whether to sample the encodings on the left and on the
right of the current token or just sample from the left and take the
inverted ones for the right part.
Returns:
The alibi biases, same shape as the input logits shape.
"""
num_heads, q_seq_len, k_seq_len = attention_shape
sample_positions = functools.partial(
jrandom.choice,
a=jnp.arange(1, noise_max_length),
replace=False,
)
if randomize_both_sides:
right_positions = sample_positions(
hk.next_rng_key(), shape=(k_seq_len - 1,)
)
left_positions = sample_positions(hk.next_rng_key(), shape=(q_seq_len - 1,))
right_positions = -jnp.sort(right_positions)
left_positions = jnp.sort(-left_positions)
else:
symmetric_positions = sample_positions(
hk.next_rng_key(), shape=(max(q_seq_len, k_seq_len) - 1,)
)
symmetric_positions = -jnp.sort(symmetric_positions)
right_positions = symmetric_positions[: k_seq_len - 1]
left_positions = jnp.flip(symmetric_positions)[: q_seq_len - 1]
# Since we do not use causal masking, the upper triangle of the matrix has to
# be nonzero. Therefore, we set it equal to the lower triangle if
  # `randomize_both_sides` is `False` and to randomly sampled positions
# otherwise, but we also add a constant factor of 0.5 to the lower triangle,
# to (arbitrarily) break the symmetry (otherwise, the model cannot distinguish
# left and right).
left_positions += 0.5
# We add a dummy value to make the dimensions work for
# position_embedding.relative_shift. The value will be ignored.
left_positions = jnp.concatenate((jnp.empty((1,)), left_positions))
positions = jnp.concatenate(
(left_positions, jnp.zeros((1,)), right_positions)
)
# position_embedding.relative_shift requires a four-dimensional tensor.
positions = jnp.tile(positions, (1, 1, q_seq_len, 1))
alibi = relative_shift(
positions,
attention_length=k_seq_len,
causal=False,
)
alibi = jnp.squeeze(alibi, axis=(0, 1))
return alibi * jnp.array(_get_alibi_slopes(num_heads))[:, None, None]
| randomized_positional_encodings-main | models/positional_encodings.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The setup script for setuptools.
See https://setuptools.readthedocs.io/en/latest/setuptools.html
"""
import os
import subprocess
import sys
import setuptools
from setuptools.command.build_ext import build_ext
class CMakeExtension(setuptools.Extension):
"""An extension with no sources.
We do not want distutils to handle any of the compilation (instead we rely
on CMake), so we always pass an empty list to the constructor.
"""
def __init__(self, name, sourcedir=""):
super().__init__(name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class BuildExt(build_ext):
"""Our custom build_ext command.
Uses CMake to build extensions instead of a bare compiler (e.g. gcc, clang).
"""
def run(self):
self._check_build_environment()
for ext in self.extensions:
self.build_extension(ext)
def _check_build_environment(self):
"""Check for required build tools: CMake, C++ compiler, and python dev."""
try:
subprocess.check_call(["cmake", "--version"])
except OSError as e:
ext_names = ", ".join(e.name for e in self.extensions)
raise RuntimeError(
"CMake must be installed to build" +
f"the following extensions: {ext_names}") from e
print("Found CMake")
cxx = "clang++"
if os.environ.get("CXX") is not None:
cxx = os.environ.get("CXX")
try:
subprocess.check_call([cxx, "--version"])
except OSError as e:
ext_names = ", ".join(e.name for e in self.extensions)
raise RuntimeError(
"A C++ compiler that supports c++17 must be installed to build the "
+ "following extensions: {}".format(ext_names)
+ ". We recommend: Clang version >= 7.0.0."
) from e
print("Found C++ compiler: {}".format(cxx))
def build_extension(self, ext):
extension_dir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cxx = "clang++"
if os.environ.get("CXX") is not None:
cxx = os.environ.get("CXX")
env = os.environ.copy()
cmake_args = [
f"-DPython3_EXECUTABLE={sys.executable}",
f"-DCMAKE_CXX_COMPILER={cxx}",
f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extension_dir}",
]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(
["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp,
env=env)
if os.environ.get("OPEN_SPIEL_BUILD_ALL") is not None:
# Build everything (necessary for nox tests)
subprocess.check_call(["make", f"-j{os.cpu_count()}"],
cwd=self.build_temp,
env=env)
else:
# Build only pyspiel (for pip package)
subprocess.check_call(["make", "pyspiel", f"-j{os.cpu_count()}"],
cwd=self.build_temp,
env=env)
def _get_requirements(requirements_file): # pylint: disable=g-doc-args
"""Returns a list of dependencies for setup() from requirements.txt.
Currently a requirements.txt is being used to specify dependencies. In order
to avoid specifying it in two places, we're going to use that file as the
source of truth.
"""
with open(requirements_file) as f:
return [_parse_line(line) for line in f if line]
def _parse_line(s):
"""Parses a line of a requirements.txt file."""
requirement, *_ = s.split("#")
return requirement.strip()
# Get the requirements from file. During nox tests, this is in the current
# directory, but when installing from pip it is in the parent directory
req_file = ""
if os.path.exists("requirements.txt"):
req_file = "requirements.txt"
else:
req_file = "../requirements.txt"
setuptools.setup(
name="open_spiel",
version="1.3",
license="Apache 2.0",
author="The OpenSpiel authors",
author_email="[email protected]",
description="A Framework for Reinforcement Learning in Games",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/deepmind/open_spiel",
install_requires=_get_requirements(req_file),
python_requires=">=3.7",
ext_modules=[CMakeExtension("pyspiel", sourcedir="open_spiel")],
cmdclass={"build_ext": BuildExt},
zip_safe=False,
packages=setuptools.find_packages(include=["open_spiel", "open_spiel.*"]))
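# Typical invocations (a sketch; the exact environment variables depend on the
# build configuration, see BuildExt above):
#
#   CXX=clang++ python3 setup.py build
#   python3 setup.py install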
| open_spiel-master | setup.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An integration test building and testing open_spiel wheel."""
import os
import sys
import sysconfig
import nox
def get_distutils_tempdir():
return (
f"temp.{sysconfig.get_platform()}-{sys.version_info[0]}.{sys.version_info[1]}"
)
@nox.session(python="3")
def tests(session):
"""Run the tests via nox."""
session.install("-r", "requirements.txt")
child_env = os.environ.copy()
child_env["OPEN_SPIEL_BUILD_ALL"] = "ON"
if child_env.get("OPEN_SPIEL_ENABLE_JAX") == "ON":
session.install(*child_env["OPEN_SPIEL_PYTHON_JAX_DEPS"].split())
if child_env.get("OPEN_SPIEL_ENABLE_PYTORCH") == "ON":
session.install(*child_env["OPEN_SPIEL_PYTHON_PYTORCH_DEPS"].split())
if child_env.get("OPEN_SPIEL_ENABLE_TENSORFLOW") == "ON":
session.install(*child_env["OPEN_SPIEL_PYTHON_TENSORFLOW_DEPS"].split())
if child_env.get("OPEN_SPIEL_ENABLE_PYTHON_MISC") == "ON":
session.install(*child_env["OPEN_SPIEL_PYTHON_MISC_DEPS"].split())
session.run("python3", "setup.py", "build", env=child_env)
session.run("python3", "setup.py", "install", env=child_env)
session.cd(os.path.join("build", get_distutils_tempdir()))
session.run(
"ctest", f"-j{4*os.cpu_count()}", "--output-on-failure", external=True)
| open_spiel-master | noxfile.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The existence of this file allows us to have PYTHONPATH pointing to
# the parent of this directory and then use:
# from open_spiel.python import rl_environment
| open_spiel-master | open_spiel/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for XinXin MCTS bot."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import evaluate_bots
import pyspiel
SEED = 12983641
class ISMCTSBotTest(absltest.TestCase):
def xinxin_play_game(self, game):
bots = []
for _ in range(4):
bots.append(pyspiel.make_xinxin_bot(game.get_parameters()))
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
def test_basic_xinxin_selfplay(self):
game = pyspiel.load_game("hearts")
self.xinxin_play_game(game)
if __name__ == "__main__":
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/bots/xinxin/xinxin_bot_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Joint policy denoted by the RL agents of a game."""
from typing import Dict
from open_spiel.python import policy
from open_spiel.python import rl_agent
from open_spiel.python import rl_environment
class JointRLAgentPolicy(policy.Policy):
"""Joint policy denoted by the RL agents of a game.
Given a list of RL agents of players for a game, this class can be used derive
the corresponding (joint) policy. In particular, the distribution over
possible actions will be those that are returned by the step() method of
the RL agents given the state.
"""
def __init__(self, game, agents: Dict[int, rl_agent.AbstractAgent],
use_observation: bool):
"""Initializes the joint RL agent policy.
Args:
game: The game.
agents: Dictionary of agents keyed by the player IDs.
use_observation: If true then observation tensor will be used as the
`info_state` in the step() calls; otherwise, information state tensor
will be used. See `use_observation` property of
rl_environment.Environment.
"""
player_ids = list(sorted(agents.keys()))
super().__init__(game, player_ids)
self._agents = agents
self._obs = {
"info_state": [None] * game.num_players(),
"legal_actions": [None] * game.num_players()
}
self._use_observation = use_observation
def action_probabilities(self, state, player_id=None):
if state.is_simultaneous_node():
assert player_id is not None, "Player ID should be specified."
else:
if player_id is None:
player_id = state.current_player()
else:
assert player_id == state.current_player()
# Make sure that player_id is an integer and not an enum as it is used to
# index lists.
player_id = int(player_id)
legal_actions = state.legal_actions(player_id)
self._obs["current_player"] = player_id
self._obs["info_state"][player_id] = (
state.observation_tensor(player_id)
if self._use_observation else state.information_state_tensor(player_id))
self._obs["legal_actions"][player_id] = legal_actions
info_state = rl_environment.TimeStep(
observations=self._obs, rewards=None, discounts=None, step_type=None)
p = self._agents[player_id].step(info_state, is_evaluation=True).probs
prob_dict = {action: p[action] for action in legal_actions}
return prob_dict
class RLAgentPolicy(JointRLAgentPolicy):
"""A policy for a specific agent trained in an RL environment."""
def __init__(self, game, agent: rl_agent.AbstractAgent, player_id: int,
use_observation: bool):
"""Initializes the RL agent policy.
Args:
game: The game.
agent: RL agent.
player_id: ID of the player.
use_observation: See JointRLAgentPolicy above.
"""
self._player_id = player_id
super().__init__(game, {player_id: agent}, use_observation)
def action_probabilities(self, state, player_id=None):
return super().action_probabilities(
state, self._player_id if player_id is None else player_id)
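# A hedged usage sketch (`my_trained_agent` is a hypothetical
# rl_agent.AbstractAgent trained in an rl_environment.Environment):
#
#   game = pyspiel.load_game("kuhn_poker")
#   agent_policy = RLAgentPolicy(
#       game, agent=my_trained_agent, player_id=0, use_observation=False)
#   state = game.new_initial_state()
#   ...  # advance the state to a decision node for player 0
#   probs = agent_policy.action_probabilities(state)  # {action: probability}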
| open_spiel-master | open_spiel/python/rl_agent_policy.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An observation of a game.
This is intended to be the main way to get observations of states in Python.
The usage pattern is as follows:
0. Create the game we will be playing
1. Create each kind of observation required, using `make_observation`
2. Every time a new observation is required, call:
`observation.set_from(state, player)`
The tensor contained in the Observation class will be updated with an
observation of the supplied state. This tensor is updated in-place, so if
you wish to retain it, you must make a copy.
The following options are available when creating an Observation:
- perfect_recall: if true, each observation must allow the observing player to
reconstruct their history of actions and observations.
- public_info: if true, the observation should include public information
- private_info: specifies for which players private information should be
included - all players, the observing player, or no players
- params: game-specific parameters for observations
We ultimately aim to have all games support all combinations of these arguments.
However, initially many games will only support the combinations corresponding
to ObservationTensor and InformationStateTensor:
- ObservationTensor: perfect_recall=False, public_info=True,
private_info=SinglePlayer
- InformationStateTensor: perfect_recall=True, public_info=True,
private_info=SinglePlayer
Three formats of observation are supported:
a. 1-D numpy array, accessed by `observation.tensor`
b. Dict of numpy arrays, accessed by `observation.dict`. These are pieces of the
1-D array, reshaped. The np.array objects refer to the same memory as the
1-D array (no copying!).
c. String, hopefully human-readable (primarily for debugging purposes)
For usage examples, see `observation_test.py`.
"""
import numpy as np
import pyspiel
# Corresponds to the old information_state_XXX methods.
INFO_STATE_OBS_TYPE = pyspiel.IIGObservationType(perfect_recall=True)
class _Observation:
"""Contains an observation from a game."""
def __init__(self, game, observer):
self._observation = pyspiel._Observation(game, observer)
self.dict = {}
if self._observation.has_tensor():
self.tensor = np.frombuffer(self._observation, np.float32)
offset = 0
for tensor_info in self._observation.tensors_info():
size = np.prod(tensor_info.shape, dtype=np.int64)
values = self.tensor[offset:offset + size].reshape(tensor_info.shape)
self.dict[tensor_info.name] = values
offset += size
else:
self.tensor = None
def set_from(self, state, player):
self._observation.set_from(state, player)
def string_from(self, state, player):
return (self._observation.string_from(state, player)
if self._observation.has_string() else None)
def compress(self):
return self._observation.compress()
def decompress(self, compressed_observation):
self._observation.decompress(compressed_observation)
def make_observation(
game,
imperfect_information_observation_type=None,
params=None,
):
"""Returns an _Observation instance if the imperfect_information_observation_type is supported, otherwise None."""
params = params or {}
if hasattr(game, 'make_py_observer'):
return game.make_py_observer(imperfect_information_observation_type, params)
else:
if imperfect_information_observation_type is not None:
observer = game.make_observer(
imperfect_information_observation_type, params
)
else:
observer = game.make_observer(params)
if observer is None:
return None
return _Observation(game, observer)
class IIGObserverForPublicInfoGame:
"""Observer for imperfect information obvservations of public-info games."""
def __init__(self, iig_obs_type, params):
if params:
raise ValueError(f'Observation parameters not supported; passed {params}')
self._iig_obs_type = iig_obs_type
self.tensor = None
self.dict = {}
def set_from(self, state, player):
pass
def string_from(self, state, player):
del player
if self._iig_obs_type.public_info:
return state.history_str()
else:
return '' # No private information to return
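# Editor's illustrative sketch (not part of the library): typical use of
# `make_observation`. The game name "kuhn_poker" is just an example; any game
# providing an information state tensor would work the same way.
def _example_make_observation():
  game = pyspiel.load_game("kuhn_poker")
  observation = make_observation(game, INFO_STATE_OBS_TYPE)
  state = game.new_initial_state()
  while state.is_chance_node():
    state.apply_action(state.legal_actions()[0])
  observation.set_from(state, state.current_player())
  # The tensor is updated in-place, so copy it if it needs to be retained.
  return None if observation.tensor is None else observation.tensor.copy()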
| open_spiel-master | open_spiel/python/observation.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful functions for testing."""
from typing import Optional
import numpy as np
import pyspiel
def random_playout(state: pyspiel.State, seed: Optional[int] = None):
"""Plays random actions until the state is terminal."""
rng = np.random.RandomState(seed)
while not state.is_terminal():
state.apply_action(rng.choice(state.legal_actions()))
return state
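# Editor's illustrative sketch (not part of the library): using
# `random_playout` to roll a game out to a terminal state and read the returns.
def _example_random_playout():
  game = pyspiel.load_game("tic_tac_toe")
  final_state = random_playout(game.new_initial_state(), seed=1234)
  return final_state.returns()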
| open_spiel-master | open_spiel/python/test_utils.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reinforcement Learning (RL) tools Open Spiel."""
import abc
class ValueSchedule(metaclass=abc.ABCMeta):
"""Abstract base class changing (decaying) values."""
@abc.abstractmethod
def __init__(self):
"""Initialize the value schedule."""
@abc.abstractmethod
def step(self):
"""Apply a potential change in the value.
This method should be called every time the agent takes a training step.
Returns:
the value after the step.
"""
@property
@abc.abstractmethod
def value(self):
"""Return the current value."""
class ConstantSchedule(ValueSchedule):
"""A schedule that keeps the value constant."""
def __init__(self, value):
super(ConstantSchedule, self).__init__()
self._value = value
def step(self):
return self._value
@property
def value(self):
return self._value
class LinearSchedule(ValueSchedule):
"""A simple linear schedule."""
def __init__(self, init_val, final_val, num_steps):
"""A simple linear schedule.
Once the number of steps is reached, the value is always equal to the final
value.
Arguments:
init_val: the initial value.
final_val: the final value.
num_steps: the number of steps to get from the initial to final value.
"""
super(LinearSchedule, self).__init__()
self._value = init_val
self._final_value = final_val
assert isinstance(num_steps, int)
self._num_steps = num_steps
self._steps_taken = 0
self._increment = (final_val - init_val) / num_steps
def step(self):
self._steps_taken += 1
if self._steps_taken < self._num_steps:
self._value += self._increment
elif self._steps_taken == self._num_steps:
self._value = self._final_value
return self._value
@property
def value(self):
return self._value
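# Editor's illustrative sketch (not part of the library): a typical use of
# `LinearSchedule`, e.g. decaying an exploration rate over training steps.
def _example_linear_schedule():
  epsilon = LinearSchedule(init_val=1.0, final_val=0.1, num_steps=100)
  values = [epsilon.step() for _ in range(150)]
  # The value decreases linearly for 100 steps, then stays at `final_val`.
  return values[0], values[99], values[-1]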
| open_spiel-master | open_spiel/python/rl_tools.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple network classes for Tensorflow based on tf.Module."""
import math
import tensorflow.compat.v1 as tf
# Temporarily disable TF2 behavior until code is updated.
tf.disable_v2_behavior()
# This code is based directly on the TF docs:
# https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/Module
class Linear(tf.Module):
"""A simple linear module.
Always includes biases and only supports ReLU activations.
"""
def __init__(self, in_size, out_size, activate_relu=True, name=None):
"""Creates a linear layer.
Args:
in_size: (int) number of inputs
out_size: (int) number of outputs
activate_relu: (bool) whether to include a ReLU activation layer
name: (string): the name to give to this layer
"""
super(Linear, self).__init__(name=name)
self._activate_relu = activate_relu
# Weight initialization inspired by Sonnet's Linear layer,
# which cites https://arxiv.org/abs/1502.03167v3
stddev = 1.0 / math.sqrt(in_size)
self._weights = tf.Variable(
tf.random.truncated_normal([in_size, out_size], mean=0.0,
stddev=stddev),
name="weights")
self._bias = tf.Variable(tf.zeros([out_size]), name="bias")
def __call__(self, tensor):
y = tf.matmul(tensor, self._weights) + self._bias
return tf.nn.relu(y) if self._activate_relu else y
class Sequential(tf.Module):
"""A simple sequential module.
Applies the given layers in order; the layers themselves define any biases and activations.
"""
def __init__(self, layers, name=None):
"""Creates a model from successively applying layers.
Args:
layers: Iterable[tf.Module] that can be applied.
name: (string): the name to give to this layer
"""
super(Sequential, self).__init__(name=name)
self._layers = layers
def __call__(self, tensor):
for layer in self._layers:
tensor = layer(tensor)
return tensor
class MLP(tf.Module):
"""A simple dense network built from linear layers above."""
def __init__(self,
input_size,
hidden_sizes,
output_size,
activate_final=False,
name=None):
"""Create the MLP.
Args:
input_size: (int) number of inputs
hidden_sizes: (list) sizes (number of units) of each hidden layer
output_size: (int) number of outputs
activate_final: (bool) whether the final layer should include a ReLU
name: (string): the name to give to this network
"""
super(MLP, self).__init__(name=name)
self._layers = []
with self.name_scope:
# Hidden layers
for size in hidden_sizes:
self._layers.append(Linear(in_size=input_size, out_size=size))
input_size = size
# Output layer
self._layers.append(
Linear(
in_size=input_size,
out_size=output_size,
activate_relu=activate_final))
@tf.Module.with_name_scope
def __call__(self, x):
for layer in self._layers:
x = layer(x)
return x
class MLPTorso(tf.Module):
"""A specialized half-MLP module when constructing multiple heads.
Note that every layer includes a ReLU non-linearity activation.
"""
def __init__(self, input_size, hidden_sizes, name=None):
super(MLPTorso, self).__init__(name=name)
self._layers = []
with self.name_scope:
for size in hidden_sizes:
self._layers.append(Linear(in_size=input_size, out_size=size))
input_size = size
@tf.Module.with_name_scope
def __call__(self, x):
for layer in self._layers:
x = layer(x)
return x
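# Editor's illustrative sketch (not part of the library): constructing an MLP
# and running a forward pass with the TF1 session API used in this file.
def _example_mlp_forward():
  net = MLP(input_size=4, hidden_sizes=[16, 16], output_size=2)
  inputs = tf.placeholder(shape=[None, 4], dtype=tf.float32)
  logits = net(inputs)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    return sess.run(logits, feed_dict={inputs: [[0.0, 1.0, 2.0, 3.0]]})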
| open_spiel-master | open_spiel/python/simple_nets.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A vectorized RL Environment."""
class SyncVectorEnv(object):
"""A vectorized RL Environment.
This environment is synchronized - games do not execute in parallel. Speedups
are realized by calling models on many game states simultaneously.
"""
def __init__(self, envs):
if not isinstance(envs, list):
raise ValueError(
"Need to call this with a list of rl_environment.Environment objects")
self.envs = envs
def __len__(self):
return len(self.envs)
def observation_spec(self):
return self.envs[0].observation_spec()
@property
def num_players(self):
return self.envs[0].num_players
def step(self, step_outputs, reset_if_done=False):
"""Apply one step.
Args:
step_outputs: the step outputs
reset_if_done: if True, automatically reset the environment
when the episode ends.
Returns:
time_steps: the time steps (after any resets),
reward: the per-environment rewards,
done: the per-environment done flags,
unreset_time_steps: the time steps before any reset was applied.
"""
time_steps = [
self.envs[i].step([step_outputs[i].action])
for i in range(len(self.envs))
]
reward = [step.rewards for step in time_steps]
done = [step.last() for step in time_steps]
# Keep a reference to the pre-reset time steps so callers can still
# extract end-of-episode information from them.
unreset_time_steps = time_steps
if reset_if_done:
time_steps = self.reset(envs_to_reset=done)
return time_steps, reward, done, unreset_time_steps
def reset(self, envs_to_reset=None):
if envs_to_reset is None:
envs_to_reset = [True for _ in range(len(self.envs))]
time_steps = [
self.envs[i].reset()
if envs_to_reset[i] else self.envs[i].get_time_step()
for i in range(len(self.envs))
]
return time_steps
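# Editor's illustrative sketch (not part of the library): batching several
# single-player environments. The inline imports and the game name "catch"
# are assumptions for illustration; `step_outputs` would normally come from
# an RL agent's `step()` call rather than being built by hand.
def _example_sync_vector_env(batch_size=4):
  from open_spiel.python import rl_agent
  from open_spiel.python import rl_environment
  envs = SyncVectorEnv(
      [rl_environment.Environment("catch") for _ in range(batch_size)])
  time_steps = envs.reset()
  step_outputs = [
      rl_agent.StepOutput(
          action=ts.observations["legal_actions"][0][0], probs=None)
      for ts in time_steps
  ]
  return envs.step(step_outputs, reset_if_done=True)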
| open_spiel-master | open_spiel/python/vector_env.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Representation of a policy for a game.
This is a standard representation for passing policies into algorithms,
currently with the following implementations:
TabularPolicy - an explicit policy per state, stored in an array
of shape `(num_states, num_actions)`, convenient for tabular policy
solution methods.
UniformRandomPolicy - a uniform distribution over all legal actions for
the specified player. This is computed as needed, so can be used for
games where a tabular policy would be unfeasibly large.
The main way of using a policy is to call `action_probabilities(state,
player_id)`, to obtain a dict of {action: probability}. `TabularPolicy`
objects expose a lower-level interface, which may be more efficient for
some use cases.
"""
import itertools
from typing import Iterable
import numpy as np
from open_spiel.python.algorithms import get_all_states
import pyspiel
def child(state, action):
"""Returns a child state, handling the simultaneous node case."""
if isinstance(action, Iterable):
child_state = state.clone()
child_state.apply_actions(action)
return child_state
else:
return state.child(action)
def joint_action_probabilities_aux(state, policy):
"""Auxiliary function for joint_action_probabilities.
Args:
state: a game state at a simultaneous decision node.
policy: policy that gives the probability distribution over the legal
actions for each players.
Returns:
actions_per_player: list of list of actions for each player
probs_per_player: list of lists of probabilities of taking the corresponding
action in actions_per_player, for each player.
"""
assert state.is_simultaneous_node()
action_probs_per_player = [
policy.action_probabilities(state, player)
for player in range(state.get_game().num_players())
]
actions_per_player = [pi.keys() for pi in action_probs_per_player]
probs_per_player = [pi.values() for pi in action_probs_per_player]
return actions_per_player, probs_per_player
def joint_action_probabilities(state, policy):
"""Yields action, probability pairs for a joint policy in simultaneous state.
Args:
state: a game state at a simultaneous decision node.
policy: policy that gives the probability distribution over the legal
actions for each players.
Yields:
(action, probability) pairs. An action is a tuple of individual
actions for each player of the game. The probability is a single joint
probability (product of all the individual probabilities).
"""
actions_per_player, probs_per_player = joint_action_probabilities_aux(
state, policy)
for actions, probs in zip(
itertools.product(*actions_per_player),
itertools.product(*probs_per_player)):
yield actions, np.prod(probs)
class Policy:
"""Base class for policies.
A policy is something that returns a distribution over possible actions
given a state of the world.
Attributes:
game: the game for which this policy applies
player_ids: list of player ids for which this policy applies; each in the
interval [0..game.num_players()-1].
"""
def __init__(self, game, player_ids):
"""Initializes a policy.
Args:
game: the game for which this policy applies
player_ids: list of player ids for which this policy applies; each should
be in the range 0..game.num_players()-1.
"""
self.game = game
self.player_ids = player_ids
def action_probabilities(self, state, player_id=None):
"""Returns a dictionary {action: prob} for all legal actions.
IMPORTANT: We assume the following properties hold:
- All probabilities are >=0 and sum to 1
- TLDR: Policy implementations should list the (action, prob) for all legal
actions, but algorithms should not rely on this (yet).
Details: Before May 2020, only legal actions were present in the mapping,
but it did not have to be exhaustive: missing actions were considered to
be associated to a zero probability.
For example, a deterministic state-policy was previously {action: 1.0}.
Given this change of convention is new and hard to enforce, algorithms
should not rely on the fact that all legal actions should be present.
Args:
state: A `pyspiel.State` object.
player_id: Optional, the player id for whom we want an action. Optional
unless this is a simultaneous state at which multiple players can act.
Returns:
A `dict` of `{action: probability}` for the specified player in the
supplied state.
"""
raise NotImplementedError()
def __call__(self, state, player_id=None):
"""Turns the policy into a callable.
Args:
state: The current state of the game.
player_id: Optional, the player id for whom we want an action. Optional
unless this is a simultaneous state at which multiple players can act.
Returns:
Dictionary of action: probability.
"""
return self.action_probabilities(state, player_id)
def to_tabular(self, states=None):
"""Returns a new `TabularPolicy` equivalent to this policy.
Args:
states: States of the game that will be used for the tabular policy. If
None, then get_tabular_policy_states() method will be used to generate
them.
Returns:
a TabularPolicy.
"""
states = states or get_tabular_policy_states(self.game)
tabular_policy = TabularPolicy(self.game, self.player_ids, states=states)
for index, state in enumerate(tabular_policy.states):
tabular_policy.action_probability_array[index, :] = 0
for action, probability in self.action_probabilities(state).items():
tabular_policy.action_probability_array[index, action] = probability
return tabular_policy
class TabularPolicy(Policy):
"""Policy implementation where the policy is in explicit tabular form.
In addition to implementing the `Policy` interface, this class exposes
details of the policy representation for easy manipulation.
The states are guaranteed to be grouped by player, which can simplify
code for users of this class, i.e. `action_probability_array` contains
states for player 0 first, followed by states for player 1, etc.
The policy uses `state.information_state_string` as the keys if available,
otherwise `state.observation_string`.
Usages:
- Set `policy(info_state, action)`:
```
tabular_policy = TabularPolicy(game)
info_state_str = state.information_state_string(<optional player>)
state_policy = tabular_policy.policy_for_key(info_state_str)
state_policy[action] = <value>
```
- Set `policy(info_state)`:
```
tabular_policy = TabularPolicy(game)
info_state_str = state.information_state_string(<optional player>)
state_policy = tabular_policy.policy_for_key(info_state_str)
state_policy[:] = <list or numpy.array>
```
Attributes:
action_probability_array: array of shape `(num_states, num_actions)`, where
`action_probability_array[s, a]` is the probability of choosing action `a`
when at state `s`.
state_lookup: `dict` mapping state key string to index into the
`tabular_policy` array. If information state strings overlap, e.g. for
different players or if the information state string has imperfect recall,
then those states will be mapped to the same policy.
legal_actions_mask: array of shape `(num_states, num_actions)`, each row
representing which of the possible actions in the game are valid in this
particular state, containing 1 for valid actions, 0 for invalid actions.
states_per_player: A `list` per player of the state key strings at which
they have a decision to make.
states: A `list` of the states as ordered in the `action_probability_array`.
state_in: array of shape `(num_states, state_vector_size)` containing the
normalised vector representation of each information state. Populated only
for games which support information_state_tensor(), and is None otherwise.
game_type: The game attributes as returned by `Game::GetType`; used to
determine whether to use information state or observation as the key in
the tabular policy.
"""
def __init__(self,
game,
players=None,
to_string=lambda s: s.history_str(),
states=None):
"""Initializes a uniform random policy for all players in the game."""
players = sorted(players or range(game.num_players()))
super().__init__(game, players)
self.game_type = game.get_type()
# Get all states in the game at which players have to make decisions unless
# they are explicitly specified.
states = states or get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False,
include_mean_field_states=False,
to_string=to_string)
# Assemble legal actions for every valid (state, player) pair, keyed by
# information state string.
self.state_lookup = {}
self.states_per_player = [[] for _ in range(game.num_players())]
self.states = []
legal_actions_list = []
state_in_list = []
for player in players:
# States are ordered by their history.
for _, state in sorted(states.items(), key=lambda pair: pair[0]):
if state.is_simultaneous_node() or player == state.current_player():
legal_actions = state.legal_actions_mask(player)
if any(legal_actions):
key = self._state_key(state, player)
if key not in self.state_lookup:
state_index = len(legal_actions_list)
self.state_lookup[key] = state_index
legal_actions_list.append(legal_actions)
self.states_per_player[player].append(key)
self.states.append(state)
if self.game_type.provides_information_state_tensor:
state_in_list.append(state.information_state_tensor(player))
elif self.game_type.provides_observation_tensor:
state_in_list.append(state.observation_tensor(player))
# Put legal action masks in a numpy array and create the uniform random
# policy.
self.state_in = None
if state_in_list:
self.state_in = np.array(state_in_list)
self.legal_actions_mask = np.array(legal_actions_list)
self.action_probability_array = (
self.legal_actions_mask /
np.sum(self.legal_actions_mask, axis=-1, keepdims=True))
def _state_key(self, state, player):
"""Returns the key to use to look up this (state, player) pair."""
if self.game_type.provides_information_state_string:
if player is None:
return state.information_state_string()
return state.information_state_string(player)
if self.game_type.provides_observation_string:
if player is None:
return state.observation_string()
return state.observation_string(player)
return str(state)
def action_probabilities(self, state, player_id=None):
"""Returns an {action: probability} dict, covering all legal actions."""
legal_actions = (
state.legal_actions()
if player_id is None else state.legal_actions(player_id))
if not legal_actions:
return {0: 1.0}
probability = self.policy_for_key(self._state_key(state, player_id))
return {action: probability[action] for action in legal_actions}
def state_index(self, state):
"""Returns the index in the TabularPolicy associated to `state`."""
return self.state_lookup[self._state_key(state, state.current_player())]
def policy_for_key(self, key):
"""Returns the policy as a vector given a state key string.
Args:
key: A key for the specified state.
Returns:
A vector of probabilities, one per action. This is a slice of the
backing policy array, and so slice or index assignment will update the
policy. For example:
```
tabular_policy.policy_for_key(s)[:] = [0.1, 0.5, 0.4]
```
"""
return self.action_probability_array[self.state_lookup[key]]
def to_dict(self):
"""Returns a single dictionary representing the tabular policy.
Returns:
A dictionary of string keys to lists of (action, prob) pairs.
"""
policy_dict = {}
num_actions = self.action_probability_array.shape[1]
for infostate_key, index in self.state_lookup.items():
probs = self.action_probability_array[index]
actions_and_probs = [(a, probs[a]) for a in range(num_actions)]
policy_dict[infostate_key] = actions_and_probs
return policy_dict
def __copy__(self, copy_action_probability_array=True):
"""Returns a shallow copy of self.
Most class attributes will be pointers to the copied object's attributes,
and therefore altering them could lead to unexpected behavioural changes.
Only action_probability_array is expected to be modified.
Args:
copy_action_probability_array: Whether to also include
action_probability_array in the copy operation.
Returns:
Copy.
"""
result = TabularPolicy.__new__(TabularPolicy)
result.state_lookup = self.state_lookup
result.game_type = self.game_type
result.legal_actions_mask = self.legal_actions_mask
result.state_in = self.state_in
result.state_lookup = self.state_lookup
result.states_per_player = self.states_per_player
result.states = self.states
result.game = self.game
result.player_ids = self.player_ids
if copy_action_probability_array:
result.action_probability_array = np.copy(self.action_probability_array)
return result
def copy_with_noise(self,
alpha=0.0,
beta=0.0,
random_state=np.random.RandomState()):
"""Returns a copy of this policy perturbed with noise.
Generates a new random distribution using a softmax on normal random
variables with temperature beta, and mixes it with the old distribution
using (1 - alpha) * old_distribution + alpha * random_distribution.
Args:
alpha: Parameter characterizing the mixture amount between new and old
distributions. Between 0 and 1.
alpha = 0: keep old table.
alpha = 1: keep random table.
beta: Temperature of the softmax. Makes for more extreme policies.
random_state: A numpy `RandomState` object. If not provided, a shared
random state will be used.
Returns:
Perturbed copy.
"""
copied_instance = self.__copy__(False)
probability_array = self.action_probability_array
noise_mask = random_state.normal(size=probability_array.shape)
noise_mask = np.exp(beta * noise_mask) * self.legal_actions_mask
noise_mask = noise_mask / (np.sum(noise_mask, axis=1).reshape(-1, 1))
copied_instance.action_probability_array = (
1 - alpha) * probability_array + alpha * noise_mask
return copied_instance
class UniformRandomPolicy(Policy):
"""Policy where the action distribution is uniform over all legal actions.
This is computed as needed, so can be used for games where a tabular policy
would be unfeasibly large, but incurs a legal action computation every time.
"""
def __init__(self, game):
"""Initializes a uniform random policy for all players in the game."""
all_players = list(range(game.num_players()))
super().__init__(game, all_players)
def action_probabilities(self, state, player_id=None):
"""Returns a uniform random policy for a player in a state.
Args:
state: A `pyspiel.State` object.
player_id: Optional, the player id for which we want an action. Optional
unless this is a simultaneous state at which multiple players can act.
Returns:
A `dict` of `{action: probability}` for the specified player in the
supplied state. This will contain all legal actions, each with the same
probability, equal to 1 / num_legal_actions.
"""
legal_actions = (
state.legal_actions()
if player_id is None else state.legal_actions(player_id))
if not legal_actions:
return {0: 1.0}
probability = 1 / len(legal_actions)
return {action: probability for action in legal_actions}
class FirstActionPolicy(Policy):
"""A policy that always takes the lowest-numbered legal action."""
def __init__(self, game):
all_players = list(range(game.num_players()))
super().__init__(game, all_players)
def action_probabilities(self, state, player_id=None):
legal_actions = (
state.legal_actions()
if player_id is None else state.legal_actions(player_id))
if not legal_actions:
return {0: 1.0}
min_action = min(legal_actions)
return {
action: 1.0 if action == min_action else 0.0 for action in legal_actions
}
def get_tabular_policy_states(game):
"""Returns the states of the game for a tabular policy."""
if game.get_type().dynamics == pyspiel.GameType.Dynamics.MEAN_FIELD:
# TODO(author18): We use s.observation_string(DEFAULT_MFG_PLAYER) here as the
# number of histories is exponential in the depth of the MFG. What we really
# need is a representation of the state. For many player Mean Field games,
# the state will be (x0, x1, x2, ..., xn) and the observation_string(0) will
# output the string of x0. In that case we would need something like
# str([observation_string(i) for i in range(num_player)])
to_string = lambda s: s.observation_string(pyspiel.PlayerId.
DEFAULT_PLAYER_ID)
else:
to_string = lambda s: s.history_str()
return get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False,
include_mean_field_states=False,
to_string=to_string)
def tabular_policy_from_callable(game, callable_policy, players=None):
"""Converts a legacy callable policy into a TabularPolicy.
Recommendation - instead of using this to convert your policy for evaluation
purposes, work directly with a `TabularPolicy` if possible.
Second choice - work with a `Policy` class and call `to_tabular` as needed.
Args:
game: The game for which we want a TabularPolicy.
callable_policy: A callable: state -> action probabilities dict or list.
players: List of players this policy applies to. If `None`, applies to all
players.
Returns:
A TabularPolicy that materializes the callable policy.
"""
tabular_policy = TabularPolicy(game, players)
for state_index, state in enumerate(tabular_policy.states):
action_probabilities = dict(callable_policy(state))
infostate_policy = [
action_probabilities.get(action, 0.)
for action in range(game.num_distinct_actions())
]
tabular_policy.action_probability_array[state_index, :] = infostate_policy
return tabular_policy
def pyspiel_policy_to_python_policy(game, pyspiel_tabular_policy, players=None):
"""Converts a pyspiel.TabularPolicy to a TabularPolicy.
Args:
game: The OpenSpiel game.
pyspiel_tabular_policy: Pyspiel tabular policy to copy from.
players: List of integer player ids to copy policy from. For example,
`players=[0]` will only copy player 0's policy over into the python policy
(the other player's policies will be undefined). Default value of `None`
will copy all players' policies.
Returns:
python_policy
"""
policy = TabularPolicy(game, players=players)
for item in pyspiel_tabular_policy.policy_table().items():
info_state_str, actions_probs = item
# If requested, only populate a policy for particular players.
if players is not None and info_state_str not in policy.state_lookup:
continue
state_policy = policy.policy_for_key(info_state_str)
for action, prob in actions_probs:
state_policy[action] = prob
return policy
def python_policy_to_pyspiel_policy(python_tabular_policy):
"""Converts a TabularPolicy to a pyspiel.TabularPolicy."""
infostates_to_probabilities = dict()
for infostate, index in python_tabular_policy.state_lookup.items():
probs = python_tabular_policy.action_probability_array[index]
legals = python_tabular_policy.legal_actions_mask[index]
action_probs = []
for action, (prob, is_legal) in enumerate(zip(probs, legals)):
if is_legal == 1:
action_probs.append((action, prob))
infostates_to_probabilities[infostate] = action_probs
return pyspiel.TabularPolicy(infostates_to_probabilities)
def python_policies_to_pyspiel_policies(policies):
"""Same conversion as above (list version).
Args:
policies: a list of python.TabularPolicy
Returns:
a list of pyspiel.TabularPolicy.
"""
return [python_policy_to_pyspiel_policy(p) for p in policies]
def merge_tabular_policies(tabular_policies, game):
"""Merges n_player policies into single joint policy.
Missing states are filled with a valid uniform policy.
Args:
tabular_policies: List of python TabularPolicy (one for each player).
game: The game corresponding to the resulting TabularPolicy.
Returns:
merged_policy: A TabularPolicy with each player i's policy taken from the
ith joint_policy.
"""
if len(tabular_policies) != game.num_players():
raise ValueError("len(tabular_policies) != num_players: %d != %d" %
(len(tabular_policies), game.num_players()))
merged_policy = TabularPolicy(game)
for p, p_states in enumerate(merged_policy.states_per_player):
for p_state in p_states:
to_index = merged_policy.state_lookup[p_state]
# Only copy if the state exists, otherwise fall back onto uniform.
if p_state in tabular_policies[p].state_lookup:
from_index = tabular_policies[p].state_lookup[p_state]
merged_policy.action_probability_array[to_index] = (
tabular_policies[p].action_probability_array[from_index])
return merged_policy
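# Editor's illustrative sketch (not part of the library): materializing a
# uniform policy as a TabularPolicy and converting it to the C++ form. The
# game name "kuhn_poker" is only an example.
def _example_policy_conversions():
  game = pyspiel.load_game("kuhn_poker")
  tabular = UniformRandomPolicy(game).to_tabular()
  cpp_policy = python_policy_to_pyspiel_policy(tabular)
  # Shape is (num_states, num_distinct_actions); 12 x 2 for Kuhn poker.
  return tabular.action_probability_array.shape, cpp_policy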
| open_spiel-master | open_spiel/python/policy.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reinforcement Learning (RL) Agent Base for Open Spiel."""
import abc
import collections
StepOutput = collections.namedtuple("step_output", ["action", "probs"])
class AbstractAgent(metaclass=abc.ABCMeta):
"""Abstract base class for Open Spiel RL agents."""
@abc.abstractmethod
def __init__(self,
player_id,
session=None,
observation_spec=None,
name="agent",
**agent_specific_kwargs):
"""Initializes agent.
Args:
player_id: integer, mandatory. Corresponds to the player position in the
game and is used to index the observation list.
session: optional Tensorflow session.
observation_spec: optional dict containing observation specifications.
name: string. Must be used to scope TF variables. Defaults to `agent`.
**agent_specific_kwargs: optional extra args.
"""
@abc.abstractmethod
def step(self, time_step, is_evaluation=False):
"""Returns action probabilities and chosen action at `time_step`.
Agents should handle `time_step` and extract the required part of the
`time_step.observations` field. This flexibility enables algorithms which
rely on opponent observations / information, e.g. CFR.
`is_evaluation` can be used so agents change their behaviour for evaluation
purposes, e.g.: preventing exploration rate decaying during test and
insertion of data to replay buffers.
Arguments:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool indicating whether the step is an evaluation routine,
as opposed to a normal training step.
Returns:
A `StepOutput` for the current `time_step`.
"""
| open_spiel-master | open_spiel/python/rl_agent.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reinforcement Learning (RL) Environment for Open Spiel.
This module wraps Open Spiel Python interface providing an RL-friendly API. It
covers both turn-based and simultaneous move games. Interactions between agents
and the underlying game occur mostly through the `reset` and `step` methods,
which return a `TimeStep` structure (see its docstrings for more info).
The following example illustrates the interaction dynamics. Consider a 2-player
Kuhn Poker (turn-based game). Agents have access to the `observations` (a dict)
field from `TimeSpec`, containing the following members:
* `info_state`: list containing the game information state for each player. The
size of the list always correspond to the number of players. E.g.:
[[0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]].
* `legal_actions`: list containing legal action ID lists (one for each player).
E.g.: [[0, 1], [0]], which corresponds to actions 0 and 1 being valid for
player 0 (the 1st player) and action 0 being valid for player 1 (2nd player).
* `current_player`: zero-based integer representing the player to make a move.
At each `step` call, the environment expects a singleton list with the action
(as it's a turn-based game), e.g.: [1]. This (zero-based) action must correspond
to the player specified at `current_player`. The game (which is at a decision
node) will process the action and take as many steps as necessary to cover chance
nodes, halting at a new decision or final node. Finally, a new `TimeStep` is
returned to the agent.
Simultaneous-move games follow analogous dynamics. The only difference is that the
environment expects a list of actions, one per player. Note the `current_player`
field is "irrelevant" here, admitting a constant value defined in spiel.h, which
defaults to -2 (module level constant `SIMULTANEOUS_PLAYER_ID`).
See open_spiel/python/examples/rl_example.py for example usages.
"""
import collections
import enum
from absl import logging
import numpy as np
import pyspiel
SIMULTANEOUS_PLAYER_ID = pyspiel.PlayerId.SIMULTANEOUS
class TimeStep(
collections.namedtuple(
"TimeStep", ["observations", "rewards", "discounts", "step_type"])):
"""Returned with every call to `step` and `reset`.
A `TimeStep` contains the data emitted by a game at each step of interaction.
A `TimeStep` holds an `observations` dict (with per-player entries), and
associated lists of `rewards`, `discounts` and a `step_type`.
The first `TimeStep` in a sequence will have `StepType.FIRST`. The final
`TimeStep` will have `StepType.LAST`. All other `TimeStep`s in a sequence will
have `StepType.MID`.
Attributes:
observations: a dict containing per-player observations.
rewards: A list of scalars (one per player), or `None` if `step_type` is
`StepType.FIRST`, i.e. at the start of a sequence.
discounts: A list of discount values in the range `[0, 1]` (one per player),
or `None` if `step_type` is `StepType.FIRST`.
step_type: A `StepType` enum value.
"""
__slots__ = ()
def first(self):
return self.step_type == StepType.FIRST
def mid(self):
return self.step_type == StepType.MID
def last(self):
return self.step_type == StepType.LAST
def is_simultaneous_move(self):
return self.observations["current_player"] == SIMULTANEOUS_PLAYER_ID
def current_player(self):
return self.observations["current_player"]
class StepType(enum.Enum):
"""Defines the status of a `TimeStep` within a sequence."""
FIRST = 0 # Denotes the first `TimeStep` in a sequence.
MID = 1 # Denotes any `TimeStep` in a sequence that is not FIRST or LAST.
LAST = 2 # Denotes the last `TimeStep` in a sequence.
def first(self):
return self is StepType.FIRST
def mid(self):
return self is StepType.MID
def last(self):
return self is StepType.LAST
# Global pyspiel members
def registered_games():
return pyspiel.registered_games()
class ChanceEventSampler(object):
"""Default sampler for external chance events."""
def __init__(self, seed=None):
self.seed(seed)
def seed(self, seed=None):
self._rng = np.random.RandomState(seed)
def __call__(self, state):
"""Sample a chance event in the given state."""
actions, probs = zip(*state.chance_outcomes())
return self._rng.choice(actions, p=probs)
class ObservationType(enum.Enum):
"""Defines what kind of observation to use."""
OBSERVATION = 0 # Use observation_tensor
INFORMATION_STATE = 1 # Use information_state_tensor
class Environment(object):
"""Open Spiel reinforcement learning environment class."""
def __init__(self,
game,
discount=1.0,
chance_event_sampler=None,
observation_type=None,
include_full_state=False,
mfg_distribution=None,
mfg_population=None,
enable_legality_check=False,
**kwargs):
"""Constructor.
Args:
game: [string, pyspiel.Game] Open Spiel game name or game instance.
discount: float, discount used in non-initial steps. Defaults to 1.0.
chance_event_sampler: optional callable used to sample external chance
events; defaults to `ChanceEventSampler()`.
observation_type: what kind of observation to use. If not specified, will
default to INFORMATION_STATE unless the game doesn't provide it.
include_full_state: whether or not to include the full serialized
OpenSpiel state in the observations (sometimes useful for debugging).
mfg_distribution: the distribution over states if the game is a mean field
game.
mfg_population: The Mean Field Game population to consider.
enable_legality_check: Check the legality of the move before stepping.
**kwargs: dict, additional settings passed to the Open Spiel game.
"""
self._chance_event_sampler = chance_event_sampler or ChanceEventSampler()
self._include_full_state = include_full_state
self._mfg_distribution = mfg_distribution
self._mfg_population = mfg_population
self._enable_legality_check = enable_legality_check
if isinstance(game, str):
if kwargs:
game_settings = {key: val for (key, val) in kwargs.items()}
logging.info("Using game settings: %s", game_settings)
self._game = pyspiel.load_game(game, game_settings)
else:
logging.info("Using game string: %s", game)
self._game = pyspiel.load_game(game)
else: # pyspiel.Game or API-compatible object.
logging.info("Using game instance: %s", game.get_type().short_name)
self._game = game
self._num_players = self._game.num_players()
self._state = None
self._should_reset = True
# Discount returned at non-initial steps.
self._discounts = [discount] * self._num_players
# Determine what observation type to use.
if observation_type is None:
if self._game.get_type().provides_information_state_tensor:
observation_type = ObservationType.INFORMATION_STATE
else:
observation_type = ObservationType.OBSERVATION
# Check the requested observation type is supported.
if observation_type == ObservationType.OBSERVATION:
if not self._game.get_type().provides_observation_tensor:
raise ValueError(f"observation_tensor not supported by {game}")
elif observation_type == ObservationType.INFORMATION_STATE:
if not self._game.get_type().provides_information_state_tensor:
raise ValueError(f"information_state_tensor not supported by {game}")
self._use_observation = (observation_type == ObservationType.OBSERVATION)
if self._game.get_type().dynamics == pyspiel.GameType.Dynamics.MEAN_FIELD:
assert mfg_distribution is not None
assert mfg_population is not None
assert 0 <= mfg_population < self._num_players
def seed(self, seed=None):
self._chance_event_sampler.seed(seed)
def get_time_step(self):
"""Returns a `TimeStep` without updating the environment.
Returns:
A `TimeStep` namedtuple containing:
observation: a dict containing one observation per player, each
corresponding to `observation_spec()`.
reward: list of rewards at this timestep, or None if step_type is
`StepType.FIRST`.
discount: list of discounts in the range [0, 1], or None if step_type is
`StepType.FIRST`.
step_type: A `StepType` value.
"""
observations = {
"info_state": [],
"legal_actions": [],
"current_player": [],
"serialized_state": []
}
rewards = []
step_type = StepType.LAST if self._state.is_terminal() else StepType.MID
self._should_reset = step_type == StepType.LAST
cur_rewards = self._state.rewards()
for player_id in range(self.num_players):
rewards.append(cur_rewards[player_id])
observations["info_state"].append(
self._state.observation_tensor(player_id) if self._use_observation
else self._state.information_state_tensor(player_id))
observations["legal_actions"].append(self._state.legal_actions(player_id))
observations["current_player"] = self._state.current_player()
discounts = self._discounts
if step_type == StepType.LAST:
# When the game is in a terminal state set the discount to 0.
discounts = [0. for _ in discounts]
if self._include_full_state:
observations["serialized_state"] = pyspiel.serialize_game_and_state(
self._game, self._state)
# For gym environments
if hasattr(self._state, "last_info"):
observations["info"] = self._state.last_info
return TimeStep(
observations=observations,
rewards=rewards,
discounts=discounts,
step_type=step_type)
def _check_legality(self, actions):
if self.is_turn_based:
legal_actions = self._state.legal_actions()
if actions[0] not in legal_actions:
raise RuntimeError(f"step() called on illegal action {actions[0]}")
else:
for p in range(len(actions)):
legal_actions = self._state.legal_actions(p)
if legal_actions and actions[p] not in legal_actions:
raise RuntimeError(f"step() by player {p} called on illegal " +
f"action: {actions[p]}")
def step(self, actions):
"""Updates the environment according to `actions` and returns a `TimeStep`.
If the environment returned a `TimeStep` with `StepType.LAST` at the
previous step, this call to `step` will start a new sequence and `actions`
will be ignored.
This method will also start a new sequence if called after the environment
has been constructed and `reset` has not been called. Again, in this case
`actions` will be ignored.
Args:
actions: a list containing one action per player, following specifications
defined in `action_spec()`.
Returns:
A `TimeStep` namedtuple containing:
observation: a dict containing one observation per player, each
corresponding to `observation_spec()`.
reward: list of rewards at this timestep, or None if step_type is
`StepType.FIRST`.
discount: list of discounts in the range [0, 1], or None if step_type is
`StepType.FIRST`.
step_type: A `StepType` value.
"""
assert len(actions) == self.num_actions_per_step, (
"Invalid number of actions! Expected {}".format(
self.num_actions_per_step))
if self._should_reset:
return self.reset()
if self._enable_legality_check:
self._check_legality(actions)
if self.is_turn_based:
self._state.apply_action(actions[0])
else:
self._state.apply_actions(actions)
self._sample_external_events()
return self.get_time_step()
def reset(self):
"""Starts a new sequence and returns the first `TimeStep` of this sequence.
Returns:
A `TimeStep` namedtuple containing:
observations: a dict containing one observation per player, each
corresponding to `observation_spec()`.
rewards: list of rewards at this timestep, or None if step_type is
`StepType.FIRST`.
discounts: list of discounts in the range [0, 1], or None if step_type
is `StepType.FIRST`.
step_type: A `StepType` value.
"""
self._should_reset = False
if self._game.get_type(
).dynamics == pyspiel.GameType.Dynamics.MEAN_FIELD and self._num_players > 1:
self._state = self._game.new_initial_state_for_population(
self._mfg_population)
else:
self._state = self._game.new_initial_state()
self._sample_external_events()
observations = {
"info_state": [],
"legal_actions": [],
"current_player": [],
"serialized_state": []
}
for player_id in range(self.num_players):
observations["info_state"].append(
self._state.observation_tensor(player_id) if self._use_observation
else self._state.information_state_tensor(player_id))
observations["legal_actions"].append(self._state.legal_actions(player_id))
observations["current_player"] = self._state.current_player()
if self._include_full_state:
observations["serialized_state"] = pyspiel.serialize_game_and_state(
self._game, self._state)
return TimeStep(
observations=observations,
rewards=None,
discounts=None,
step_type=StepType.FIRST)
def _sample_external_events(self):
"""Sample chance events until we get to a decision node."""
while self._state.is_chance_node() or (self._state.current_player()
== pyspiel.PlayerId.MEAN_FIELD):
if self._state.is_chance_node():
outcome = self._chance_event_sampler(self._state)
self._state.apply_action(outcome)
if self._state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
dist_to_register = self._state.distribution_support()
dist = [
self._mfg_distribution.value_str(str_state, default_value=0.0)
for str_state in dist_to_register
]
self._state.update_distribution(dist)
def observation_spec(self):
"""Defines the observation per player provided by the environment.
Each dict member will contain its expected structure and shape. E.g.: for
Kuhn Poker {"info_state": (6,), "legal_actions": (2,), "current_player": (),
"serialized_state": ()}
Returns:
A specification dict describing the observation fields and shapes.
"""
return dict(
info_state=tuple([
self._game.observation_tensor_size() if self._use_observation else
self._game.information_state_tensor_size()
]),
legal_actions=(self._game.num_distinct_actions(),),
current_player=(),
serialized_state=(),
)
def action_spec(self):
"""Defines per player action specifications.
Specifications include action boundaries and their data type.
E.g.: for Kuhn Poker {"num_actions": 2, "min": 0, "max":1, "dtype": int}
Returns:
A specification dict containing per player action properties.
"""
return dict(
num_actions=self._game.num_distinct_actions(),
min=0,
max=self._game.num_distinct_actions() - 1,
dtype=int,
)
# Environment properties
@property
def use_observation(self):
"""Returns whether the environment is using the game's observation.
If false, it is using the game's information state.
"""
return self._use_observation
# Game properties
@property
def name(self):
return self._game.get_type().short_name
@property
def num_players(self):
return self._game.num_players()
@property
def num_actions_per_step(self):
return 1 if self.is_turn_based else self.num_players
# New RL calls for more advanced use cases (e.g. search + RL).
@property
def is_turn_based(self):
return ((self._game.get_type().dynamics
== pyspiel.GameType.Dynamics.SEQUENTIAL) or
(self._game.get_type().dynamics
== pyspiel.GameType.Dynamics.MEAN_FIELD))
@property
def max_game_length(self):
return self._game.max_game_length()
@property
def is_chance_node(self):
return self._state.is_chance_node()
@property
def game(self):
return self._game
def set_state(self, new_state):
"""Updates the game state."""
assert new_state.get_game() == self.game, (
"State must have been created by the same game.")
self._state = new_state
@property
def get_state(self):
return self._state
@property
def mfg_distribution(self):
return self._mfg_distribution
def update_mfg_distribution(self, mfg_distribution):
"""Updates the distribution over the states of the mean field game."""
assert (
self._game.get_type().dynamics == pyspiel.GameType.Dynamics.MEAN_FIELD)
self._mfg_distribution = mfg_distribution
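# Editor's illustrative sketch (not part of the library): a minimal
# environment interaction loop with uniformly random actions on a turn-based
# game. The game name "kuhn_poker" is only an example.
def _example_random_episode(game_name="kuhn_poker", seed=1234):
  rng = np.random.RandomState(seed)
  env = Environment(game_name)
  env.seed(seed)
  time_step = env.reset()
  while not time_step.last():
    player = time_step.observations["current_player"]
    legal_actions = time_step.observations["legal_actions"][player]
    time_step = env.step([int(rng.choice(legal_actions))])
  return time_step.rewards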
| open_spiel-master | open_spiel/python/rl_environment.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.egt.dynamics."""
import math
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.egt import dynamics
from open_spiel.python.egt.utils import game_payoffs_array
import pyspiel
def _sum_j_x_j_ln_x_j_over_x_i(x):
r"""Computes \sum_j x_j ln(x_j / x_i)."""
# By having a = x.reshape([1, -1]) and b = x.reshape([-1, 1]), we can use
# broadcasting and have:
# (a / b)[i, j] = x_j / x_i
# thus giving:
# \sum_j x_j * log(x_j/ x_i) = sum(a * ln (a/b), axis=1)
a = x.reshape([1, -1])
b = x.reshape([-1, 1])
return np.sum(a * np.log(np.divide(a, b)), axis=1)
def _q_learning_dynamics(composition, payoff, temperature):
r"""An equivalent implementation of `dynamics.boltzmannq`."""
return 1 / temperature * dynamics.replicator(composition, payoff) + (
composition * _sum_j_x_j_ln_x_j_over_x_i(composition))
class _InternalTest(absltest.TestCase):
def test__sum_j_x_j_ln_x_j_over_x_i(self):
# This tests a sub-function of `_q_learning_dynamics` to ensure its
# internals are correct.
x = np.asarray([1., 2., 3.])
# We use 2 different formula to check we have the correct result.
expected = [sum([x_j * math.log(x_j / x_i) for x_j in x]) for x_i in x]
log = math.log
expected_0 = 1. * log(1 / 1.) + 2 * log(2 / 1.) + 3 * log(3 / 1.)
expected_1 = 1. * log(1 / 2.) + 2 * log(2 / 2.) + 3 * log(3 / 2.)
expected_2 = 1. * log(1 / 3.) + 2 * log(2 / 3.) + 3 * log(3 / 3.)
expected_2 = np.asarray([expected_0, expected_1, expected_2])
np.testing.assert_array_equal(expected, expected_2)
np.testing.assert_array_almost_equal(expected,
_sum_j_x_j_ln_x_j_over_x_i(x))
class DynamicsTest(parameterized.TestCase):
def test_boltzmann_q(self):
x = np.asarray([1 / 2, 1 / 2])
payoff = np.asarray([[1, 0], [0, 1]], dtype=np.float32)
temperature = 1
np.testing.assert_array_equal(
dynamics.boltzmannq(x, payoff, temperature),
_q_learning_dynamics(x, payoff, temperature))
def test_rd_rps_pure_fixed_points(self):
game = pyspiel.load_matrix_game('matrix_rps')
payoff_matrix = game_payoffs_array(game)
rd = dynamics.replicator
dyn = dynamics.SinglePopulationDynamics(payoff_matrix, rd)
x = np.eye(3)
np.testing.assert_allclose(dyn(x[0]), np.zeros((3,)))
np.testing.assert_allclose(dyn(x[1]), np.zeros((3,)))
np.testing.assert_allclose(dyn(x[2]), np.zeros((3,)))
@parameterized.parameters(dynamics.replicator, dynamics.boltzmannq,
dynamics.qpg)
def test_dynamics_rps_mixed_fixed_point(self, func):
game = pyspiel.load_matrix_game('matrix_rps')
payoff_matrix = game_payoffs_array(game)
dyn = dynamics.SinglePopulationDynamics(payoff_matrix, func)
x = np.ones(shape=(3,)) / 3.
np.testing.assert_allclose(dyn(x), np.zeros((3,)), atol=1e-15)
def test_multi_population_rps(self):
game = pyspiel.load_matrix_game('matrix_rps')
payoff_matrix = game_payoffs_array(game)
rd = dynamics.replicator
dyn = dynamics.MultiPopulationDynamics(payoff_matrix, [rd] * 2)
x = np.concatenate([np.ones(k) / float(k) for k in payoff_matrix.shape[1:]])
np.testing.assert_allclose(dyn(x), np.zeros((6,)), atol=1e-15)
def test_multi_population_three_populations(self):
payoff_matrix = np.arange(3 * 2 * 3 * 4).reshape(3, 2, 3, 4)
rd = dynamics.replicator
dyn = dynamics.MultiPopulationDynamics(payoff_matrix, [rd] * 3)
x = np.concatenate([np.ones(k) / float(k) for k in payoff_matrix.shape[1:]])
self.assertEqual(dyn(x).shape, (9,))
def test_multi_population_four_populations(self):
payoff_matrix = np.zeros((4, 2, 2, 2, 2))
payoff_matrix[:, 0, 0, 0, 0] = np.ones((4,))
rd = dynamics.replicator
dyn = dynamics.MultiPopulationDynamics(payoff_matrix, [rd] * 4)
x = np.concatenate([np.ones(k) / float(k) for k in payoff_matrix.shape[1:]])
avg_fitness = 1. / float(2**4) # if all players play uniform random
dx = dyn(x)
np.testing.assert_allclose(dx[::2], np.ones((4,)) * avg_fitness / 2.)
np.testing.assert_allclose(dx[1::2], np.ones((4,)) * (-avg_fitness) / 2.)
def test_time_average(self):
n, k = 10, 3
traj = np.ones(shape=(n, k))
time_avg = dynamics.time_average(traj)
np.testing.assert_allclose(time_avg, np.ones(shape=(n, k)))
traj[1::2] = -1. * traj[1::2]
time_avg = dynamics.time_average(traj)
np.testing.assert_allclose(time_avg[-1], np.zeros(shape=(k,)))
np.testing.assert_allclose(time_avg[-2],
1. / (n - 1.) * np.ones(shape=(k,)))
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/egt/dynamics_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.egt.utils."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.egt import utils
import pyspiel
def _generate_prob_profiles(num_items, num_slots):
"""Another implementation of `distribution` for test purposes.
  This function is the original implementation from Karl. jblespiau@ finds it
  useful to add it here as: 1) an additional test of our function, and 2) a
  check that the initial code is correct too.
Args:
num_items: The number of items to distribute.
num_slots: The number of slots.
Returns:
A numpy array of shape [num_distributions, num_slots].
"""
if num_slots == 1:
return np.array([num_items])
num_rows = utils.n_choose_k(num_items + num_slots - 1, num_items)
distributions = np.empty([num_rows, num_slots])
ind = 0
for i in range(0, num_items + 1):
n_tmp = num_items - i
k_tmp = num_slots - 1
distributions_tmp = _generate_prob_profiles(n_tmp, k_tmp)
distributions[ind:ind +
np.shape(distributions_tmp)[0], :] = np.column_stack(
(np.array((np.ones(np.shape(distributions_tmp)[0]) * i)),
distributions_tmp))
ind = ind + np.shape(distributions_tmp)[0]
return distributions
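# Illustrative sketch (added for exposition; not part of the original test
# module): for 2 items over 2 slots, the recursion above enumerates the three
# stars-and-bars distributions [0, 2], [1, 1] and [2, 0].
def _example_generate_prob_profiles():
  profiles = _generate_prob_profiles(num_items=2, num_slots=2)
  np.testing.assert_array_equal(
      profiles, np.asarray([[0., 2.], [1., 1.], [2., 0.]]))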
class UtilsTest(parameterized.TestCase):
@parameterized.parameters(
(5, 3, False),
(2, 2, True),
)
def test_distribution(self, num_items, num_slots, normalize):
distribution = list(utils.distribute(num_items, num_slots, normalize))
# Correct length.
# See https://en.wikipedia.org/wiki/Stars_and_bars_%28combinatorics%29.
self.assertLen(distribution,
utils.n_choose_k(num_items + num_slots - 1, num_items))
# No duplicates.
self.assertLen(distribution, len(set(distribution)))
sum_distribution = num_items if not normalize else 1
for d in distribution:
      self.assertAlmostEqual(sum_distribution, sum(d))
self.assertTrue((np.asarray(d) >= 0).all())
@parameterized.parameters(
(5, 3),
(2, 2),
(3, 3),
(10, 5),
)
def test_distribution_equivalent_implementation(self, num_items, num_slots):
dist_list = list(utils.distribute(num_items, num_slots, normalize=False))
distribution = np.vstack(dist_list)
other_implementation = _generate_prob_profiles(num_items, num_slots)
np.testing.assert_array_equal(
utils.sort_rows_lexicographically(distribution),
utils.sort_rows_lexicographically(other_implementation))
def test_sort_rows_lexicographically(self):
array = np.asarray([
[1, 1, 0],
[1, 2, 0],
[3, 1, 0],
[0, 0, 4],
])
expected = np.asarray([
[0, 0, 4],
[1, 1, 0],
[1, 2, 0],
[3, 1, 0],
])
np.testing.assert_equal(expected, utils.sort_rows_lexicographically(array))
def test_id_profile_mapping(self):
"""Tests forward and backward mapping of pure strategy profiles to IDs."""
num_strats_per_population = np.asarray([4, 4, 4, 9])
num_pure_profiles = np.prod(num_strats_per_population)
strat_ranges = [
range(num_strats) for num_strats in num_strats_per_population
]
id_list = []
for strat_profile in itertools.product(strat_ranges[0], strat_ranges[1],
strat_ranges[2], strat_ranges[3]):
profile_id = utils.get_id_from_strat_profile(num_strats_per_population,
strat_profile)
id_list.append(profile_id)
# Tests backward mapping (ID-to-profile lookup)
strat_profile_from_id = utils.get_strat_profile_from_id(
num_strats_per_population, profile_id)
np.testing.assert_array_equal(strat_profile, strat_profile_from_id)
# Tests forward mapping (profile-to-ID lookup)
np.testing.assert_array_equal(id_list, range(num_pure_profiles))
def test_get_valid_next_profiles(self):
"""Tests next-profile generator."""
num_strats_per_population = np.asarray([4, 5, 9, 7])
cur_profile = np.asarray([1, 1, 2, 1])
next_profiles = utils.get_valid_next_profiles(num_strats_per_population,
cur_profile)
num_next_profiles = 0
for _, _ in next_profiles:
num_next_profiles += 1
expected = (num_strats_per_population - 1).sum()
np.testing.assert_equal(expected, num_next_profiles)
def test_constant_sum_checker(self):
"""Tests if verification of constant-sum game is correct."""
game = pyspiel.load_matrix_game("matrix_rps")
payoff_tables = utils.game_payoffs_array(game)
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
game_is_constant_sum, payoff_sum = utils.check_is_constant_sum(
payoff_tables[0], payoffs_are_hpt_format)
self.assertTrue(game_is_constant_sum)
self.assertEqual(payoff_sum, 0.)
def test_game_payoffs_array_rps(self):
"""Test `game_payoffs_array` for rock-paper-scissors."""
game = pyspiel.load_matrix_game("matrix_rps")
payoff_matrix = np.empty(shape=(2, 3, 3))
payoff_row = np.array([[0., -1., 1.], [1., 0., -1.], [-1., 1., 0.]])
payoff_matrix[0] = payoff_row
payoff_matrix[1] = -1. * payoff_row
np.testing.assert_allclose(utils.game_payoffs_array(game), payoff_matrix)
def test_game_payoffs_array_pd(self):
"""Test `game_payoffs_array` for prisoners' dilemma."""
game = pyspiel.load_matrix_game("matrix_pd")
payoff_matrix = np.empty(shape=(2, 2, 2))
payoff_row = np.array([[5., 0.], [10., 1.]])
payoff_matrix[0] = payoff_row
payoff_matrix[1] = payoff_row.T
np.testing.assert_allclose(utils.game_payoffs_array(game), payoff_matrix)
@parameterized.parameters(
(100, 2, 0.),
(100, 3, 0.),
(100, 4, 0.),
(100, 2, 0.05),
)
def test_sample_from_simplex(self, n, dim, vmin):
"""Test `sample_from_simplex`."""
x = utils.sample_from_simplex(n, dim=dim, vmin=vmin)
np.testing.assert_allclose(np.sum(x, axis=1), np.ones(n))
self.assertTrue(np.all(x <= 1. - vmin))
self.assertTrue(np.all(x >= vmin))
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/egt/utils_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Alpha-Rank for general games.
Namely, computes fixation probabilities, Markov chain, and associated
stationary distribution given a population size and payoff matrix involving
n-strategy interactions.
All equations and variable names correspond to the following paper:
https://arxiv.org/abs/1903.01373
"""
import numpy as np
import scipy.linalg as la
from open_spiel.python.egt import alpharank_visualizer
from open_spiel.python.egt import utils
def _get_payoff(payoff_table_k, payoffs_are_hpt_format, strat_profile, k=None):
"""Gets the payoff of the k-th agent in a single or multi-population game.
Namely, accepts the payoff table of the k-th agent (which can be matrix or
HPT format), the index k of the agent of interest (so its payoff can be looked
up in case of an HPT format payoff table), and the pure strategy profile.
For multipopulation games, we currently only support games where the k-th
agent's payoff is a function of the HPT distribution (a vector
indicating the number of players playing each strategy), as opposed to the
strategy profile (a vector indicating the strategy of each player). This is
due to the nature of the PayoffTable class, which currently only tracks
distributions in the first k columns (rather than profiles).
Args:
payoff_table_k: The k-th agent's payoff table, in matrix or HPT format.
payoffs_are_hpt_format: Boolean indicating whether payoff_table_k is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT) or a
numpy array. True indicates HPT format, False indicates numpy array.
strat_profile: The pure strategy profile.
k: The index of the agent of interest. Only used for HPT case, and only >0
for a multi-population game.
Returns:
The k-th agent's payoff.
"""
if payoffs_are_hpt_format:
# All games are supported when using HPTs
assert k is not None
# Compute HPT distribution (vector of # of players per strategy)
distribution = payoff_table_k.get_distribution_from_profile(strat_profile)
# Lookup the payoff profile (HPT row) corresponding to the distribution
payoff_profile = payoff_table_k[tuple(distribution)]
# Return the payoff corresponding to the k-th agent's strategy
return payoff_profile[strat_profile[k]]
else:
# Only 2 player symmetric/asymmetric games supported using matrix payoffs
return payoff_table_k[tuple(strat_profile)]
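# Illustrative sketch (added for exposition; not part of the original module):
# with a plain numpy payoff matrix, `_get_payoff` reduces to an indexed lookup.
def _example_get_payoff_matrix_lookup():
  payoff_matrix = np.asarray([[0., -1., 1.], [1., 0., -1.], [-1., 1., 0.]])
  # Rock (0) vs. scissors (2) in rock-paper-scissors pays +1 to the row player.
  assert _get_payoff(payoff_matrix, payoffs_are_hpt_format=False,
                     strat_profile=[0, 2]) == 1.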
def _get_singlepop_2player_fitness(payoff_table, payoffs_are_hpt_format, m,
my_popsize, my_strat, opponent_strat,
use_local_selection_model):
"""Gets a target agent fitness given a finite population of competitors.
Note that this is only applicable to 2-player symmetric games.
Namely, gets fitness of an agent i playing my_strat in underlying population
of (my_popsize agents playing my_strat) and (m-my_popsize agents playing
opponent_strat).
Args:
payoff_table: A payoff table.
payoffs_are_hpt_format: Boolean indicating whether payoff_table is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT), or a
numpy array. True indicates HPT format, False indicates numpy array.
m: The total number of agents in the population.
my_popsize: The number of agents in the population playing my strategy.
my_strat: Index of my strategy.
opponent_strat: Index of the opposing strategy.
use_local_selection_model: Enable local evolutionary selection model, which
considers fitness against the current opponent only, rather than the
global population state.
Returns:
The fitness of agent i.
"""
if use_local_selection_model:
fitness = payoff_table[tuple([my_strat, opponent_strat])]
else:
fitness = ((my_popsize-1)/(m-1)*
_get_payoff(payoff_table, payoffs_are_hpt_format,
strat_profile=[my_strat, my_strat], k=0) +
(m-my_popsize)/(m-1)*
_get_payoff(payoff_table, payoffs_are_hpt_format,
strat_profile=[my_strat, opponent_strat], k=0))
return fitness
def _get_rho_sr(payoff_table,
payoffs_are_hpt_format,
m,
r,
s,
alpha,
game_is_constant_sum,
use_local_selection_model,
payoff_sum=None):
"""Gets fixation probability of rogue strategy r in population playing s.
Args:
payoff_table: A payoff table.
payoffs_are_hpt_format: Boolean indicating whether payoff_table is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT), or a
numpy array. True indicates HPT format, False indicates numpy array.
m: The total number of agents in the population.
r: Rogue strategy r.
s: Population strategy s.
alpha: Fermi distribution temperature parameter.
game_is_constant_sum: Boolean indicating if the game is constant sum.
use_local_selection_model: Enable local evolutionary selection model, which
considers fitness against the current opponent only, rather than the
global population state.
payoff_sum: The payoff sum if the game is constant sum, or None otherwise.
Returns:
The fixation probability.
"""
if use_local_selection_model or game_is_constant_sum:
payoff_rs = _get_payoff(
payoff_table, payoffs_are_hpt_format, strat_profile=[r, s], k=0)
if use_local_selection_model:
# Row plays s, column plays r
payoff_sr = _get_payoff(
payoff_table, payoffs_are_hpt_format, strat_profile=[s, r], k=0)
u = alpha * (payoff_rs - payoff_sr)
else:
assert payoff_sum is not None
u = alpha * m / (m - 1) * (payoff_rs - payoff_sum / 2)
if np.isclose(u, 0, atol=1e-14):
# To avoid divide by 0, use first-order approximation when u is near 0
result = 1 / m
else:
result = (1 - np.exp(-u)) / (1 - np.exp(-m * u))
else:
assert payoff_sum is None
summed = 0
for l in range(1, m):
t_mult = 1.
for p_r in range(1, l + 1):
# Probabilities of strategy r decreasing/increasing
p_s = m - p_r
# Fitness of agent playing r against rest of current population
f_ri = _get_singlepop_2player_fitness(
payoff_table,
payoffs_are_hpt_format,
m,
my_popsize=p_r,
my_strat=r,
opponent_strat=s,
use_local_selection_model=use_local_selection_model)
# Fitness of agent playing s against rest of current population
f_sj = _get_singlepop_2player_fitness(
payoff_table,
payoffs_are_hpt_format,
m,
my_popsize=p_s,
my_strat=s,
opponent_strat=r,
use_local_selection_model=use_local_selection_model)
t_mult *= np.exp(-alpha * (f_ri - f_sj))
summed += t_mult
result = (1 + summed)**(-1)
return result
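# Note on the closed form above (added for exposition): with u the alpha-scaled
# fitness advantage of the mutant, rho(u) = (1 - exp(-u)) / (1 - exp(-m * u))
# interpolates between the neutral-drift value 1/m as u -> 0 and certain
# fixation (rho -> 1) as u -> +inf, and decays exponentially to 0 for u < 0.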
def _get_rho_sr_multipop(payoff_table_k,
payoffs_are_hpt_format,
k,
m,
r,
s,
alpha,
use_fast_compute=True):
"""Gets fixation probability for multi-population games.
Specifically, considers the fitnesses of two strategy profiles r and s given
the payoff table of the k-th population. Profile s is the current profile and
r is a mutant profile. Profiles r and s are identical except for the k-th
element, which corresponds to the deviation of the k-th population's
monomorphic strategy from s[k] to r[k].
Args:
payoff_table_k: The k-th population's payoff table.
payoffs_are_hpt_format: Boolean indicating whether payoff_table_k is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT), or numpy
array. True indicates HPT format, False indicates numpy array.
k: Index of the k-th population.
m: Total number of agents in the k-th population.
r: Strategy profile containing mutant strategy r for population k.
s: Current strategy profile.
alpha: Fermi distribution temperature parameter.
use_fast_compute: Boolean indicating whether closed-form computation should
be used.
Returns:
Probability of strategy r fixating in population k.
"""
  # Fitnesses do not depend on population sizes in the multipopulation case, so
  # they can be computed outside the loops.
# Fitness of population k agent given strategy profile r
f_r = _get_payoff(payoff_table_k, payoffs_are_hpt_format, r, k)
# Fitness of population k agent given strategy profile s
f_s = _get_payoff(payoff_table_k, payoffs_are_hpt_format, s, k)
if use_fast_compute:
u = alpha * (f_r - f_s)
if np.isclose(u, 0, atol=1e-14):
# To avoid divide by 0, use first-order approximation when u is near 0
result = 1 / m
else:
result = (1 - np.exp(-u)) / (1 - np.exp(-m * u))
else:
summed = 0
for l in range(1, m):
t_mult = 1.
for p_r in range(1, l + 1): # pylint: disable= unused-variable
t_mult *= np.exp(-alpha * (f_r - f_s))
summed += t_mult
result = (1 + summed)**(-1)
return result
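# Note on the two branches above (added for exposition): with
# u = alpha * (f_r - f_s), the inner loop accumulates t_mult = exp(-u * l), so
#   1 + sum_{l=1}^{m-1} exp(-u * l) = sum_{l=0}^{m-1} exp(-u * l)
#                                   = (1 - exp(-m * u)) / (1 - exp(-u)),
# whose reciprocal is exactly the closed form used when use_fast_compute=True.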
def _get_singlepop_transition_matrix(payoff_table,
payoffs_are_hpt_format,
m,
alpha,
game_is_constant_sum,
use_local_selection_model,
payoff_sum,
use_inf_alpha=False,
inf_alpha_eps=0.1):
"""Gets the Markov transition matrix for a single-population game.
Args:
payoff_table: A payoff table.
payoffs_are_hpt_format: Boolean indicating whether payoff_table is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT), or a
numpy array. True indicates HPT format, False indicates numpy array.
m: Total number of agents in the k-th population.
alpha: Fermi distribution temperature parameter.
game_is_constant_sum: Boolean indicating if the game is constant sum.
use_local_selection_model: Enable local evolutionary selection model, which
considers fitness against the current opponent only, rather than the
global population state.
payoff_sum: The payoff sum if the game is constant sum, or None otherwise.
use_inf_alpha: Use infinite-alpha alpharank model.
inf_alpha_eps: Noise term (epsilon) used in infinite-alpha alpharank model.
Returns:
Markov transition matrix.
"""
num_strats_per_population = utils.get_num_strats_per_population(
[payoff_table], payoffs_are_hpt_format)
num_strats = num_strats_per_population[0]
c = np.zeros((num_strats, num_strats))
rhos = np.zeros((num_strats, num_strats))
# r and s are, respectively, the column and row strategy profiles
for s in range(num_strats): # Current strategy
for r in range(num_strats): # Next strategy
if s != r: # Compute off-diagonal fixation probabilities
if use_inf_alpha:
eta = 1. / (num_strats - 1)
# Payoff of r when played against s
payoff_rs = _get_payoff(
payoff_table, payoffs_are_hpt_format, strat_profile=[r, s], k=0)
# Payoff of s when played against r
payoff_sr = _get_payoff(
payoff_table, payoffs_are_hpt_format, strat_profile=[s, r], k=0)
if np.isclose(payoff_rs, payoff_sr, atol=1e-14):
c[s, r] = eta * 0.5
elif payoff_rs > payoff_sr:
# Transition to r since its payoff is higher than s, but remove some
# small amount of mass, inf_alpha_eps, to keep the chain irreducible
c[s, r] = eta * (1 - inf_alpha_eps)
else:
# Transition with very small probability
c[s, r] = eta * inf_alpha_eps
else:
rhos[s, r] = _get_rho_sr(payoff_table, payoffs_are_hpt_format, m, r,
s, alpha, game_is_constant_sum,
use_local_selection_model, payoff_sum)
eta = 1. / (num_strats - 1)
c[s, r] = eta * rhos[s, r]
# Fixation probability of competing only against one's own strategy is 1
# rhos[s,s] = 1. # Commented as self-fixations are not interesting (for now)
c[s, s] = 1 - sum(c[s, :]) # Diagonals
return c, rhos
def _get_multipop_transition_matrix(payoff_tables,
payoffs_are_hpt_format,
m,
alpha,
use_inf_alpha=False,
inf_alpha_eps=0.1):
"""Gets Markov transition matrix for multipopulation games."""
num_strats_per_population = utils.get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format)
num_profiles = utils.get_num_profiles(num_strats_per_population)
eta = 1. / (np.sum(num_strats_per_population - 1))
c = np.zeros((num_profiles, num_profiles))
rhos = np.zeros((num_profiles, num_profiles))
for id_row_profile in range(num_profiles):
row_profile = utils.get_strat_profile_from_id(num_strats_per_population,
id_row_profile)
next_profile_gen = utils.get_valid_next_profiles(num_strats_per_population,
row_profile)
for index_population_that_changed, col_profile in next_profile_gen:
id_col_profile = utils.get_id_from_strat_profile(
num_strats_per_population, col_profile)
if use_inf_alpha:
payoff_col = _get_payoff(
payoff_tables[index_population_that_changed],
payoffs_are_hpt_format,
col_profile,
k=index_population_that_changed)
payoff_row = _get_payoff(
payoff_tables[index_population_that_changed],
payoffs_are_hpt_format,
row_profile,
k=index_population_that_changed)
if np.isclose(payoff_col, payoff_row, atol=1e-14):
c[id_row_profile, id_col_profile] = eta * 0.5
elif payoff_col > payoff_row:
# Transition to col strategy since its payoff is higher than row
# strategy, but remove some small amount of mass, inf_alpha_eps, to
# keep the chain irreducible
c[id_row_profile, id_col_profile] = eta * (1 - inf_alpha_eps)
else:
# Transition with very small probability
c[id_row_profile, id_col_profile] = eta * inf_alpha_eps
else:
rhos[id_row_profile, id_col_profile] = _get_rho_sr_multipop(
payoff_table_k=payoff_tables[index_population_that_changed],
payoffs_are_hpt_format=payoffs_are_hpt_format,
k=index_population_that_changed,
m=m,
r=col_profile,
s=row_profile,
alpha=alpha)
c[id_row_profile,
id_col_profile] = eta * rhos[id_row_profile, id_col_profile]
# Special case of self-transition
c[id_row_profile, id_row_profile] = 1 - sum(c[id_row_profile, :])
return c, rhos
def _get_stationary_distr(c):
"""Gets stationary distribution of transition matrix c."""
eigenvals, left_eigenvecs, _ = la.eig(c, left=True, right=True)
mask = abs(eigenvals - 1.) < 1e-10
left_eigenvecs = left_eigenvecs[:, mask]
num_stationary_eigenvecs = np.shape(left_eigenvecs)[1]
if num_stationary_eigenvecs != 1:
raise ValueError('Expected 1 stationary distribution, but found %d' %
num_stationary_eigenvecs)
left_eigenvecs *= 1. / sum(left_eigenvecs)
return left_eigenvecs.real.flatten()
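# Illustrative sketch (added for exposition; not part of the original module):
# for a simple two-state chain the stationary distribution can be verified by
# hand, e.g. pi = [5/6, 1/6] for the row-stochastic matrix below.
def _example_stationary_distr_two_states():
  c = np.asarray([[0.9, 0.1], [0.5, 0.5]])
  pi = _get_stationary_distr(c)
  np.testing.assert_allclose(pi, [5. / 6., 1. / 6.], atol=1e-8)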
def print_results(payoff_tables,
payoffs_are_hpt_format,
rhos=None,
rho_m=None,
c=None,
pi=None):
"""Prints the finite-population analysis results."""
print('Payoff tables:\n')
if payoffs_are_hpt_format:
for payoff_table in payoff_tables:
print(payoff_table())
else:
print(payoff_tables)
if rho_m is not None:
print('\nNeutral fixation probability (rho_m):\n', rho_m)
if rhos is not None and rho_m is not None:
print('\nFixation probability matrix (rho_{r,s}/rho_m):\n',
np.around(rhos / rho_m, decimals=2))
if c is not None:
print('\nMarkov transition matrix (c):\n', np.around(c, decimals=2))
if pi is not None:
print('\nStationary distribution (pi):\n', pi)
def sweep_pi_vs_epsilon(payoff_tables,
strat_labels=None,
warm_start_epsilon=None,
visualize=False,
return_epsilon=False,
min_iters=10,
max_iters=100,
min_epsilon=1e-14,
num_strats_to_label=10,
legend_sort_clusters=False):
"""Computes infinite-alpha distribution for a range of perturbations.
The range of response graph perturbations is defined in epsilon_list.
  Note that min_iters and max_iters are necessary because it may sometimes
  appear that the stationary distribution has converged for a game within the
  first few iterations, when in reality a sufficiently smaller epsilon is
  needed for the distribution to first diverge and then reconverge. This
  behavior depends on both the payoff structure and bounds, so the parameters
  min_iters and max_iters can be used to fine-tune this.
Args:
payoff_tables: List of game payoff tables, one for each agent identity.
Each payoff_table may be either a numpy array, or a
_PayoffTableInterface object.
strat_labels: Human-readable strategy labels. See get_strat_profile_labels()
in utils.py for formatting details.
warm_start_epsilon: Initial value of epsilon to use.
visualize: Plot the sweep results.
return_epsilon: Whether to return the final epsilon used.
min_iters: the minimum number of sweep iterations.
max_iters: the maximum number of sweep iterations.
min_epsilon: the minimum value of epsilon to be tested, at which point the
sweep terminates (if not converged already).
num_strats_to_label: Number of strats to label in legend
legend_sort_clusters: If true, strategies in the same cluster are sorted in
the legend according to orderings for earlier alpha values. Primarily for
visualization purposes! Rankings for lower alpha values should be
interpreted carefully.
Returns:
pi: AlphaRank stationary distribution.
epsilon: The AlphaRank transition matrix noise level resulting from sweep.
"""
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
num_populations = len(payoff_tables)
num_strats_per_population = utils.get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format)
if num_populations == 1:
num_profiles = num_strats_per_population[0]
else:
num_profiles = utils.get_num_profiles(num_strats_per_population)
assert (strat_labels is None or isinstance(strat_labels, dict)
or (len(strat_labels) == num_profiles))
pi_list = np.empty((num_profiles, 0))
pi, alpha, m = None, None, None # Unused in infinite-alpha regime
epsilon_list = []
epsilon_pi_hist = {}
num_iters = 0
epsilon_mult_factor = 0.5
alpharank_succeeded_once = False
if warm_start_epsilon is not None:
epsilon = warm_start_epsilon
else:
epsilon = 0.5
while True:
try:
pi_prev = pi
_, _, pi, _, _ = compute(payoff_tables, m=m, alpha=alpha,
use_inf_alpha=True, inf_alpha_eps=epsilon)
epsilon_pi_hist[epsilon] = pi
# Stop when pi converges
if num_iters > min_iters and np.allclose(pi, pi_prev):
break
epsilon *= epsilon_mult_factor
num_iters += 1
alpharank_succeeded_once = True
      assert num_iters < max_iters, ('AlphaRank stationary distr. not found '
                                     'after {} iterations of pi_vs_epsilon '
                                     'sweep'.format(num_iters))
except ValueError as _:
print('Error: ', _, epsilon, min_epsilon)
# Case where epsilon has been decreased beyond desirable limits but no
# distribution found.
      assert epsilon >= min_epsilon, ('AlphaRank stationary distr. not found & '
                                      'epsilon < min_epsilon.')
      # Case where epsilon >= min_epsilon, but is still small enough to cause
      # exceptions due to precision issues. So increase it.
epsilon /= epsilon_mult_factor
# Case where alpharank_succeeded_once (i.e., epsilon_list and pi_list have
# at least one entry), and a) has not converged yet and b) failed on this
# instance due to epsilon being too small. I.e., the rate of decreasing
# of epsilon is too high.
if alpharank_succeeded_once:
epsilon_mult_factor = (epsilon_mult_factor+1.)/2.
epsilon *= epsilon_mult_factor
epsilon_list, pi_list = zip(*[(epsilon, epsilon_pi_hist[epsilon])
for epsilon in sorted(epsilon_pi_hist.keys(),
reverse=True)])
pi_list = np.asarray(pi_list)
if visualize:
if strat_labels is None:
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
alpharank_visualizer.plot_pi_vs_alpha(
pi_list.T,
epsilon_list,
num_populations,
num_strats_per_population,
strat_labels,
num_strats_to_label=num_strats_to_label,
legend_sort_clusters=legend_sort_clusters,
xlabel=r'Infinite-AlphaRank Noise $\epsilon$')
if return_epsilon:
return pi_list[-1], epsilon_list[-1]
else:
return pi_list[-1]
def sweep_pi_vs_alpha(payoff_tables,
strat_labels=None,
warm_start_alpha=None,
visualize=False,
return_alpha=False,
m=50,
rtol=1e-5,
atol=1e-8,
num_strats_to_label=10,
legend_sort_clusters=False):
"""Computes stationary distribution, pi, for range of selection intensities.
The range of selection intensities is defined in alpha_list and corresponds
to the temperature of the Fermi selection function.
Args:
payoff_tables: List of game payoff tables, one for each agent identity. Each
payoff_table may be either a numpy array, or a _PayoffTableInterface
object.
strat_labels: Human-readable strategy labels. See get_strat_profile_labels()
in utils.py for formatting details.
warm_start_alpha: Initial value of alpha to use.
visualize: Plot the sweep results.
return_alpha: Whether to return the final alpha used.
m: AlphaRank population size.
rtol: The relative tolerance parameter for np.allclose calls.
atol: The absolute tolerance parameter for np.allclose calls.
num_strats_to_label: Number of strats to label in legend
legend_sort_clusters: If true, strategies in the same cluster are sorted in
the legend according to orderings for earlier alpha values. Primarily for
visualization purposes! Rankings for lower alpha values should be
interpreted carefully.
Returns:
pi: AlphaRank stationary distribution.
alpha: The AlphaRank selection-intensity level resulting from sweep.
"""
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
num_populations = len(payoff_tables)
num_strats_per_population = utils.get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format)
if num_populations == 1:
num_profiles = num_strats_per_population[0]
else:
num_profiles = utils.get_num_profiles(num_strats_per_population)
assert (strat_labels is None or isinstance(strat_labels, dict)
or (len(strat_labels) == num_profiles))
pi_list = np.empty((num_profiles, 0))
alpha_list = []
num_iters = 0
alpha_mult_factor = 2.
if warm_start_alpha is not None:
alpha = warm_start_alpha
alpharank_succeeded_once = False
else:
alpha = 1e-4 # Reasonable default for most games, can be user-overridden
while 1:
try:
_, _, pi, _, _ = compute(payoff_tables, alpha=alpha, m=m)
pi_list = np.append(pi_list, np.reshape(pi, (-1, 1)), axis=1)
alpha_list.append(alpha)
# Stop when pi converges
if num_iters > 0 and np.allclose(pi, pi_list[:, num_iters - 1], rtol,
atol):
break
alpha *= alpha_mult_factor
num_iters += 1
alpharank_succeeded_once = True
except ValueError as _:
if warm_start_alpha is not None and not alpharank_succeeded_once:
# When warm_start_alpha is used, there's a chance that
# the initial warm_start_alpha is too large and causes exceptions due to
# the Markov transition matrix being reducible. So keep decreasing until
# a single success occurs.
alpha /= 2
elif not np.allclose(pi_list[:, -1], pi_list[:, -2], rtol, atol):
# Sweep stopped due to multiple stationary distributions, but pi had
# not converged due to the alpha scaling being too large.
alpha /= alpha_mult_factor
alpha_mult_factor = (alpha_mult_factor + 1.) / 2.
alpha *= alpha_mult_factor
else:
break
if visualize:
if strat_labels is None:
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
alpharank_visualizer.plot_pi_vs_alpha(
pi_list.T,
alpha_list,
num_populations,
num_strats_per_population,
strat_labels,
num_strats_to_label=num_strats_to_label,
legend_sort_clusters=legend_sort_clusters)
if return_alpha:
return pi, alpha
else:
return pi
def compute_and_report_alpharank(payoff_tables,
m=50,
alpha=100,
verbose=False,
num_top_strats_to_print=8):
"""Computes and visualizes Alpha-Rank outputs.
Args:
payoff_tables: List of game payoff tables, one for each agent identity. Each
payoff_table may be either a numpy array, or a _PayoffTableInterface
object.
m: Finite population size.
alpha: Fermi distribution temperature parameter.
verbose: Set to True to print intermediate results.
num_top_strats_to_print: Number of top strategies to print.
Returns:
pi: AlphaRank stationary distribution/rankings.
"""
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
rhos, rho_m, pi, _, _ = compute(payoff_tables, m=m, alpha=alpha)
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
if verbose:
print_results(payoff_tables, payoffs_are_hpt_format, pi=pi)
utils.print_rankings_table(
payoff_tables,
pi,
strat_labels,
num_top_strats_to_print=num_top_strats_to_print)
m_network_plotter = alpharank_visualizer.NetworkPlot(
payoff_tables, rhos, rho_m, pi, strat_labels, num_top_profiles=8)
m_network_plotter.compute_and_draw_network()
return pi
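# Illustrative usage sketch (added for exposition; assumes pyspiel and
# matplotlib are available, as elsewhere in open_spiel): rank the strategies of
# rock-paper-scissors from its per-player payoff tables.
def _example_alpharank_rps():
  import pyspiel  # pylint: disable=g-import-not-at-top
  game = pyspiel.load_matrix_game('matrix_rps')
  payoff_tables = utils.game_payoffs_array(game)  # Shape [2, 3, 3].
  return compute_and_report_alpharank(payoff_tables, m=50, alpha=1e2)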
def compute(payoff_tables,
m=50,
alpha=100,
use_local_selection_model=True,
verbose=False,
use_inf_alpha=False,
inf_alpha_eps=0.01):
"""Computes the finite population stationary statistics.
Args:
payoff_tables: List of game payoff tables, one for each agent identity. Each
payoff_table may be either a numpy array, or a _PayoffTableInterface
object.
m: Finite population size.
alpha: Fermi distribution temperature parameter.
use_local_selection_model: Enable local evolutionary selection model, which
considers fitness against the current opponent only, rather than the
global population state.
verbose: Set to True to print intermediate results.
use_inf_alpha: Use infinite-alpha alpharank model.
inf_alpha_eps: Noise term to use in infinite-alpha alpharank model.
Returns:
rhos: Matrix of strategy-to-strategy fixation probabilities.
rho_m: Neutral fixation probability.
pi: Finite population stationary distribution.
num_strats: Number of available strategies.
"""
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
num_populations = len(payoff_tables)
num_strats_per_population = utils.get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format)
# Handles the trivial case of Markov chain with one state
if np.array_equal(num_strats_per_population,
np.ones(len(num_strats_per_population))):
rhos = np.asarray([[1]])
rho_m = 1. / m if not use_inf_alpha else 1
num_profiles = 1
pi = np.asarray([1.])
return rhos, rho_m, pi, num_profiles, num_strats_per_population
if verbose:
print('Constructing c matrix')
print('num_strats_per_population:', num_strats_per_population)
if num_populations == 1:
    # Use fast closed-form analysis for constant-sum single-population games
game_is_constant_sum, payoff_sum = utils.check_is_constant_sum(
payoff_tables[0], payoffs_are_hpt_format)
if verbose:
print('game_is_constant_sum:', game_is_constant_sum, 'payoff sum: ',
payoff_sum)
# Single-population/symmetric game just uses the first player's payoffs
c, rhos = _get_singlepop_transition_matrix(
payoff_tables[0],
payoffs_are_hpt_format,
m,
alpha,
game_is_constant_sum,
use_local_selection_model,
payoff_sum,
use_inf_alpha=use_inf_alpha,
inf_alpha_eps=inf_alpha_eps)
num_profiles = num_strats_per_population[0]
else:
c, rhos = _get_multipop_transition_matrix(
payoff_tables,
payoffs_are_hpt_format,
m,
alpha,
use_inf_alpha=use_inf_alpha,
inf_alpha_eps=inf_alpha_eps)
num_profiles = utils.get_num_profiles(num_strats_per_population)
pi = _get_stationary_distr(c)
rho_m = 1. / m if not use_inf_alpha else 1 # Neutral fixation probability
if verbose:
print_results(payoff_tables, payoffs_are_hpt_format, rhos, rho_m, c, pi)
return rhos, rho_m, pi, num_profiles, num_strats_per_population
def suggest_alpha(payoff_tables, tol=.1):
"""Suggests an alpha for use in alpha-rank.
The suggested alpha is approximately the smallest possible alpha such that
the ranking has 'settled out'. It is calculated as
-ln(tol)/min_gap_between_payoffs.
The logic behind this settling out is that the fixation probabilities can be
expanded as a series, and the relative size of each term in this series
changes with alpha. As alpha gets larger and larger, one of the terms in
this series comes to dominate, and this causes the ranking to settle
down. Just how fast this domination happens is easy to calculate, and this
function uses it to estimate the alpha by which the ranking has settled.
You can find further discussion at the PR:
https://github.com/deepmind/open_spiel/pull/403
Args:
payoff_tables: List of game payoff tables, one for each agent identity. Each
payoff_table may be either a numpy array, or a _PayoffTableInterface
object.
tol: the desired gap between the first and second terms in the fixation
probability expansion. A smaller tolerance leads to a larger alpha, and
a 'more settled out' ranking.
Returns:
A suggested alpha.
"""
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
num_strats_per_population = utils.get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format)
num_profiles = utils.get_num_profiles(num_strats_per_population)
gap = np.inf
for id_row_profile in range(num_profiles):
row_profile = utils.get_strat_profile_from_id(num_strats_per_population,
id_row_profile)
next_profile_gen = utils.get_valid_next_profiles(num_strats_per_population,
row_profile)
for index_population_that_changed, col_profile in next_profile_gen:
payoff_table_k = payoff_tables[index_population_that_changed]
f_r = _get_payoff(payoff_table_k, payoffs_are_hpt_format, col_profile,
index_population_that_changed)
f_s = _get_payoff(payoff_table_k, payoffs_are_hpt_format, row_profile,
index_population_that_changed)
if f_r > f_s:
gap = min(gap, f_r - f_s)
return -np.log(tol)/gap
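# Worked example (added for exposition): if the smallest positive payoff gap
# between neighbouring profiles is 2 and tol=0.1, the suggested alpha is
# -ln(0.1) / 2, i.e. approximately 1.15.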
| open_spiel-master | open_spiel/python/egt/alpharank.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various visualization tools for Alpha-Rank.
All equations and variable names correspond to the following paper:
https://arxiv.org/abs/1903.01373
"""
from absl import logging
try:
from matplotlib import patches # pylint: disable=g-import-not-at-top
import matplotlib.patheffects as PathEffects # pylint: disable=g-import-not-at-top
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError as e:
  logging.info("If your tests failed with the error 'ImportError: No module "
               "named functools_lru_cache', this is a known bug in matplotlib "
               "and there is a workaround (run sudo apt install "
               "python-backports.functools-lru-cache; see "
               "https://github.com/matplotlib/matplotlib/issues/9344).")
raise e
import networkx as nx # pylint: disable=g-import-not-at-top
import numpy as np
from open_spiel.python.egt import utils
class NetworkPlot(object):
"""A class for visualizing the Alpha-Rank interaction network."""
def __init__(self,
payoff_tables,
rhos,
rho_m,
pi,
state_labels,
num_top_profiles=None):
"""Initializes a network plotting object.
Args:
payoff_tables: List of game payoff tables, one for each agent identity.
Each payoff_table may be either a 2D numpy array, or a
_PayoffTableInterface object.
rhos: Fixation probabilities.
rho_m: Neutral fixation probability.
pi: Stationary distribution of fixation Markov chain defined by rhos.
state_labels: Labels corresponding to Markov states. For the
single-population case, state_labels should be a list of pure strategy
names. For the multi-population case, it
should be a dict with (key,value) pairs: (population
index,list of strategy names)
num_top_profiles: Set to (int) to show only the graph nodes corresponding
to the top k elements of stationary distribution, or None to show all.
"""
self.fig = plt.figure(figsize=(10, 10))
self.num_populations = len(payoff_tables)
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
self.num_strats_per_population = (
utils.get_num_strats_per_population(payoff_tables,
payoffs_are_hpt_format))
self.rhos = rhos
self.rho_m = rho_m
self.pi = pi
self.num_profiles = len(pi)
self.state_labels = state_labels
self.first_run = True
self.num_top_profiles = num_top_profiles
if self.num_top_profiles:
# More than total number of strats requested for plotting
if self.num_top_profiles > self.num_profiles:
self.num_top_profiles = self.num_profiles
# Skip the bottom num_profiles-k stationary strategies.
self.nodes_to_skip = list(self.pi.argsort()[:self.num_profiles -
self.num_top_profiles])
else:
self.nodes_to_skip = []
self._reset_cycle_counter()
def _reset_cycle_counter(self):
self.i_cycle_to_show = -1
def _draw_network(self):
"""Draws the NetworkX object representing the underlying graph."""
plt.clf()
if self.num_populations == 1:
node_sizes = 5000
node_border_width = 1.
else:
node_sizes = 15000
node_border_width = 3.
vmin, vmax = 0, np.max(self.pi) + 0.1
nx.draw_networkx_nodes(
self.g,
self.pos,
node_size=node_sizes,
node_color=self.node_colors,
edgecolors="k",
cmap=plt.cm.Blues,
vmin=vmin,
vmax=vmax,
linewidths=node_border_width)
nx.draw_networkx_edges(
self.g,
self.pos,
node_size=node_sizes,
arrowstyle="->",
arrowsize=10,
edge_color=self.edge_colors,
edge_cmap=plt.cm.Blues,
width=5)
nx.draw_networkx_edge_labels(self.g, self.pos, edge_labels=self.edge_labels)
if self.num_populations > 1:
subnode_separation = 0.1
subgraph = nx.Graph()
for i_population in range(self.num_populations):
subgraph.add_node(i_population)
for i_strat_profile in self.g:
x, y = self.pos[i_strat_profile]
if self.num_populations == 1:
node_text = "$\\pi_{" + self.state_labels[i_strat_profile] + "}=$"
node_text += str(np.round(self.pi[i_strat_profile], decimals=2))
else:
node_text = "" # No text for multi-population case as plot gets messy
txt = plt.text(
x,
y,
node_text,
horizontalalignment="center",
verticalalignment="center",
fontsize=12)
txt.set_path_effects(
[PathEffects.withStroke(linewidth=3, foreground="w")])
if self.num_populations > 1:
sub_pos = nx.circular_layout(subgraph)
subnode_labels = dict()
strat_profile = utils.get_strat_profile_from_id(
self.num_strats_per_population, i_strat_profile)
for i_population in subgraph.nodes():
i_strat = strat_profile[i_population]
subnode_labels[i_population] = "$s^{" + str(i_population + 1) + "}="
subnode_labels[i_population] += (
self.state_labels[i_population][i_strat] + "$")
# Adjust the node positions generated by NetworkX's circular_layout(),
# such that the node for the 1st strategy starts on the left.
sub_pos[i_population] = (-sub_pos[i_population] * subnode_separation +
self.pos[i_strat_profile])
nx.draw(
subgraph,
pos=sub_pos,
with_labels=True,
width=0.,
node_color="w",
labels=subnode_labels,
node_size=2500)
def compute_and_draw_network(self):
"""Computes the various node/edge connections of the graph and draws it."""
if np.max(self.rhos) < self.rho_m:
      print("All node-to-node fixation probabilities (not including "
            "self-cycles) are lower than neutral. Thus, no graph will be "
            "drawn.")
return
self.g = nx.MultiDiGraph()
self.edge_labels = {}
self.edge_alphas = []
rho_max = np.max(self.rhos / self.rho_m)
rho_m_alpha = 0.1 # Transparency of neutral selection edges
for i in range(self.num_profiles):
for j in range(self.num_profiles):
# Do not draw edge if any node involved is skipped
if j not in self.nodes_to_skip and i not in self.nodes_to_skip:
rate = self.rhos[i][j] / self.rho_m
# Draws edges when fixation from one strategy to another occurs (i.e.,
# rate > 1), or with fixation equal to neutral selection probability
# (i.e., rate == 1). This is consistent with visualizations used in
# finite-population literature.
if rate > 1:
# Compute alphas. Clip needed due to numerical precision.
alpha = np.clip(rho_m_alpha + (1 - rho_m_alpha) * rate / rho_max,
None, 1.)
self.g.add_edge(i, j, weight=alpha, label="{:.01f}".format(rate))
self.edge_alphas.append(alpha)
elif np.isclose(rate, 1):
alpha = rho_m_alpha
self.g.add_edge(i, j, weight=alpha, label="{:.01f}".format(rate))
self.edge_alphas.append(alpha)
# Label edges for non-self-loops with sufficient flowrate
if i != j and rate > 1:
edge_string = "$" + str(np.round(rate, decimals=2)) + "\\rho_m$"
else:
edge_string = ""
self.edge_labels[(i, j)] = edge_string
# MultiDiGraph nodes are not ordered, so order the node colors accordingly
self.node_colors = [self.pi[node] for node in self.g.nodes()]
self.cycles = list(nx.simple_cycles(self.g))
self.num_cycles = len(self.cycles)
# Color the edges of cycles if user requested it
if self.i_cycle_to_show >= 0:
all_cycle_edges = [
zip(nodes, (nodes[1:] + nodes[:1])) for nodes in self.cycles
]
cur_cycle_edges = all_cycle_edges[self.i_cycle_to_show]
self.edge_colors = []
for u, v in self.g.edges():
if (u, v) in cur_cycle_edges:
self.edge_colors.append([1., 0., 0.])
else:
self.edge_colors.append([1. - self.g[u][v][0]["weight"]] * 3)
else:
self.edge_colors = [
[1. - self.g[u][v][0]["weight"]] * 3 for u, v in self.g.edges()
]
self.edge_alphas = [self.g[u][v][0]["weight"] for u, v in self.g.edges()]
ax = plt.gca()
# Centered circular pose
self.pos = nx.layout.circular_layout(self.g)
all_x = [node_pos[0] for node, node_pos in self.pos.items()]
all_y = [node_pos[1] for node, node_pos in self.pos.items()]
min_x = np.min(all_x)
max_x = np.max(all_x)
min_y = np.min(all_y)
max_y = np.max(all_y)
for _, node_pos in self.pos.items():
node_pos[0] -= (max_x + min_x) / 2
node_pos[1] -= (max_y + min_y) / 2
# Rendering
self._draw_network()
if self.first_run:
ax.autoscale_view()
ax.set_axis_off()
ax.set_aspect("equal")
plt.ylim(-1.3, 1.3)
plt.xlim(-1.3, 1.3)
if self.first_run:
self.first_run = False
plt.axis("off")
plt.show()
def _draw_pie(ax,
ratios,
colors,
x_center=0,
y_center=0,
size=100,
clip_on=True,
zorder=0):
"""Plots a pie chart.
Args:
ax: plot axis.
ratios: list indicating size of each pie slice, with elements summing to 1.
colors: list indicating color of each pie slice.
x_center: x coordinate of pie center.
y_center: y coordinate of pie center.
size: pie size.
clip_on: control clipping of pie (e.g., to show it when it's out of axis).
zorder: plot z order (e.g., to show pie on top of other plot elements).
"""
xy = []
start = 0.
for ratio in ratios:
x = [0] + np.cos(
np.linspace(2 * np.pi * start, 2 * np.pi *
(start + ratio), 30)).tolist()
y = [0] + np.sin(
np.linspace(2 * np.pi * start, 2 * np.pi *
(start + ratio), 30)).tolist()
xy.append(list(zip(x, y)))
start += ratio
for i, xyi in enumerate(xy):
ax.scatter([x_center], [y_center],
marker=xyi,
s=size,
facecolor=colors[i],
edgecolors="none",
clip_on=clip_on,
zorder=zorder)
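# Illustrative usage sketch (added for exposition; not part of the original
# module): draw a half-red / half-blue pie at the origin of a fresh axis.
def _example_draw_pie():
  _, ax = plt.subplots()
  _draw_pie(ax, ratios=[0.5, 0.5], colors=["r", "b"], size=300)
  plt.show()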
def generate_sorted_masses_strats(pi_list, curr_alpha_idx, strats_to_go):
"""Generates a sorted list of (mass, strats) tuples.
Args:
pi_list: List of stationary distributions, pi
curr_alpha_idx: Index in alpha_list for which to start clustering
strats_to_go: List of strategies that still need to be ordered
Returns:
Sorted list of (mass, strats) tuples.
"""
if curr_alpha_idx > 0:
sorted_masses_strats = list()
masses_to_strats = utils.cluster_strats(pi_list[curr_alpha_idx,
strats_to_go])
for mass, strats in sorted(masses_to_strats.items(), reverse=True):
if len(strats) > 1:
to_append = generate_sorted_masses_strats(pi_list, curr_alpha_idx - 1,
strats)
to_append = [(mass, [strats_to_go[s]
for s in strats_list])
for (mass, strats_list) in to_append]
sorted_masses_strats.extend(to_append)
else:
sorted_masses_strats.append((mass, [
strats_to_go[strats[0]],
]))
return sorted_masses_strats
else:
to_return = sorted(
utils.cluster_strats(pi_list[curr_alpha_idx, strats_to_go]).items(),
reverse=True)
to_return = [(mass, [strats_to_go[s]
for s in strats_list])
for (mass, strats_list) in to_return]
return to_return
def plot_pi_vs_alpha(pi_list,
alpha_list,
num_populations,
num_strats_per_population,
strat_labels,
num_strats_to_label,
plot_semilogx=True,
xlabel=r"Ranking-intensity $\alpha$",
ylabel=r"Strategy mass in stationary distribution $\pi$",
legend_sort_clusters=False):
"""Plots stationary distributions, pi, against selection intensities, alpha.
Args:
pi_list: List of stationary distributions, pi.
alpha_list: List of selection intensities, alpha.
num_populations: The number of populations.
num_strats_per_population: List of the number of strategies per population.
strat_labels: Human-readable strategy labels.
num_strats_to_label: The number of top strategies to label in the legend.
plot_semilogx: Boolean set to enable/disable semilogx plot.
xlabel: Plot xlabel.
ylabel: Plot ylabel.
legend_sort_clusters: If true, strategies in the same cluster are sorted in
the legend according to orderings for earlier alpha values. Primarily for
visualization purposes! Rankings for lower alpha values should be
interpreted carefully.
"""
# Cluster strategies for which the stationary distribution has similar masses
masses_to_strats = utils.cluster_strats(pi_list[-1, :])
# Set colors
num_strat_profiles = np.shape(pi_list)[1]
num_strats_to_label = min(num_strats_to_label, num_strat_profiles)
cmap = plt.get_cmap("Paired")
colors = [cmap(i) for i in np.linspace(0, 1, num_strat_profiles)]
# Plots stationary distribution vs. alpha series
plt.figure(facecolor="w")
axes = plt.gca()
legend_line_objects = []
legend_labels = []
rank = 1
num_strats_printed = 0
add_legend_entries = True
if legend_sort_clusters:
sorted_masses_strats = generate_sorted_masses_strats(
pi_list, pi_list.shape[0] - 1, range(pi_list.shape[1]))
else:
sorted_masses_strats = sorted(masses_to_strats.items(), reverse=True)
for mass, strats in sorted_masses_strats:
for profile_id in strats:
if num_populations == 1:
strat_profile = profile_id
else:
strat_profile = utils.get_strat_profile_from_id(
num_strats_per_population, profile_id)
if plot_semilogx:
series = plt.semilogx(
alpha_list,
pi_list[:, profile_id],
color=colors[profile_id],
linewidth=2)
else:
series = plt.plot(
alpha_list,
pi_list[:, profile_id],
color=colors[profile_id],
linewidth=2)
if add_legend_entries:
if num_strats_printed >= num_strats_to_label:
# Placeholder blank series for remaining entries
series = plt.semilogx(np.NaN, np.NaN, "-", color="none")
label = "..."
add_legend_entries = False
else:
label = utils.get_label_from_strat_profile(num_populations,
strat_profile,
strat_labels)
legend_labels.append(label)
legend_line_objects.append(series[0])
num_strats_printed += 1
rank += 1
# Plots pie charts on far right of figure to indicate clusters of strategies
# with identical rank
for mass, strats in iter(masses_to_strats.items()):
_draw_pie(
axes,
ratios=[1 / len(strats)] * len(strats),
colors=[colors[i] for i in strats],
x_center=alpha_list[-1],
y_center=mass,
size=200,
clip_on=False,
zorder=10)
# Axes ymax set slightly above highest stationary distribution mass
max_mass = np.amax(pi_list)
axes_y_max = np.ceil(
10. * max_mass) / 10 # Round upward to nearest first decimal
axes_y_max = np.clip(axes_y_max, 0., 1.)
# Plots a rectangle highlighting the rankings on the far right of the figure
box_x_min = alpha_list[-1] * 0.7
box_y_min = np.min(pi_list[-1, :]) - 0.05 * axes_y_max
width = 0.7 * alpha_list[-1]
height = np.max(pi_list[-1, :]) - np.min(
pi_list[-1, :]) + 0.05 * axes_y_max * 2
axes.add_patch(
patches.Rectangle((box_x_min, box_y_min),
width,
height,
edgecolor="b",
facecolor=(1, 0, 0, 0),
clip_on=False,
linewidth=5,
zorder=20))
# Plot formatting
axes.set_xlim(np.min(alpha_list), np.max(alpha_list))
axes.set_ylim([0.0, axes_y_max])
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
axes.set_axisbelow(True) # Axes appear below data series in terms of zorder
# Legend on the right side of the current axis
box = axes.get_position()
axes.set_position([box.x0, box.y0, box.width * 0.8, box.height])
axes.legend(
legend_line_objects,
legend_labels,
loc="center left",
bbox_to_anchor=(1.05, 0.5))
plt.grid()
plt.show()
| open_spiel-master | open_spiel/python/egt/alpharank_visualizer.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An object to store the heuristic payoff table for a game."""
import abc
import collections
import numpy as np
from open_spiel.python.egt import utils
def _inc_average(count, average, value):
"""Computes the incremental average, `a_n = ((n - 1)a_{n-1} + v_n) / n`."""
count += 1
average = ((count - 1) * average + value) / count
return (count, average)
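# Worked example (added for exposition): starting from (count, average) of
# (0, 0.) and feeding the values 2, 4, 6 yields running averages 2, 3, 4.
def _example_inc_average():
  count, average = 0, 0.
  for value, expected in [(2., 2.), (4., 3.), (6., 4.)]:
    count, average = _inc_average(count, average, value)
    assert average == expected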
def from_match_results(df, consider_agents):
"""Builds a heuristic payoff table from average win probabilities.
Args:
df: a Pandas dataframe of match results. Must contain a column "agents"
consisting of tuples of agent names, and a column "scores" consisting of
the score for each agent in the match.
consider_agents: a list of agent names. Will only consider matches in which
exclusively these agents appeared.
Returns:
A PayoffTable object.
Raises:
ValueError: if dataframe is empty, or columns 'agents' and 'scores' not
specified, or games have zero players.
"""
if df.empty:
raise ValueError("Please provide a non-empty dataframe.")
if "agents" not in df.columns:
raise ValueError("Dataframe must contain a column 'agents'.")
if "scores" not in df.columns:
raise ValueError("Dataframe must contain a column 'scores'.")
num_strategies = len(consider_agents)
num_players = len(df["agents"][0])
if num_players == 0:
raise ValueError("Games must have > 0 players.")
count_per_distribution = {}
win_prob_per_distribution = {}
for i, row in df.iterrows():
print("Parsing row {} / {} ...".format(i, len(df)), end="\r")
agents = row["agents"]
scores = row["scores"]
assert len(agents) == len(scores) == num_players
if not set(agents).issubset(set(consider_agents)):
# Ignore agents outside those we are supposed to consider.
continue
elif len(set(agents)) == 1:
# Special case of self-play: deal with separately.
continue
# Find winner(s): In each match one must determine a winning strategy. One
# way of doing this is to average over the returns for each strategy and
# then say that the one with the greatest returns is the winner.
# Get unique score per agent by averaging.
count_per_agent = collections.defaultdict(int)
average_score_per_agent = collections.defaultdict(int)
for agent, score in zip(agents, scores):
count_per_agent[agent], average_score_per_agent[agent] = _inc_average(
count_per_agent[agent], average_score_per_agent[agent], score)
winner_score = max(average_score_per_agent.values())
winner_agents = [
k for k, v in average_score_per_agent.items() if v == winner_score
]
winner_strategy_idxs = [
consider_agents.index(winner) for winner in winner_agents
]
# Select the winner as the one maximizing the selected statistics.
win_probabilities = np.zeros(num_strategies)
for winner_strategy_idx in winner_strategy_idxs:
win_probabilities[winner_strategy_idx] = 1 / len(winner_strategy_idxs)
distribution = np.zeros(num_strategies)
for agent, count in count_per_agent.items():
strategy_idx = consider_agents.index(agent)
distribution[strategy_idx] = count
distribution = tuple(distribution)
if distribution not in count_per_distribution:
count_per_distribution[distribution] = 1
win_prob_per_distribution[distribution] = win_probabilities
continue
(count_per_distribution[distribution],
win_prob_per_distribution[distribution]) = _inc_average(
count_per_distribution[distribution],
win_prob_per_distribution[distribution], win_probabilities)
# Populate self-play case (strategy both wins and loses).
for idx, agent in enumerate(consider_agents):
distribution = np.zeros(num_strategies)
distribution[idx] = num_players
distribution = tuple(distribution)
win_prob = np.zeros(num_strategies)
win_prob[idx] = 0.5
win_prob_per_distribution[distribution] = win_prob
# Create empty (nan) payoff table.
table = PayoffTable(num_players, num_strategies)
# Populate with win probabilities.
for distribution, payoff in win_prob_per_distribution.items():
table[distribution] = payoff
return table
def from_matrix_game(matrix_game):
"""Returns a PayOffTable given a symmetric 2-player matrix game.
Args:
matrix_game: The payoff matrix corresponding to a 2-player symmetric game.
"""
if not isinstance(matrix_game, np.ndarray):
raise ValueError("The matrix game should be a numpy array, not a {}".format(
type(matrix_game)))
num_strats_per_population = (
utils.get_num_strats_per_population(
payoff_tables=[matrix_game], payoffs_are_hpt_format=False))
assert len(num_strats_per_population) == 2
assert num_strats_per_population[0] == num_strats_per_population[1]
num_strategies = num_strats_per_population[0]
num_profiles = utils.get_num_profiles(num_strats_per_population)
table = PayoffTable(num_players=2, num_strategies=num_strategies)
# Construct the HPT by filling in the corresponding payoffs for each profile
for id_profile in range(num_profiles):
strat_profile = utils.get_strat_profile_from_id(num_strats_per_population,
id_profile)
distribution = table.get_distribution_from_profile(strat_profile)
# For symmetric matrix games, multiple strategy profiles correspond to the
# same distribution and payoffs. Thus, ensure the table entry has not
# already been filled by a previous strategy profile.
if table.item_is_uninitialized(tuple(distribution)):
payoffs = np.zeros(num_strategies)
payoffs[strat_profile[0]] = matrix_game[strat_profile[0],
strat_profile[1]]
payoffs[strat_profile[1]] = matrix_game[strat_profile[1],
strat_profile[0]]
table[tuple(distribution)] = payoffs
return table
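# Illustrative usage sketch (added for exposition; not part of the original
# module): build the heuristic payoff table of symmetric rock-paper-scissors.
def _example_from_matrix_game():
  rps = np.asarray([[0., -1., 1.], [1., 0., -1.], [-1., 1., 0.]])
  table = from_matrix_game(rps)
  # Two players over three strategies give C(2 + 3 - 1, 2) = 6 distributions.
  assert table.num_players == 2 and table.num_strategies == 3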
def from_heuristic_payoff_table(hpt):
"""Returns a `PayoffTable` instance from a numpy 2D HPT."""
[num_rows, num_columns] = hpt.shape
assert num_columns % 2 == 0
num_strategies = int(num_columns / 2)
num_players = np.sum(hpt[0, :num_strategies])
obj = PayoffTable(num_players, num_strategies, initialize_payoff_table=False)
# pylint: disable=protected-access
for row in hpt:
payoff_row = np.array(row[num_strategies:])
obj._payoff_table[tuple(row[:num_strategies])] = payoff_row
assert len(obj._payoff_table) == num_rows
# pylint: enable=protected-access
return obj
def _compute_win_probability_from_elo(rating_1, rating_2):
"""Computes the win probability of 1 vs 2 based on the provided Elo ratings.
Args:
rating_1: The Elo rating of player 1.
rating_2: The Elo rating of player 2.
Returns:
The win probability of player 1, when playing against 2.
"""
m = max(rating_1, rating_2) # We subtract the max for numerical stability.
m1 = 10**((rating_1 - m) / 400)
m2 = 10**((rating_2 - m) / 400)
return m1 / (m1 + m2)
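# Worked example (added for exposition): a 400-point Elo advantage corresponds
# to 10:1 odds, i.e. a win probability of 10/11 (roughly 0.909).
def _example_elo_win_probability():
  win_prob = _compute_win_probability_from_elo(1400., 1000.)
  assert abs(win_prob - 10. / 11.) < 1e-12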
def from_elo_scores(elo_ratings, num_agents=2):
"""Computes the Elo win probability payoff matrix `X` from the Elo scores.
Args:
elo_ratings: The elo scores vector of length [num_strategies].
num_agents: The number of agents. Only 2 agents are supported for now.
Returns:
The HPT associated to the Elo win probability payoff matrix `X`. The score
for a given agent is given by its win probability given its Elo score.
Raises:
ValueError: If `num_agents != 2`.
"""
if num_agents != 2:
raise ValueError("Only 2 agents are supported, because we need to compute "
"the win probability and that can only be computed with "
"2 players.")
num_strategies = len(elo_ratings)
hpt_rows = []
possible_teams = utils.distribute(num_agents, num_strategies, normalize=False)
for distribution_row in possible_teams:
payoff_row = np.zeros([num_strategies])
    # np.nonzero returns one index array per axis; take the single axis here.
    non_zero_index = np.nonzero(distribution_row)[0]
assert len(non_zero_index.shape) == 1
if len(non_zero_index) > 1:
index_first_player, index_second_player = non_zero_index
prob = _compute_win_probability_from_elo(elo_ratings[index_first_player],
elo_ratings[index_second_player])
payoff_row[index_first_player] = prob
payoff_row[index_second_player] = 1 - prob
elif len(non_zero_index) == 1:
payoff_row[non_zero_index[0]] = 0.5
else:
assert False, "Impossible case, we have at least one strategy used."
hpt_rows.append(np.hstack([distribution_row, payoff_row]))
return NumpyPayoffTable(np.vstack(hpt_rows))
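# Example (sketch; see test_from_elo_scores in heuristic_payoff_table_test.py
# for the exact expected rows):
#
#   hpt = from_elo_scores([800, 400, 400])
#   print(hpt())  # 6 rows: 3 self-play rows (payoff 0.5) + 3 mixed pairings.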
class _PayoffTableInterface(metaclass=abc.ABCMeta):
"""An interface for the PayoffTable classes."""
@abc.abstractmethod
def __call__(self):
"""Returns a view of the table as a np.array."""
@abc.abstractproperty
def num_strategies(self):
pass
@abc.abstractproperty
def num_players(self):
pass
@abc.abstractproperty
def num_rows(self):
pass
def expected_payoff(self, strategy):
"""The expected payoff of each pure strategy against the mixed strategy.
    We define the expected payoff of a pure strategy as its expected payoff
    when the remaining players are drawn at random from the mixed strategy.
    The mixed strategy is equivalently the composition of an infinitely large
    population. To find the expected payoff, we:
    1. Compute the probabilities of sampling each player distribution in the
    heuristic payoff table from the population.
    2. Compute the expected payoff of each pure strategy against the mixed
    strategy by averaging over the payoff rows with these probabilities.
For each pure strategy we must normalize by the probability that it appeared
in the player distribution at all; otherwise we would be undercounting.
For more details, see https://arxiv.org/pdf/1803.06376.pdf.
Args:
strategy: an `np.array(shape=self._num_strategies)` of probabilities.
Returns:
An `np.array(shape=self._num_strategies)` of payoffs for pure strategies.
Raises:
ValueError: if the provided strategy probabilities do not define a valid
distribution over `self._num_strategies` strategies.
"""
if strategy.shape != (self.num_strategies,):
raise ValueError("The strategy probabilities should be of shape "
"({},), not {}".format(self.num_strategies,
strategy.shape))
if np.around(np.sum(strategy), decimals=3) != 1.0:
raise ValueError("The strategy probabilities should sum to 1.")
if not all([p >= 0 for p in strategy]):
raise ValueError("The strategy probabilities should all be >= 0.")
distributions = self._distributions.astype(int)
if not np.all(np.isclose(self._distributions, distributions, 1e-10)):
raise ValueError("Conversion to integers for distributions failed.")
# Multinomial coefficients (one per distribution).
coefficients = _multinomial_coefficients(distributions)
# Probabilities of sampling each distribution given population composition.
probabilities = _row_probabilities(coefficients, distributions, strategy)
return _expected_payoff(probabilities, self._payoffs, strategy,
self._num_players)
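  # Example (a sketch mirroring test_expected_payoff in
  # heuristic_payoff_table_test.py; assumes `pyspiel` is importable here):
  #
  #   import pyspiel
  #   game = pyspiel.load_matrix_game("matrix_rps")
  #   table = from_matrix_game(utils.game_payoffs_array(game)[0])
  #   table.expected_payoff(np.array([0.7, 0.2, 0.1]))
  #   # -> an np.array with one expected payoff per pure strategy.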
@property
def _payoffs(self):
"""Returns an np.array containing the payoffs."""
return self()[:, self.num_strategies:]
@property
def _distributions(self):
"""Returns an np.array containing the distribution over pure strategies."""
return self()[:, :self.num_strategies]
class NumpyPayoffTable(object):
"""An object wrapping a Numpy array heuristic payoff table for a metagame.
NOTE: We assume the number of players to be equal to the number of
replicators.
"""
def __init__(self, payoff_table, writeable=False):
"""Initializes an immutable payoff table.
Let p be the number of players, k be the number of strategies. Then, there
are Combinations(p + k - 1, k - 1) distinct configurations for the
strategies of the p players.
The payoff table is of shape [(p + k - 1)! / (p! * (k - 1)!), 2 * k].
    The first k columns encode the number of players playing each strategy.
The second k columns encode the average payoff of each strategy in that
game.
Args:
payoff_table: A numpy heuristic payoff table, which is assumed to be
correctly constructed.
writeable: Whether the numpy array payoff_table should be writeable. See
https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.ndarray.flags.html.
        However, locking a base object does not lock any views that already
        reference it, so such views remain writeable.
"""
self._writeable = writeable
self._payoff_table = payoff_table
[self._num_rows, num_columns] = self._payoff_table.shape
assert num_columns % 2 == 0
self._num_strategies = int(num_columns / 2)
self._num_players = np.sum(self._payoff_table[0, :self._num_strategies])
def __call__(self):
"""Returns a view of the table as a np.array.
The mutability of the object is controlled by `writeable`.
"""
if self._writeable:
return self._payoff_table
else:
return np.copy(self._payoff_table)
@property
def writeable(self):
return self._writeable
@writeable.setter
def writeable(self, writeable):
self._writeable = writeable
@property
def num_strategies(self):
return self._num_strategies
@property
def num_players(self):
return self._num_players
@property
def num_rows(self):
return self._num_rows
class PayoffTable(_PayoffTableInterface):
"""A mutable object to store the heuristic payoff table for a metagame."""
def __init__(self, num_players, num_strategies, initialize_payoff_table=True):
"""A heuristic payoff table encodes payoffs from various strategy profiles.
See `NumpyPayoffTable` for the description of the heuristic payoff table.
Internally, this is represented as an OrderedDict {distribution: payoff}.
Args:
num_players: The number of players in the game.
num_strategies: The number of strategies an individual could play.
initialize_payoff_table: If `True`, nan entries will be created for all
rows. If `False`, no rows are created at all.
"""
super(PayoffTable, self).__init__()
self.is_hpt = True
self._num_players = num_players
self._num_strategies = num_strategies
self._payoff_table = collections.OrderedDict()
if initialize_payoff_table:
# Populate empty (nan) payoff table.
player_distributions = utils.distribute(self._num_players,
self._num_strategies)
for d in player_distributions:
self._payoff_table[d] = np.full(self._num_strategies, np.nan)
def __call__(self):
"""Returns a view of the table as a np.array."""
return np.concatenate((self._distributions, self._payoffs), axis=1)
@property
def _payoffs(self):
"""Returns an np.array containing the payoffs."""
return np.array(list(self._payoff_table.values()))
@property
def _distributions(self):
"""Returns an np.array containing the distribution over pure strategies."""
return np.array(list(self._payoff_table))
@property
def num_strategies(self):
return self._num_strategies
@property
def num_players(self):
return self._num_players
@property
def num_rows(self):
return len(self._payoff_table)
def __setitem__(self, distribution, payoff):
assert distribution in self._payoff_table
assert len(payoff) == self._num_strategies
self._payoff_table[distribution] = payoff
def __getitem__(self, distribution):
"""Returns the payoff profile for a given strategy distribution.
Args:
distribution: strategy profile tuple.
Returns:
Payoff profile for the corresponding strategy distribution.
"""
return self._payoff_table[distribution]
def item_is_uninitialized(self, distribution):
return np.isnan(np.sum(self._payoff_table[distribution]))
def get_distribution_from_profile(self, strat_profile):
distribution = [0] * self.num_strategies
for s in strat_profile:
distribution[s] += 1
return distribution
# The following provides utility functions to compute the expected payoff of
# a given strategy profile.
# See https://arxiv.org/pdf/1803.06376.pdf, page 3, left column.
#
# Usage:
#
#   coefficients = _multinomial_coefficients(distributions)
#   row_probabilities = _row_probabilities(coefficients, distributions, strategy)
#   expected_payoff = _expected_payoff(row_probabilities, payoffs, strategy,
#                                      num_players)
#
#
def _multinomial_coefficients(distributions):
"""Returns the multinomial coefficients.
Args:
distributions: The distributions table [num_rows, num_strategies].
"""
v_factorial = np.vectorize(np.math.factorial)
# Multinomial coefficients (one per distribution Ni).
# ( P )
  # ( Ni1, Ni2, ... Nik )
coefficients = (
v_factorial(np.sum(distributions, axis=1)) /
np.prod(v_factorial(distributions), axis=1))
return coefficients
def _row_probabilities(coefficients, distributions, strategy):
"""Returns the row probabilities [num_rows].
Args:
coefficients: The multinomial coefficients [num_rows].
distributions: The distributions table [num_rows, num_strategies].
strategy: The strategy array [num_strategies].
"""
row_probabilities = coefficients * np.prod(
np.power(strategy, distributions), axis=1)
return row_probabilities
def _expected_payoff(row_probabilities, payoffs, strategy, num_players):
# pylint: disable=g-doc-args
r"""Returns the expected payoff.
Computes (with p=num_players):
r_j = \sum_i row_probabilities[i] * payoffs[i, j] / (1 - (1-strategy[j])^p)
"""
# pylint: enable=g-doc-args
[num_rows] = row_probabilities.shape
[num_rows_2, num_strategies] = payoffs.shape
[num_strategies_2] = strategy.shape
assert num_rows == num_rows_2
assert num_strategies == num_strategies_2
# One per pure strategy.
numerators = np.dot(np.transpose(payoffs), row_probabilities)
# One per pure strategy.
denominators = 1 - np.power(1 - strategy, num_players)
return numerators / denominators
| open_spiel-master | open_spiel/python/egt/heuristic_payoff_table.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.egt.alpharank_visualizer."""
from absl.testing import absltest
# pylint: disable=g-import-not-at-top
import matplotlib
matplotlib.use("agg") # switch backend for testing
import mock
import numpy as np
from open_spiel.python.egt import alpharank
from open_spiel.python.egt import alpharank_visualizer
from open_spiel.python.egt import utils
import pyspiel
class AlpharankVisualizerTest(absltest.TestCase):
@mock.patch("%s.alpharank_visualizer.plt" % __name__)
def test_plot_pi_vs_alpha(self, mock_plt):
# Construct game
game = pyspiel.load_matrix_game("matrix_rps")
payoff_tables = utils.game_payoffs_array(game)
_, payoff_tables = utils.is_symmetric_matrix_game(payoff_tables)
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
# Compute alpharank
alpha = 1e2
_, _, pi, num_profiles, num_strats_per_population = (
alpharank.compute(payoff_tables, alpha=alpha))
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
num_populations = len(payoff_tables)
# Construct synthetic pi-vs-alpha history
pi_list = np.empty((num_profiles, 0))
alpha_list = []
for _ in range(2):
pi_list = np.append(pi_list, np.reshape(pi, (-1, 1)), axis=1)
alpha_list.append(alpha)
# Test plotting code (via pyplot mocking to prevent plot pop-up)
alpharank_visualizer.plot_pi_vs_alpha(
pi_list.T,
alpha_list,
num_populations,
num_strats_per_population,
strat_labels,
num_strats_to_label=0)
self.assertTrue(mock_plt.show.called)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/egt/alpharank_visualizer_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/egt/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the heuristic_payoff_table library."""
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.egt import heuristic_payoff_table
from open_spiel.python.egt import utils
import pyspiel
class ModuleLevelTest(absltest.TestCase):
def test__multinomial_coefficients(self):
distributions = np.asarray([
[2, 0],
[1, 1],
[1, 0],
])
coefficients = heuristic_payoff_table._multinomial_coefficients(
distributions)
np.testing.assert_array_equal([1., 2., 1.], coefficients)
distributions = np.asarray([
[3, 0],
[2, 1],
[1, 2],
[0, 3],
])
coefficients = heuristic_payoff_table._multinomial_coefficients(
distributions)
np.testing.assert_array_equal([1., 3., 3., 1.], coefficients)
distributions = np.asarray([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2],
[1, 1, 0],
[1, 0, 1],
[0, 1, 1],
])
coefficients = heuristic_payoff_table._multinomial_coefficients(
distributions)
np.testing.assert_array_equal([1., 1., 1., 2., 2., 2.], coefficients)
class PayoffTableTest(parameterized.TestCase):
@parameterized.parameters(
(5, 2),
(2, 2),
)
def test_construction(self, num_players, num_strategies):
logging.info("Testing payoff table construction.")
table = heuristic_payoff_table.PayoffTable(num_players, num_strategies)
num_rows = utils.n_choose_k(num_players + num_strategies - 1, num_players)
distributions = np.array(
list(utils.distribute(num_players, num_strategies)))
payoffs = np.full([int(num_rows), num_strategies], np.nan)
np.testing.assert_array_equal(
np.concatenate([distributions, payoffs], axis=1), table())
def test_from_heuristic_payoff_table(self):
team_compositions = np.asarray([
[2, 0],
[1, 1],
[0, 2],
])
payoffs = np.asarray([
[1, 2],
[3, 4],
[5, 6],
])
hpt = np.hstack([team_compositions, payoffs])
table = heuristic_payoff_table.from_heuristic_payoff_table(hpt)
np.testing.assert_array_equal(team_compositions, table._distributions)
np.testing.assert_array_equal(payoffs, table._payoffs)
self.assertEqual(3, table.num_rows)
distributions = np.asarray([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2],
[1, 1, 0],
[1, 0, 1],
[0, 1, 1],
])
shape = distributions.shape
payoffs = np.reshape(np.arange(np.prod(shape)), shape)
hpt = np.hstack([distributions, payoffs])
table = heuristic_payoff_table.from_heuristic_payoff_table(hpt)
np.testing.assert_array_equal(distributions, table._distributions)
np.testing.assert_array_equal(payoffs, table._payoffs)
self.assertEqual(distributions.shape[0], table.num_rows)
@parameterized.parameters(("matrix_rps",))
def test_from_matrix_game(self, game):
game = pyspiel.load_matrix_game(game)
payoff_tables = utils.game_payoffs_array(game)
logging.info("Testing payoff table construction for matrix game.")
table = heuristic_payoff_table.from_matrix_game(payoff_tables[0])
print(table())
@parameterized.parameters((np.array([0.7, 0.2, 0.1]),))
def test_expected_payoff(self, strategy):
logging.info("Testing expected payoff for matrix game.")
game = pyspiel.load_matrix_game("matrix_rps")
payoff_tables = utils.game_payoffs_array(game)
table = heuristic_payoff_table.from_matrix_game(payoff_tables[0])
expected_payoff = table.expected_payoff(strategy)
print(expected_payoff)
assert len(expected_payoff) == table._num_strategies
def test_from_elo_scores(self):
elo_scores = [800, 400, 400]
elo_1 = 10**(800 / 400)
elo_2 = 10**(400 / 400) # This is also the associated value for player 3.
expected = np.asarray([
[2, 0, 0, 1 / 2, 0, 0],
[0, 2, 0, 0, 1 / 2, 0],
[0, 0, 2, 0, 0, 1 / 2],
[1, 1, 0, elo_1 / (elo_1 + elo_2), elo_2 / (elo_1 + elo_2), 0],
[1, 0, 1, elo_1 / (elo_1 + elo_2), 0, elo_2 / (elo_1 + elo_2)],
[0, 1, 1, 0, 1 / 2, 1 / 2],
])
htp = heuristic_payoff_table.from_elo_scores(elo_scores)
np.testing.assert_array_almost_equal(
utils.sort_rows_lexicographically(expected),
utils.sort_rows_lexicographically(htp()),
verbose=True)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/egt/heuristic_payoff_table_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualization for single/multi-population dynamics in normal-form games.
Example:
game = pyspiel.load_game("matrix_pd")
payoff_tensor = utils.game_payoffs_array(game)
dyn = dynamics.MultiPopulationDynamics(payoff_tensor, dynamics.replicator)
ax = plt.subplot(projection="2x2")
ax.quiver(dyn)
"""
from absl import logging
# pylint: disable=g-import-not-at-top
try:
from matplotlib import axes
from matplotlib import projections
from matplotlib import transforms
from matplotlib import font_manager
from matplotlib import rcParams
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.patches import FancyArrowPatch
from matplotlib.collections import LineCollection
import matplotlib.cm
import matplotlib.colors
except ImportError as e:
logging.info("If your tests failed with the error 'ImportError: No module "
"named functools_lru_cache', this is a known bug in matplotlib "
"and there is a workaround (run sudo apt install "
"python-backports.functools-lru-cache. See: "
"https://github.com/matplotlib/matplotlib/issues/9344.")
raise ImportError(str(e)) from e
import numpy as np
from open_spiel.python.egt import utils
def _eval_dynamics_2x2_grid(dynamics, num_points):
"""Evaluates dynamics on a 2-D mesh-grid.
Args:
dynamics: Population dynamics of type `dynamics.MultiPopulationDynamics`.
num_points: Number of points along each dimension of the grid.
Returns:
Mesh-grid (x, y) and corresponding derivatives of the first action for
player 1 and 2 (u, v).
"""
assert dynamics.payoff_tensor.shape == (2, 2, 2)
x = np.linspace(0., 1., num_points + 2)[1:-1]
x, y = np.meshgrid(x, x)
u = np.empty(x.shape)
v = np.empty(x.shape)
for i in range(num_points):
for j in range(num_points):
row_state = np.array([x[i, j], 1. - x[i, j]])
col_state = np.array([y[i, j], 1. - y[i, j]])
state = np.concatenate((row_state, col_state))
dstate = dynamics(state)
u[i][j] = dstate[0]
v[i][j] = dstate[2]
return x, y, u, v
def _rk12_step(func, y0, dt):
"""Improved Euler-Integration step to integrate dynamics.
Args:
func: Function handle to time derivative.
y0: Current state.
dt: Integration step.
Returns:
Next state.
"""
dy = func(y0)
y_ = y0 + dt * dy
return y0 + dt / 2. * (dy + func(y_))
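# Minimal usage sketch (hypothetical dynamics, not part of this module):
#
#   decay = lambda y: -y                       # dy/dt = -y
#   _rk12_step(decay, np.array([1.]), dt=0.1)
#   # Heun step: 1. + 0.05 * (-1. - 0.9) = 0.905 (exact: exp(-0.1) ~= 0.9048).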
class Dynamics2x2Axes(axes.Axes):
"""Axes for 2x2 game dynamics.
This class provides plotting functions for dynamics in two-player 2x2 games.
Attributes:
name: Used for projection keyword when creating a new axes.
"""
name = "2x2"
def cla(self):
"""Clear the current axes."""
super(Dynamics2x2Axes, self).cla()
self.set_aspect("equal")
self.set_xlim(0, 1)
self.set_ylim(0, 1)
def quiver(self,
dynamics,
num_points=9,
normalize=False,
pivot="middle",
**kwargs):
"""Visualizes the dynamics as a directional field plot.
Args:
dynamics: Population dynamics of type `dynamics.MultiPopulationDynamics`.
num_points: Number of points along each dimension of the plot.
normalize: Normalize each arrow to unit-length.
pivot: In `{"tail", "middle", "tip"}`, optional, default: "middle". The
part of the arrow that is anchored to the X, Y grid. The arrow rotates
about this point.
**kwargs: Additional keyword arguments passed on to `Axes.quiver`.
Returns:
The `quiver.Quiver` object created by calling `Axes.quiver`.
"""
x, y, u, v = _eval_dynamics_2x2_grid(dynamics, num_points)
if normalize:
norm = np.sqrt(u**2 + v**2)
u = np.divide(u, norm, out=np.zeros_like(u), where=norm != 0)
v = np.divide(v, norm, out=np.zeros_like(v), where=norm != 0)
return super(Dynamics2x2Axes, self).quiver(
x, y, u, v, pivot=pivot, **kwargs)
def streamplot(self,
dynamics,
num_points=50,
linewidth=None,
color=None,
**kwargs):
"""Visualizes the dynamics as a streamline plot.
Args:
dynamics: Population dynamics of type `dynamics.MultiPopulationDynamics`.
num_points: Number of points along each dimension of the plot.
linewidth: In `{None, float, "velocity"}`, optional, default: None. If
`linewidth="velocity"`, line width is scaled by the velocity of the
dynamics. Defaults to `rcParams` if `linewidth=None`.
color: In `{None, string, (r,g,b), (r,g,b,a), "velocity"}`, default: None.
If `color="velocity"`, velocity of dynamics is used to color the
streamlines. Defaults to `rcParams` if `color=None`.
**kwargs: Additional keyword arguments passed on to `Axes.streamplot`.
Returns:
The `streamplot.StreamplotSet` created by calling `Axes.streamplot`.
"""
x, y, u, v = _eval_dynamics_2x2_grid(dynamics, num_points)
if linewidth == "velocity" or color == "velocity":
vel = np.sqrt(u**2 + v**2)
vel = vel - np.min(vel)
vel = vel / np.max(vel)
if linewidth == "velocity":
linewidth = 3. * vel
if color == "velocity":
color = vel
return super(Dynamics2x2Axes, self).streamplot(
x, y, u, v, minlength=0.1, linewidth=linewidth, color=color, **kwargs)
projections.register_projection(Dynamics2x2Axes)
class SimplexTransform(transforms.Transform):
"""Affine transform to project the 2-simplex to 2D Cartesian space."""
input_dims = 3
output_dims = 2
_MATRIX = np.array([[0., 0.], [1., 0.], [0.5, np.sqrt(3) / 2.]])
def transform_affine(self, values):
return np.matmul(values, SimplexTransform._MATRIX)
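# Example (sketch): the simplex vertices map to the corners of an equilateral
# triangle in Cartesian coordinates:
#
#   SimplexTransform().transform(np.array([[1., 0., 0.],
#                                          [0., 1., 0.],
#                                          [0., 0., 1.]]))
#   # -> [[0., 0.], [1., 0.], [0.5, sqrt(3) / 2]]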
class SimplexStreamMask(object):
"""Mask of regular discrete cells to track trajectories/streamlines.
Also see `matplotlib.streamplot.StreamMask`.
"""
def __init__(self, density=1.):
self._n = int(30. * density)
self._mask = np.zeros([self._n + 1] * 2 + [2], dtype=bool)
self.shape = self._mask.shape
def index(self, point):
"""Computes index given a point on the simplex."""
point = np.array(point)
idx = np.floor(point[:2] * self._n).astype(int)
x, y = point[:2] * self._n - idx
z = int(x + y > 1)
return tuple(idx.tolist() + [z])
def point(self, index):
"""Computes point on the simplex given an index."""
p = np.empty((3,))
p[0] = (index[0] + (1 + index[2]) / 3.) / float(self._n)
p[1] = (index[1] + (1 + index[2]) / 3.) / float(self._n)
p[2] = 1. - p[0] - p[1]
return p if p[2] > 0. else None
def __getitem__(self, point):
return self._mask.__getitem__(self.index(point))
def __setitem__(self, point, val):
return self._mask.__setitem__(self.index(point), val)
class Dynamics3x3Axes(axes.Axes):
"""Axes for 3x3 game dynamics.
This class provides plotting functions for dynamics in symmetric 3x3 games.
Attributes:
name: Used for projection keyword when creating a new axes.
"""
name = "3x3"
_VERTICES = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
def __init__(self, fig, rect, *args, **kwargs):
self._simplex_transform = SimplexTransform()
self._labels = None
super(axes.Axes, self).__init__(fig, rect, *args, **kwargs)
def cla(self):
"""Clear the current axes."""
super(axes.Axes, self).cla()
self.set_aspect("equal")
self.get_xaxis().set_visible(False)
self.get_yaxis().set_visible(False)
self.patch.set_visible(False)
self.set_frame_on(False)
    # Draw invisible vertices to set the x/y limits of the plot.
self.scatter(Dynamics3x3Axes._VERTICES, alpha=0.)
self.margins(0.15)
self.bgpatch = self._create_bgpatch(
facecolor=rcParams["axes.facecolor"],
edgecolor=rcParams["axes.edgecolor"],
linewidth=rcParams["axes.linewidth"],
zorder=-1)
self.add_artist(self.bgpatch)
if rcParams["axes.grid"]:
self.grid = self._create_grid(
color=rcParams["grid.color"],
alpha=rcParams["grid.alpha"],
linestyle=rcParams["grid.linestyle"],
linewidth=rcParams["grid.linewidth"],
zorder=0)
self.add_collection(self.grid)
self.ticks, self.tick_labels = self._create_ticks(
color=rcParams["xtick.color"], zorder=0)
self.add_collection(self.ticks)
for label in self.tick_labels:
self.add_artist(label)
def _create_bgpatch(self, **kwargs):
codes = [Path.MOVETO] + [Path.LINETO] * 2 + [Path.CLOSEPOLY]
vertices = self._VERTICES + [self._VERTICES[0]]
vertices = self._simplex_transform.transform(np.array(vertices))
return PathPatch(Path(vertices, codes), **kwargs)
def _create_grid(self, step=0.2, **kwargs):
x = np.arange(step, 1., step)
n = x.shape[0]
line_start, line_end = np.zeros((n, 3)), np.zeros((n, 3))
line_start[:, 0] = line_end[::-1, 1] = x
line_start[:, 2] = line_end[::-1, 0] = 1. - x
segs = np.zeros((3 * n, 2, 2))
for i, perm in enumerate([(0, 2, 1), (1, 0, 2), (2, 1, 0)]):
start = self._simplex_transform.transform(line_start[:, perm])
end = self._simplex_transform.transform(line_end[:, perm])
segs[i * n:(i + 1) * n, 0, :], segs[i * n:(i + 1) * n, 1, :] = start, end
line_segments = LineCollection(segs, **kwargs)
return line_segments
def _create_ticks(self, step=0.2, tick_length=0.025, **kwargs):
x = np.arange(step, 1., step)
n = x.shape[0]
tick_start, tick_end = np.zeros((n, 3)), np.zeros((n, 3))
tick_start[:, 0] = x
tick_start[:, 2] = 1. - x
tick_end[:, 0] = x
tick_end[:, 2] = 1. - x + tick_length
tick_end[:, 1] = -tick_length
tick_labels = []
ha = ["center", "left", "right"]
va = ["top", "bottom", "center"]
rot = [-60, 60, 0]
segs = np.zeros((n * 3, 2, 2))
for i, perm in enumerate([(0, 2, 1), (1, 0, 2), (2, 1, 0)]):
start = self._simplex_transform.transform(tick_start[:, perm])
end = self._simplex_transform.transform(tick_end[:, perm])
segs[i * n:(i + 1) * n, 0, :], segs[i * n:(i + 1) * n, 1, :] = start, end
for j, x_ in enumerate(x):
tick_labels.append(
Text(
end[j, 0],
end[j, 1],
"{0:.1f}".format(x_),
horizontalalignment=ha[i],
verticalalignment=va[i],
rotation=rot[i],
color=kwargs["color"],
fontsize=rcParams["xtick.labelsize"]))
line_segments = LineCollection(segs, **kwargs)
return line_segments, tick_labels
def _create_labels(self, labels, padding):
artists = []
aligns = ["top", "top", "bottom"]
for label, pos, align in zip(labels, self._VERTICES, aligns):
x, y = self._simplex_transform.transform(pos)
labelpad = padding if align == "bottom" else -padding
label = Text(
x=x,
y=y + labelpad,
text=label,
fontproperties=font_manager.FontProperties(
size=rcParams["axes.labelsize"],
weight=rcParams["axes.labelweight"]),
color=rcParams["axes.labelcolor"],
verticalalignment=align,
horizontalalignment="center")
artists.append(label)
return artists
def get_labels(self):
return self._labels
def set_labels(self, labels, padding=0.02):
assert len(labels) == 3
if self._labels is None:
self._labels = self._create_labels(labels, padding)
for label in self._labels:
self.add_artist(label)
else:
for artist, label in zip(self._labels, labels):
artist.set_text(label)
labels = property(get_labels, set_labels)
def can_zoom(self):
return False
def can_pan(self):
return False
def plot(self, points, **kwargs):
"""Creates a line plot.
Args:
points: Points in policy space.
**kwargs: Additional keyword arguments passed on to `Axes.plot`.
Returns:
The line plot.
"""
points = np.array(points)
assert points.shape[1] == 3
points = self._simplex_transform.transform(points)
return super(Dynamics3x3Axes, self).plot(points[:, 0], points[:, 1],
**kwargs)
def scatter(self, points, **kwargs):
"""Creates a scatter plot.
Args:
points: Points in policy space.
**kwargs: Additional keyword arguments passed on to `Axes.scatter`.
Returns:
The scatter plot.
"""
points = np.array(points)
assert points.shape[1] == 3
points = self._simplex_transform.transform(points)
return super(Dynamics3x3Axes, self).scatter(points[:, 0], points[:, 1],
**kwargs)
def quiver(self,
dynamics,
step=0.05,
boundary=False,
normalize=False,
pivot="middle",
**kwargs):
"""Visualizes the dynamics as a directional field plot.
Args:
dynamics: Population dynamics of type `dynamics.SinglePopulationDynamics`.
step: Distance between arrows along one dimension.
boundary: Include arrows on the boundary/face of the simplex.
normalize: Normalize each arrow to unit-length.
pivot: In `{"tail", "middle", "tip"}`, optional, default: "middle". The
part of the arrow that is anchored to the X, Y grid. The arrow rotates
about this point.
**kwargs: Additional keyword arguments passed on to `Axes.quiver`.
Returns:
The `quiver.Quiver` object created by calling `Axes.quiver`.
"""
x = np.array([x for x in utils.grid_simplex(step=step, boundary=boundary)])
dx = np.apply_along_axis(dynamics, 1, x)
p = self._simplex_transform.transform(x)
v = self._simplex_transform.transform(dx)
x, y = p[:, 0], p[:, 1]
u, v = v[:, 0], v[:, 1]
if normalize:
norm = np.sqrt(u**2 + v**2)
u, v = u / norm, v / norm
if "pivot" not in kwargs:
kwargs["pivot"] = "middle"
return super(Dynamics3x3Axes, self).quiver(x, y, u, v, **kwargs)
def _linecollection(self, points, linewidth, color):
points = self._simplex_transform.transform(points).reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, linewidths=linewidth, color=color)
return lc
def _integrate(self, x, func, mask, dt, min_dist=0.01):
cells = []
trajectory = [x]
x_ = x
for dt in [dt, -dt]:
while not mask[x]:
cell = mask.index(x)
cells.append(cell)
while mask.index(x) == cell:
# integrate up to cell boundary
if np.sqrt(np.sum((x_ - x)**2)) > min_dist:
x_ = x
if dt > 0:
trajectory.append(x)
else:
trajectory.insert(0, x)
x = _rk12_step(func, x, dt=dt)
if dt > 0:
mask[trajectory[-1]] = True
else:
mask[trajectory[0]] = True
# restore to integrate backwards
if dt > 0. and len(cells):
trajectory.append(_rk12_step(func, x, dt=-dt))
mask[mask.point(cells[0])] = False
x = trajectory[0]
x_ = x
else:
trajectory.insert(0, _rk12_step(func, x, dt=-dt))
return (np.array(trajectory), cells) if len(trajectory) > 2 else None
def streamplot(self,
dynamics,
initial_points=None,
dt=0.01,
density=1.,
min_length=0.4,
linewidth=None,
color="k",
**kwargs):
"""Visualizes the dynamics as a streamline plot.
Mimics the visuals of `Axes.streamplot` for simplex plots.
Args:
dynamics: Population dynamics of type `dynamics.SinglePopulationDynamics`.
      initial_points: Starting points for streamlines.
dt: Integration step.
density: Controls the density of streamlines in the plot.
min_length: Streamlines with length < min_length will be discarded.
linewidth: In `{None, float, "velocity"}`, optional, default: None. If
`linewidth="velocity"`, line width is scaled by the velocity of the
dynamics. Defaults to `rcParams` if `linewidth=None`.
color: In `{None, string, (r,g,b), (r,g,b,a), "velocity"}`, default: None.
If `color="velocity"`, velocity of dynamics is used to color the
streamlines. Defaults to `rcParams` if `color=None`.
**kwargs: Additional keyword arguments passed on to `Axes.streamplot`.
Returns:
The `SimplexStreamMask`.
"""
mask = SimplexStreamMask(density=density)
trajectories = []
if initial_points is None:
eps = 0.1
initial_points = np.array([[1. - eps, eps / 2., eps / 2.],
[eps / 2., 1. - eps, eps / 2.],
[eps / 2., eps / 2., 1. - eps]])
initial_points = np.vstack(
(initial_points, utils.sample_from_simplex(100)))
# TODO(author10): add heuristic for initial points
else:
initial_points = np.array(initial_points)
assert initial_points.ndim == 2
assert initial_points.shape[1] == 3
# generate trajectories
for p in initial_points:
# center initial point on grid cell
p = mask.point(mask.index(p))
res = self._integrate(p, dynamics, mask, dt=dt)
if res is not None:
t, cells = res # pylint: disable=unpacking-non-sequence
cum_len = np.cumsum(
np.sqrt(
np.diff(t[:, 0])**2 + np.diff(t[:, 1])**2 +
np.diff(t[:, 2])**2))
if cum_len[-1] < min_length:
for cell in cells:
mask[mask.point(cell)] = False
continue
trajectories.append(t)
lc_color = arrow_color = color
lc_linewidth = linewidth
if linewidth == "velocity" or color == "velocity":
vel_max = 0
vel_min = float("inf")
velocities = []
for t in trajectories:
dx = np.apply_along_axis(dynamics, 1, t)
vel = np.sqrt(np.sum(dx**2, axis=1))
vel_max = max(np.max(vel), vel_max)
vel_min = min(np.min(vel), vel_min)
velocities.append(vel)
# add trajectories to plot
for i, t in enumerate(trajectories):
cum_len = np.cumsum(
np.sqrt(
np.diff(t[:, 0])**2 + np.diff(t[:, 1])**2 + np.diff(t[:, 2])**2))
mid_idx = np.searchsorted(cum_len, cum_len[-1] / 2.)
if linewidth == "velocity" or color == "velocity":
vel = (velocities[i] - vel_min) / vel_max
if linewidth == "velocity":
lc_linewidth = 3. * vel + 0.5
if color == "velocity":
cmap = matplotlib.cm.get_cmap(rcParams["image.cmap"])
lc_color = cmap(vel)
arrow_color = cmap(vel[mid_idx])
lc = self._linecollection(t, linewidth=lc_linewidth, color=lc_color)
self.add_collection(lc)
# add arrow centered on trajectory
arrow_tail = self._simplex_transform.transform(t[mid_idx - 1])
arrow_head = self._simplex_transform.transform(t[mid_idx])
arrow_kw = dict(arrowstyle="-|>", mutation_scale=10 * 1.)
arrow_patch = FancyArrowPatch(
arrow_tail,
arrow_head,
linewidth=None,
color=arrow_color,
zorder=3,
**arrow_kw)
self.add_patch(arrow_patch)
return mask
projections.register_projection(Dynamics3x3Axes)
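# Example (single-population analogue of the module docstring example; mirrors
# visualization_test.py and assumes `pyspiel`, `matplotlib.pyplot as plt` and
# `open_spiel.python.egt.dynamics` are imported):
#
#   game = pyspiel.load_game("matrix_rps")
#   payoff_tensor = utils.game_payoffs_array(game)
#   dyn = dynamics.SinglePopulationDynamics(payoff_tensor, dynamics.replicator)
#   ax = plt.subplot(projection="3x3")
#   ax.quiver(dyn)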
| open_spiel-master | open_spiel/python/egt/visualization.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.egt.alpharank."""
from absl.testing import absltest
# pylint: disable=g-import-not-at-top
import matplotlib
matplotlib.use("agg") # switch backend for testing
import numpy as np
from open_spiel.python.egt import alpharank
from open_spiel.python.egt import heuristic_payoff_table
from open_spiel.python.egt import utils
import pyspiel
class AlphaRankTest(absltest.TestCase):
def test_stationary_distribution(self):
"""Tests stationary distribution using payoffs from Han et al., 2013."""
r = 1.
t = 2.
p = 0.
s = -1.
delta = 4.
eps = 0.25
payoff_tables = [
np.asarray([[r - eps / 2., r - eps, 0, s + delta - eps, r - eps],
[r, r, s, s, s], [0, t, p, p, p], [t - delta, t, p, p, p],
[r, t, p, p, p]])
]
m = 20
alpha = 0.1
expected_pi = np.asarray(
[0.40966787, 0.07959841, 0.20506998, 0.08505983, 0.2206039])
# Test payoffs in matrix format
_, _, pi_matrix, _, _ = alpharank.compute(
payoff_tables, m=m, alpha=alpha, use_local_selection_model=False)
np.testing.assert_array_almost_equal(pi_matrix, expected_pi, decimal=4)
# Test payoffs in HPT format
hpts = [heuristic_payoff_table.from_matrix_game(payoff_tables[0])]
_, _, pi_hpts, _, _ = alpharank.compute(
hpts, m=m, alpha=alpha, use_local_selection_model=False)
np.testing.assert_array_almost_equal(pi_hpts, expected_pi, decimal=4)
def test_constant_sum_transition_matrix(self):
"""Tests closed-form transition matrix computation for constant-sum case."""
game = pyspiel.load_matrix_game("matrix_rps")
payoff_tables = utils.game_payoffs_array(game)
# Checks if the game is symmetric and runs single-population analysis if so
_, payoff_tables = utils.is_symmetric_matrix_game(payoff_tables)
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
m = 20
alpha = 0.1
# Case 1) General-sum game computation (slower)
game_is_constant_sum = False
use_local_selection_model = False
payoff_sum = None
c1, rhos1 = alpharank._get_singlepop_transition_matrix(
payoff_tables[0], payoffs_are_hpt_format, m, alpha,
game_is_constant_sum, use_local_selection_model, payoff_sum)
# Case 2) Constant-sum closed-form computation (faster)
game_is_constant_sum, payoff_sum = utils.check_is_constant_sum(
payoff_tables[0], payoffs_are_hpt_format)
c2, rhos2 = alpharank._get_singlepop_transition_matrix(
payoff_tables[0], payoffs_are_hpt_format, m, alpha,
game_is_constant_sum, use_local_selection_model, payoff_sum)
# Ensure both cases match
np.testing.assert_array_almost_equal(c1, c2)
np.testing.assert_array_almost_equal(rhos1, rhos2)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/egt/alpharank_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for evolutionary game theoretic analysis of games."""
import itertools
import math
import numpy as np
import pyspiel
def n_choose_k(n, k):
"""Returns the combination choose k among n items."""
f = math.factorial
return int(f(n) / f(k) / f(n - k))
def grid_simplex(step=.1, boundary=False):
"""Generator for regular 'lattice' on the 2-simplex.
Args:
step: Defines spacing along one dimension.
boundary: Include points on the boundary/face of the simplex.
Yields:
Next point on the grid.
"""
eps = 1e-8
start = 0. if boundary else step
stop = 1. + eps if boundary else 1. - step + eps
for a in np.arange(start, stop, step, dtype=np.double):
for b in np.arange(start, stop - a, step, dtype=np.double):
yield [a, b, 1. - a - b]
def sample_from_simplex(n, dim=3, vmin=0.):
"""Samples random points from a k-simplex.
See Donald B. Rubin (1981) "The Bayesian Bootstrap", page 131.
Args:
n: Number of points that are sampled.
dim: Dimension of the points to be sampled, e.g. dim=3 samples points from
the 2-simplex.
vmin: Minimum value of any coordinate of the resulting points, e.g. set
vmin>0. to exclude points on the faces of the simplex.
Returns:
    `ndarray(shape=(n, dim))` of uniformly random points on the (dim-1)-simplex.
"""
assert vmin >= 0.
p = np.random.rand(n, dim - 1)
p = np.sort(p, axis=1)
p = np.hstack((np.zeros((n, 1)), p, np.ones((n, 1))))
return (p[:, 1:] - p[:, 0:-1]) * (1 - 2 * vmin) + vmin
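# Example (sketch): each sampled row is a point on the 2-simplex.
#
#   pts = sample_from_simplex(4)        # shape (4, 3)
#   np.allclose(pts.sum(axis=1), 1.)    # -> True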
def game_payoffs_array(game):
"""Returns a `numpy.ndarray` of utilities for a game.
NOTE: if the game is not a MatrixGame or a TensorGame then this may be costly.
Args:
game: A game.
Returns:
`numpy.ndarray` of dimension `num_players` + 1.
First dimension is the player, followed by the actions of all players, e.g.
a 3x3 game (2 players) has dimension [2,3,3].
"""
if isinstance(game, pyspiel.MatrixGame):
return np.stack([game.row_utilities(), game.col_utilities()])
if not isinstance(game, pyspiel.TensorGame):
game = pyspiel.extensive_to_tensor_game(game)
return np.stack(
[game.player_utilities(player) for player in range(game.num_players())])
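# Example: a 2-player, 3-action matrix game yields a [2, 3, 3] payoff tensor.
#
#   game = pyspiel.load_matrix_game("matrix_rps")
#   game_payoffs_array(game).shape    # -> (2, 3, 3)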
def distribute(num_items, num_slots, normalize=False):
"""Yields all ways of distributing `num_items` items over `num_slots` slots.
We assume that the ordering of the slots doesn't matter.
Args:
num_items: The number of items to distribute.
num_slots: The number of slots.
normalize: Normalizes the yielded tuple to contain floats in [0, 1] summing
to 1.
Yields:
    A tuple T of `num_slots` non-negative entries such that
    `np.sum(T) == num_items` if `normalize == False`, or `np.sum(T) == 1` if
    `normalize == True`.
"""
normalization = 1
if normalize:
normalization = num_items
# This is just the standard "bars and stars" problem.
# See https://stackoverflow.com/questions/28965734/general-bars-and-stars.
for c in itertools.combinations(
range(num_items + num_slots - 1), num_slots - 1):
# The combinations give you the indices of the internal bars.
# pylint: disable=g-complex-comprehension
yield tuple((b - a - 1) / normalization
for (a, b) in zip([
-1,
] + list(c),
list(c) + [num_items + num_slots - 1]))
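# Example: all ways of placing 2 indistinguishable items into 3 slots.
#
#   list(distribute(2, 3))
#   # -> [(0.0, 0.0, 2.0), (0.0, 1.0, 1.0), (0.0, 2.0, 0.0),
#   #     (1.0, 0.0, 1.0), (1.0, 1.0, 0.0), (2.0, 0.0, 0.0)]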
def assert_is_1d_numpy_array(array):
if not isinstance(array, np.ndarray):
raise ValueError("The argument must be a numpy array, not a {}.".format(
type(array)))
if len(array.shape) != 1:
raise ValueError(
"The argument must be 1-dimensional, not of shape {}.".format(
array.shape))
def assert_probabilities(array):
if not all([item >= 0 for item in array]):
raise ValueError("The vector must have all elements >= 0 items, not"
"{}".format(array))
sum_ = np.sum(array)
if not np.isclose(1, sum_):
raise ValueError(
"The sum of the probabilities must be 1, not {}".format(sum_))
def sort_rows_lexicographically(array):
"""Returns a numpy array with lexicographic-ordered rows.
This function can be used to check that 2 Heuristic Payoff Tables are equal,
by normalizing them using a fixed ordering of the rows.
Args:
array: The 2D numpy array to sort by rows.
"""
return np.array(sorted(array.tolist()))
def get_valid_next_profiles(num_strats_per_population, cur_profile):
"""Generates monomorphic strategy profile transitions given cur_profile.
Given a current strategy profile, cur_profile, this generates all follow-up
profiles that involve only a single other population changing its current
monomorphic strategy to some other monomorphic strategy. Note that
self-transitions from cur_profile to cur_profile are not included here, as
they are a special case in our Markov chain.
Args:
num_strats_per_population: List of strategy sizes for each population.
cur_profile: Current strategy profile.
Yields:
The next valid strategy profile transition.
"""
num_populations = len(num_strats_per_population)
for i_population_to_change in range(num_populations):
for new_strat in range(num_strats_per_population[i_population_to_change]):
# Ensure a transition will actually happen
if new_strat != cur_profile[i_population_to_change]:
next_profile = cur_profile.copy()
next_profile[i_population_to_change] = new_strat
yield i_population_to_change, next_profile
def get_num_strats_per_population(payoff_tables, payoffs_are_hpt_format):
"""Returns a [num_populations] array of the num.
of strategies per population.
E.g., for a 3 population game, this returns
[num_strats_population1, num_strats_population2, num_strats_population3]
Args:
payoff_tables: List of game payoff tables, one for each agent identity. Each
payoff_table may be either a 2D numpy array, or a _PayoffTableInterface
object.
    payoffs_are_hpt_format: True indicates HPT format (i.e. a
      _PayoffTableInterface object); False indicates a 2D numpy array.
"""
if payoffs_are_hpt_format:
return np.asarray(
[payoff_table.num_strategies for payoff_table in payoff_tables])
else:
# Non-HPT payoffs are matrices, so can directly return the payoff size
return np.asarray(np.shape(payoff_tables[0]))
def get_num_profiles(num_strats_per_population):
"""Returns the total number of pure strategy profiles.
Args:
num_strats_per_population: A list of size `num_populations` of the number of
strategies per population.
Returns:
The total number of pure strategy profiles.
"""
return np.prod(num_strats_per_population)
def get_strat_profile_labels(payoff_tables, payoffs_are_hpt_format):
"""Returns strategy labels corresponding to a payoff_table.
Namely, for games where strategies have no human-understandable labels
available, this function returns a labels object corresponding to the
strategy profiles.
Examples:
Generated labels for a single-population game with 3 strategies:
['0','1','2'].
Generated labels for a 3-population game with 2 strategies per population:
{0: ['0','1'], 1: ['0','1'], 2: ['0','1']}
Args:
payoff_tables: List of game payoff tables, one for each agent identity. Each
payoff_table may be either a 2D numpy array, or a _PayoffTableInterface
object.
payoffs_are_hpt_format: Boolean indicating whether each payoff table in
payoff_tables is a 2D numpy array, or a _PayoffTableInterface object (AKA
Heuristic Payoff Table or HPT). True indicates HPT format, False indicates
2D numpy array.
Returns:
Strategy labels.
"""
num_populations = len(payoff_tables)
if num_populations == 1:
num_strats_per_population = get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format)
labels = [str(x) for x in range(num_strats_per_population[0])]
else:
num_strats_per_population = get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format)
labels = dict()
label_text = []
# Construct a list of strategy labels for each population
for num_strats in num_strats_per_population:
label_text.append([str(i_strat) for i_strat in range(num_strats)])
population_ids = range(num_populations)
labels = dict(zip(population_ids, label_text))
return labels
def get_strat_profile_from_id(num_strats_per_population, profile_id):
"""Returns the strategy profile corresponding to a requested strategy ID.
This is the inverse of the function get_id_from_strat_profile(). See that
function for the indexing mechanism.
Args:
num_strats_per_population: List of strategy sizes for each population.
profile_id: Integer ID of desired strategy profile, in
{0,...,get_num_profiles-1}.
Returns:
The strategy profile whose ID was looked up.
"""
num_populations = len(num_strats_per_population)
strat_profile = np.zeros(num_populations, dtype=np.int32)
for i_population in range(num_populations - 1, -1, -1):
strat_profile[i_population] = (
profile_id % num_strats_per_population[i_population])
profile_id = profile_id // num_strats_per_population[i_population]
return strat_profile
def get_label_from_strat_profile(num_populations, strat_profile, strat_labels):
"""Returns a human-readable label corresponding to the strategy profile.
E.g., for Rock-Paper-Scissors, strategies 0,1,2 have labels "R","P","S".
For strat_profile (1,2,0,1), this returns "(P,S,R,P)". If strat_profile is a
single strategy (e.g., 0) this returns just its label (e.g., "R").
Args:
num_populations: Number of populations.
strat_profile: Strategy profile of interest.
strat_labels: Strategy labels.
Returns:
Human-readable label string.
"""
if num_populations == 1:
return strat_labels[strat_profile]
else:
label = "("
for i_population, i_strat in enumerate(strat_profile):
label += strat_labels[i_population][i_strat]
if i_population < len(strat_profile) - 1:
label += ","
label += ")"
return label
def get_id_from_strat_profile(num_strats_per_population, strat_profile):
"""Returns a unique integer ID representing the requested strategy profile.
Map any `strat_profile` (there are `np.prod(num_strats_per_population)` such
profiles) to {0,..., num_strat_profiles - 1}.
The mapping is done using a usual counting strategy: With
  num_strats_per_population = [a_1, ..., a_n]
  strat_profile = [b_1, ..., b_n]
  we have
  id = b_n + a_n * (b_{n-1} + a_{n-1} * (b_{n-2} + ...))
This is helpful for querying the element of our finite-population Markov
transition matrix that corresponds to a transition between a specific pair of
strategy profiles.
Args:
num_strats_per_population: List of strategy sizes for each population.
strat_profile: The strategy profile (list of integers corresponding to the
strategy of each agent) whose ID is requested.
Returns:
Unique ID of strat_profile.
"""
if len(strat_profile) == 1:
return strat_profile[0]
return strat_profile[-1] + (num_strats_per_population[-1] *
get_id_from_strat_profile(
num_strats_per_population[:-1],
strat_profile[:-1]))
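# Example (sketch): profile IDs and strategy profiles are mutually inverse.
#
#   num_strats = [2, 3]                                # 6 profiles in total
#   get_id_from_strat_profile(num_strats, [1, 2])      # -> 5
#   get_strat_profile_from_id(num_strats, 5)           # -> array([1, 2])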
def compute_payoff(row_profile, col_profile, row_payoff_table):
"""Returns row's expected payoff in a bimatrix game.
Args:
row_profile: Row's strategy profile.
col_profile: Column's strategy profile.
row_payoff_table: Row's payoff table.
"""
return np.dot(np.dot(row_profile.T, row_payoff_table), col_profile)
def check_is_constant_sum(payoff_table, payoffs_are_hpt_format):
"""Checks if single-population matrix game is constant-sum.
Args:
payoff_table: Either a 2D numpy array, or a _PayoffTableInterface object.
payoffs_are_hpt_format: Boolean indicating whether payoff table is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT), or a 2D
numpy array. True indicates HPT, and False indicates numpy array.
Returns:
is_constant_sum: Boolean, True if constant-sum game.
payoff_sum: Payoff sum if game is constant-sum, or None if not.
"""
if payoffs_are_hpt_format:
payoff_sum_table = np.asarray(payoff_table._payoffs).sum(axis=1) # pylint: disable=protected-access
is_constant_sum = np.isclose(
payoff_sum_table, payoff_sum_table[0], atol=1e-14).all()
payoff_sum = payoff_sum_table[0] if is_constant_sum else None
else:
payoff_sum_table = payoff_table + payoff_table.T
is_constant_sum = np.isclose(
payoff_sum_table, payoff_sum_table[0, 0], atol=1e-14).all()
payoff_sum = payoff_sum_table[0, 0] if is_constant_sum else None
return is_constant_sum, payoff_sum
def cluster_strats(pi, matching_decimals=4):
"""Clusters strategies using stationary distribution (pi) masses.
Args:
pi: stationary distribution.
matching_decimals: the number of stationary distribution decimals that
should match for strategies to be considered in the same cluster.
Returns:
Dictionary that maps unique stationary distribution masses to strategies.
"""
rounded_masses = pi.round(decimals=matching_decimals)
masses_to_strats = {}
for i in np.unique(rounded_masses):
masses_to_strats[i] = np.where(rounded_masses == i)[0]
return masses_to_strats
def print_rankings_table(payoff_tables,
pi,
strat_labels,
num_top_strats_to_print=8):
"""Prints nicely-formatted table of strategy rankings.
Args:
payoff_tables: List of game payoff tables, one for each agent identity. Each
payoff_table may be either a 2D numpy array, or a _PayoffTableInterface
object.
pi: Finite-population Markov chain stationary distribution.
strat_labels: Strategy labels.
num_top_strats_to_print: Number of top strategies to print.
"""
num_populations = len(payoff_tables)
payoffs_are_hpt_format = check_payoffs_are_hpt(payoff_tables)
num_strats_per_population = get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format)
  # If more strategies exist than are requested for printing, compute the top
  # ones and use an extra row to indicate that the rest are not shown.
row_for_lowrank_strats = True
if num_top_strats_to_print >= len(pi):
num_top_strats_to_print = len(pi)
row_for_lowrank_strats = False
# Cluster strategies according to stationary distr. (in case of tied ranks)
masses_to_strats = cluster_strats(pi)
def print_3col(col1, col2, col3):
print("%-12s %-12s %-12s" % (col1, col2, col3))
print_3col("Agent", "Rank", "Score")
print_3col("-----", "----", "-----")
rank = 1
num_strats_printed = 0
# Print a table of strategy rankings from highest to lowest mass
for _, strats in sorted(masses_to_strats.items(), reverse=True):
for strat in strats:
if num_strats_printed >= num_top_strats_to_print:
break
rounded_pi = np.round(pi[strat], decimals=2)
if num_populations == 1:
strat_profile = strat
else:
strat_profile = get_strat_profile_from_id(num_strats_per_population,
strat)
label = get_label_from_strat_profile(num_populations, strat_profile,
strat_labels)
print_3col(label, str(rank), str(np.abs(rounded_pi)))
num_strats_printed += 1
rank += 1
if num_strats_printed >= num_top_strats_to_print:
break
# Ellipses to signify additional low-rank strategies are not printed
if row_for_lowrank_strats:
print_3col("...", "...", "...")
def is_symmetric_matrix_game(payoff_tables):
"""Checks if payoff_tables corresponds to a symmetric matrix game."""
payoffs_are_hpt_format = check_payoffs_are_hpt(payoff_tables)
if len(payoff_tables) == 2:
if payoffs_are_hpt_format and np.array_equal(payoff_tables[0](),
payoff_tables[1]()):
return True, [payoff_tables[0]]
    elif not payoffs_are_hpt_format and np.array_equal(
        payoff_tables[0], payoff_tables[1].T):
return True, [payoff_tables[0]]
return False, payoff_tables
def check_payoffs_are_hpt(payoff_tables):
"""Returns True if payoffs are in HPT format."""
if isinstance(payoff_tables[0], np.ndarray):
return False
elif hasattr(payoff_tables[0], "is_hpt") and payoff_tables[0].is_hpt:
return True
else:
raise TypeError("payoff_tables should be a list of payoff matrices/hpts.")
| open_spiel-master | open_spiel/python/egt/utils.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.egt.visualization."""
from absl import logging
from absl.testing import absltest
# pylint: disable=g-import-not-at-top
try:
from matplotlib.figure import Figure
from matplotlib.quiver import Quiver
from matplotlib.streamplot import StreamplotSet
except ImportError as e:
logging.info("If your tests failed with the error 'ImportError: No module "
"named functools_lru_cache', this is a known bug in matplotlib "
"and there is a workaround (run sudo apt install "
"python-backports.functools-lru-cache. See: "
"https://github.com/matplotlib/matplotlib/issues/9344.")
raise e
import numpy as np
from open_spiel.python.egt import dynamics
from open_spiel.python.egt import utils
from open_spiel.python.egt import visualization
import pyspiel
def _build_dynamics2x2():
"""Build multi-population dynamics."""
game = pyspiel.load_game("matrix_pd")
payoff_tensor = utils.game_payoffs_array(game)
return dynamics.MultiPopulationDynamics(payoff_tensor, dynamics.replicator)
def _build_dynamics3x3():
"""Build single-population dynamics."""
game = pyspiel.load_game("matrix_rps")
payoff_tensor = utils.game_payoffs_array(game)
return dynamics.SinglePopulationDynamics(payoff_tensor, dynamics.replicator)
def _identity_dynamics(x):
"""Returns same input as output."""
return x
class VisualizationTest(absltest.TestCase):
def test_meshgrid(self):
n = 10
payoff_tensor = np.ones(shape=(2, 2, 2))
identity = lambda x, f: x
allzero = lambda x, f: np.zeros(x.shape)
dyn = dynamics.MultiPopulationDynamics(payoff_tensor, (identity, allzero))
x, y, u, v = visualization._eval_dynamics_2x2_grid(dyn, n)
np.testing.assert_allclose(x, u)
np.testing.assert_allclose(v, np.zeros(shape=(n, n)))
dyn = dynamics.MultiPopulationDynamics(payoff_tensor, (allzero, identity))
x, y, u, v = visualization._eval_dynamics_2x2_grid(dyn, n)
np.testing.assert_allclose(u, np.zeros(shape=(n, n)))
np.testing.assert_allclose(y, v)
def test_quiver2x2(self):
"""Test 2x2 quiver plot."""
dyn = _build_dynamics2x2()
fig = Figure(figsize=(4, 4))
ax = fig.add_subplot(111, projection="2x2")
res = ax.quiver(dyn)
self.assertIsInstance(res, Quiver)
def test_streamplot2x2(self):
"""Test 2x2 quiver plot."""
dyn = _build_dynamics2x2()
fig = Figure(figsize=(4, 4))
ax = fig.add_subplot(111, projection="2x2")
res = ax.streamplot(dyn)
self.assertIsInstance(res, StreamplotSet)
def test_quiver3x3(self):
"""Test 3x3 quiver plot."""
dyn = _build_dynamics3x3()
fig = Figure(figsize=(4, 4))
ax = fig.add_subplot(111, projection="3x3")
res = ax.quiver(dyn)
self.assertIsInstance(res, Quiver)
def test_streamplot3x3(self):
"""Test 3x3 quiver plot."""
dyn = _build_dynamics3x3()
fig = Figure(figsize=(4, 4))
ax = fig.add_subplot(111, projection="3x3")
res = ax.streamplot(dyn)
self.assertIsInstance(res, visualization.SimplexStreamMask)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/egt/visualization_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Continuous-time population dynamics."""
import numpy as np
def replicator(state, fitness):
"""Continuous-time replicator dynamics.
This is the standard form of the continuous-time replicator dynamics also
known as selection dynamics.
For more details, see equation (5) page 9 in
https://jair.org/index.php/jair/article/view/10952
Args:
state: Probability distribution as an `np.array(shape=num_strategies)`.
fitness: Fitness vector as an `np.array(shape=num_strategies)`.
Returns:
Time derivative of the population state.
"""
avg_fitness = state.dot(fitness)
return state * (fitness - avg_fitness)
def boltzmannq(state, fitness, temperature=1.):
"""Selection-mutation dynamics modeling Q-learning with Boltzmann exploration.
For more details, see equation (10) page 15 in
https://jair.org/index.php/jair/article/view/10952
Args:
state: Probability distribution as an `np.array(shape=num_strategies)`.
fitness: Fitness vector as an `np.array(shape=num_strategies)`.
temperature: A scalar parameter determining the rate of exploration.
Returns:
Time derivative of the population state.
"""
exploitation = (1. / temperature) * replicator(state, fitness)
exploration = (np.log(state) - state.dot(np.log(state).transpose()))
return exploitation - state * exploration
def qpg(state, fitness):
"""Q-based policy gradient dynamics (QPG).
For more details, see equation (12) on page 18 in
https://arxiv.org/pdf/1810.09026.pdf
Args:
state: Probability distribution as an `np.array(shape=num_strategies)`.
fitness: Fitness vector as an `np.array(shape=num_strategies)`.
Returns:
Time derivative of the population state.
"""
regret = fitness - state.dot(fitness)
return state * (state * regret - np.sum(state**2 * regret))
class SinglePopulationDynamics(object):
"""Continuous-time single population dynamics.
Attributes:
    payoff_matrix: The payoff matrix as a `numpy.ndarray` of shape `[2, k_1,
k_2]`, where `k_1` is the number of strategies of the first player and
`k_2` for the second player. The game is assumed to be symmetric.
dynamics: A callback function that returns the time-derivative of the
population state.
"""
def __init__(self, payoff_matrix, dynamics):
"""Initializes the single-population dynamics."""
assert payoff_matrix.ndim == 3
assert payoff_matrix.shape[0] == 2
assert np.allclose(payoff_matrix[0], payoff_matrix[1].T)
self.payoff_matrix = payoff_matrix[0]
self.dynamics = dynamics
def __call__(self, state=None, time=None):
"""Time derivative of the population state.
Args:
state: Probability distribution as list or
`numpy.ndarray(shape=num_strategies)`.
time: Time is ignored (time-invariant dynamics). Including the argument in
the function signature supports numerical integration via e.g.
`scipy.integrate.odeint` which requires that the callback function has
at least two arguments (state and time).
Returns:
Time derivative of the population state as
`numpy.ndarray(shape=num_strategies)`.
"""
state = np.array(state)
assert state.ndim == 1
assert state.shape[0] == self.payoff_matrix.shape[0]
# (Ax')' = xA'
fitness = np.matmul(state, self.payoff_matrix.T)
return self.dynamics(state, fitness)
class MultiPopulationDynamics(object):
"""Continuous-time multi-population dynamics.
Attributes:
    payoff_tensor: The payoff tensor as a `numpy.ndarray` of size `[n, k0, k1,
k2, ...]`, where n is the number of players and `k0` is the number of
strategies of the first player, `k1` of the second player and so forth.
dynamics: List of callback functions for the time-derivative of the
population states, where `dynamics[i]` computes the time-derivative of the
      i-th player's population state. If only a single callback function is
      provided at construction, the same function is used for all populations.
"""
def __init__(self, payoff_tensor, dynamics):
"""Initializes the multi-population dynamics."""
if isinstance(dynamics, list) or isinstance(dynamics, tuple):
assert payoff_tensor.shape[0] == len(dynamics)
else:
dynamics = [dynamics] * payoff_tensor.shape[0]
self.payoff_tensor = payoff_tensor
self.dynamics = dynamics
def __call__(self, state, time=None):
"""Time derivative of the population states.
Args:
state: Combined population state for all populations as a list or flat
`numpy.ndarray` (ndim=1). Probability distributions are concatenated in
order of the players.
time: Time is ignored (time-invariant dynamics). Including the argument in
the function signature supports numerical integration via e.g.
`scipy.integrate.odeint` which requires that the callback function has
at least two arguments (state and time).
Returns:
Time derivative of the combined population state as `numpy.ndarray`.
"""
state = np.array(state)
n = self.payoff_tensor.shape[0] # number of players
ks = self.payoff_tensor.shape[1:] # number of strategies for each player
assert state.shape[0] == sum(ks)
states = np.split(state, np.cumsum(ks)[:-1])
dstates = [None] * n
for i in range(n):
# move i-th population to front
fitness = np.moveaxis(self.payoff_tensor[i], i, 0)
# marginalize out all other populations
for i_ in set(range(n)) - {i}:
fitness = np.tensordot(states[i_], fitness, axes=[0, 1])
dstates[i] = self.dynamics[i](states[i], fitness)
return np.concatenate(dstates)
def time_average(traj):
"""Time-averaged population state trajectory.
Args:
traj: Trajectory as `numpy.ndarray`. Time is along the first dimension,
types/strategies along the second.
Returns:
Time-averaged trajectory.
"""
n = traj.shape[0]
sum_traj = np.cumsum(traj, axis=0)
norm = 1. / np.arange(1, n + 1)
return sum_traj * norm[:, np.newaxis]
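# A minimal usage sketch (assumes scipy is installed): numerically integrate
# the single-population replicator dynamics for rock-paper-scissors with
# `scipy.integrate.odeint` and inspect the time-averaged trajectory, which
# should approach the interior equilibrium (1/3, 1/3, 1/3).
def _example_integrate_rps_replicator():
  """Illustrative only; not used by the library."""
  # Local imports keep the illustration self-contained.
  from scipy.integrate import odeint  # pylint: disable=g-import-not-at-top
  from open_spiel.python.egt import utils  # pylint: disable=g-import-not-at-top
  import pyspiel  # pylint: disable=g-import-not-at-top
  game = pyspiel.load_game("matrix_rps")
  payoff_tensor = utils.game_payoffs_array(game)  # Shape [2, 3, 3].
  dyn = SinglePopulationDynamics(payoff_tensor, replicator)
  x0 = np.array([0.6, 0.3, 0.1])   # Initial population state.
  ts = np.linspace(0., 20., 1000)  # Integration time grid.
  traj = odeint(dyn, x0, ts)       # `dyn(state, time)` matches odeint's API.
  return time_average(traj)[-1]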
| open_spiel-master | open_spiel/python/egt/dynamics.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/egt/examples/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running AlphaRank on OpenSpiel games.
AlphaRank output variable names corresponds to the following paper:
https://arxiv.org/abs/1903.01373
"""
from absl import app
from open_spiel.python.algorithms import fictitious_play
from open_spiel.python.egt import alpharank
from open_spiel.python.egt import alpharank_visualizer
from open_spiel.python.egt import utils
import pyspiel
def get_kuhn_poker_data(num_players=3):
"""Returns the kuhn poker data for the number of players specified."""
game = pyspiel.load_game('kuhn_poker', {'players': num_players})
xfp_solver = fictitious_play.XFPSolver(game, save_oracles=True)
for _ in range(3):
xfp_solver.iteration()
# Results are seed-dependent, so show some interesting cases
if num_players == 2:
meta_games = xfp_solver.get_empirical_metagame(100, seed=1)
elif num_players == 3:
meta_games = xfp_solver.get_empirical_metagame(100, seed=5)
elif num_players == 4:
meta_games = xfp_solver.get_empirical_metagame(100, seed=2)
# Metagame utility matrices for each player
payoff_tables = []
for i in range(num_players):
payoff_tables.append(meta_games[i])
return payoff_tables
def main(unused_arg):
# Construct meta-game payoff tables
payoff_tables = get_kuhn_poker_data()
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
# Run AlphaRank
rhos, rho_m, pi, _, _ = alpharank.compute(payoff_tables, alpha=1e2)
# Report & plot results
alpharank.print_results(
payoff_tables, payoffs_are_hpt_format, rhos=rhos, rho_m=rho_m, pi=pi)
utils.print_rankings_table(payoff_tables, pi, strat_labels)
m_network_plotter = alpharank_visualizer.NetworkPlot(
payoff_tables, rhos, rho_m, pi, strat_labels, num_top_profiles=8)
m_network_plotter.compute_and_draw_network()
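# A minimal variant of the pipeline above, run on a plain matrix game instead
# of empirical Kuhn-poker meta-games. Assumes `alpharank.compute` accepts the
# per-player payoff matrices produced by `utils.game_payoffs_array`.
def rps_alpharank_demo():
  """Illustrative only; reuses the imports at the top of this file."""
  game = pyspiel.load_game('matrix_rps')
  payoff_tables = list(utils.game_payoffs_array(game))  # One matrix per player.
  payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
  rhos, rho_m, pi, _, _ = alpharank.compute(payoff_tables, alpha=1e2)
  alpharank.print_results(
      payoff_tables, payoffs_are_hpt_format, rhos=rhos, rho_m=rho_m, pi=pi)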
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/egt/examples/alpharank_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides tools to evaluate bots against specific scenarios."""
import dataclasses
from typing import Text, List
from absl import logging
@dataclasses.dataclass
class Scenario(object):
name: Text
init_actions: List[Text]
expected_action_str: Text
expected_prob: float
player_id: int
CATCH_SCENARIOS = [
Scenario("Ball in column 1, chooses left.", [
"Initialized ball to 0", "LEFT", "STAY", "STAY", "STAY", "STAY", "STAY",
"STAY", "STAY"
], "LEFT", 1., 0),
Scenario("Ball in column 2, chooses left.", [
"Initialized ball to 1", "STAY", "STAY", "STAY", "STAY", "STAY", "STAY",
"STAY", "STAY"
], "LEFT", 1., 0),
Scenario("Ball in column 3, chooses left.", [
"Initialized ball to 2", "RIGHT", "STAY", "STAY", "STAY", "STAY",
"STAY", "STAY", "STAY"
], "LEFT", 1., 0),
]
SCENARIOS = {
"catch": CATCH_SCENARIOS,
}
def get_default_scenarios(game_name):
"""Loads the default scenarios for a given game.
Args:
game_name: The game to load scenarios for.
Returns:
A List[Scenario] detailing the scenarios for that game.
"""
return SCENARIOS[game_name]
def play_bot_in_scenarios(game, bots, scenarios=None):
"""Plays a bot against a number of scenarios.
Args:
game: The game the bot is playing.
    bots: A list of length game.num_players() of pyspiel.Bots (or equivalent).
      Must implement the restart, force_action and step methods.
scenarios: The scenarios we evaluate the bot in. A List[Scenario].
Returns:
    A dict with a score per scenario (keyed by "scenario_score: <name>") and a
      "mean_score" entry containing the average score across all scenarios.
"""
if scenarios is None:
scenarios = get_default_scenarios(game.get_type().short_name)
results = []
total_score = 0
for scenario in scenarios:
state = game.new_initial_state()
bot = bots[scenario.player_id]
bot.restart()
for action_str in scenario.init_actions:
action = state.string_to_action(action_str)
if state.current_player() == scenario.player_id:
bot.force_action(state, action)
state.apply_action(action)
actions_and_probs, _ = bot.step(state)
expected_action = state.string_to_action(scenario.expected_action_str)
    actual_prob = 0.  # In case the bot assigns no probability to the action.
    for action, prob in actions_and_probs:
if action == expected_action:
actual_prob = prob
break
score = 1 - abs(actual_prob - scenario.expected_prob)
results.append((scenario.name, score, scenario.expected_action_str,
scenario.expected_prob, actual_prob))
total_score += score
if scenarios:
total_score /= len(scenarios)
logging.info("Average score across all scenarios: %.4f.", total_score)
results_dict = {}
for name, score, expected_action, expected_prob, actual_prob in results:
logging.info("************************************************************")
logging.info("Scenario: '%s'. Score: %.4f.", name, score)
logging.info("Expected action %s with probability %.4f but assigned %.4f.",
expected_action, expected_prob, actual_prob)
logging.info("***************************")
results_dict["scenario_score: " + name] = score
results_dict["mean_score"] = total_score
return results_dict
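# A minimal sketch of the bot interface `play_bot_in_scenarios` relies on:
# `restart()`, `force_action(state, action)`, and a `step(state)` returning an
# `(actions_and_probs, action)` pair. `_UniformScenarioBot` is a hypothetical
# illustration, not an OpenSpiel class.
import random  # pylint: disable=g-import-not-at-top
import pyspiel  # pylint: disable=g-import-not-at-top
class _UniformScenarioBot(object):
  """Plays uniformly at random and ignores forced actions."""
  def restart(self):
    pass
  def force_action(self, state, action):
    del state, action  # A real bot might condition on the forced action.
  def step(self, state):
    legal_actions = state.legal_actions(state.current_player())
    prob = 1.0 / len(legal_actions)
    return [(a, prob) for a in legal_actions], random.choice(legal_actions)
def _example_catch_scenarios():
  """Illustrative only: scores the uniform bot on the default catch scenarios."""
  game = pyspiel.load_game("catch")
  bots = [_UniformScenarioBot() for _ in range(game.num_players())]
  return play_bot_in_scenarios(game, bots)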
| open_spiel-master | open_spiel/python/bots/scenarios.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for the HIGC random bot.
This test mimics the basic C++ tests in higc/bots/random_bot.py and is
duplicated here to make automated wheels tests work in the absence
of the higc/ directory.
"""
import base64
import sys
import numpy as np
from open_spiel.python.observation import make_observation
import pyspiel
game_name = input()
play_as = int(input())
game = pyspiel.load_game(game_name)
public_observation = make_observation(
game,
pyspiel.IIGObservationType(
perfect_recall=False,
public_info=True,
private_info=pyspiel.PrivateInfoType.NONE))
private_observation = make_observation(
game,
pyspiel.IIGObservationType(
perfect_recall=False,
public_info=False,
private_info=pyspiel.PrivateInfoType.SINGLE_PLAYER))
print("ready")
while True:
print("start")
state = game.new_initial_state()
while True:
message = input()
if message == "tournament over":
print("tournament over")
sys.exit(0)
if message.startswith("match over"):
print("match over")
break
public_buf, private_buf, *legal_actions = message.split(" ")
public_observation.decompress(base64.b64decode(public_buf))
private_observation.decompress(base64.b64decode(private_buf))
if legal_actions:
print(np.random.choice(legal_actions))
else:
print("ponder")
assert message.startswith("match over")
score = int(message.split(" ")[-1])
print("score:", score, file=sys.stderr)
| open_spiel-master | open_spiel/python/bots/higc_random_bot_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.bots.bluechip_bridge_uncontested_bidding."""
import itertools
from absl.testing import absltest
from open_spiel.python.bots import bluechip_bridge_uncontested_bidding
import pyspiel
_BID_1D = bluechip_bridge_uncontested_bidding._string_to_action("1D")
_BID_1H = bluechip_bridge_uncontested_bidding._string_to_action("1H")
_BID_2H = bluechip_bridge_uncontested_bidding._string_to_action("2H")
class BluechipBridgeWrapperTest(absltest.TestCase):
def test_complete_session_east(self):
game = pyspiel.load_game("bridge_uncontested_bidding")
mock_client = absltest.mock.Mock(
**{
"read_line.side_effect": [
'Connecting "WBridge5" as ANYPL using protocol version 18',
"EAST ready for teams",
"EAST ready to start",
"EAST ready for deal",
"EAST ready for cards",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
"EAST bids 1H",
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
"EAST PASSES",
]
})
bot = bluechip_bridge_uncontested_bidding.BlueChipBridgeBot(
game, 1, mock_client)
state = game.deserialize_state("A86.J543.K642.A3 J.KQ962.T953.J96")
state.apply_action(_BID_1D)
policy, action = bot.step(state)
self.assertEqual(action, _BID_1H)
self.assertEqual(policy, (_BID_1H, 1.0))
state.apply_action(action)
state.apply_action(_BID_2H)
policy, action = bot.step(state)
self.assertEqual(action, bluechip_bridge_uncontested_bidding._PASS_ACTION)
self.assertEqual(policy,
(bluechip_bridge_uncontested_bidding._PASS_ACTION, 1.0))
# Finished - now check that the game state is correct.
self.assertEqual(str(state), "A86.J543.K642.A3 J.KQ962.T953.J96 1D-1H-2H")
# Check that we received the expected messages.
mock_client.assert_has_calls([
absltest.mock.call.start(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('EAST ("WBridge5") seated'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('Teams: N/S "opponents" E/W "bidders"'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("start of board"),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
"Board number 8. Dealer WEST. Neither vulnerable."),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
"EAST's cards: S J. H K Q 9 6 2. D T 9 5 3. C J 9 6."),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("WEST bids 1D"),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("NORTH PASSES"),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("SOUTH PASSES"),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("WEST bids 2H"),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("NORTH PASSES"),
])
def test_complete_session_west(self):
game = pyspiel.load_game("bridge_uncontested_bidding")
mock_client = absltest.mock.Mock(
**{
"read_line.side_effect": [
'Connecting "WBridge5" as ANYPL using protocol version 18',
"WEST ready for teams",
"WEST ready to start",
"WEST ready for deal",
"WEST ready for cards",
"WEST bids 1D Alert.",
"WEST ready for NORTH's bid",
"WEST ready for EAST's bid",
"WEST ready for SOUTH's bid",
"WEST bids 2H",
"WEST ready for NORTH's bid",
"WEST ready for EAST's bid",
"WEST ready for SOUTH's bid",
]
})
bot = bluechip_bridge_uncontested_bidding.BlueChipBridgeBot(
game, 0, mock_client)
state = game.deserialize_state("A86.J543.K642.A3 J.KQ962.T953.J96")
policy, action = bot.step(state)
self.assertEqual(action, _BID_1D)
self.assertEqual(policy, (_BID_1D, 1.0))
state.apply_action(action)
state.apply_action(_BID_1H)
policy, action = bot.step(state)
self.assertEqual(action, _BID_2H)
self.assertEqual(policy, (_BID_2H, 1.0))
state.apply_action(action)
# Finished - now check that the game state is correct.
self.assertEqual(str(state), "A86.J543.K642.A3 J.KQ962.T953.J96 1D-1H-2H")
# Check that we received the expected messages.
mock_client.assert_has_calls([
absltest.mock.call.start(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST ("WBridge5") seated'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('Teams: N/S "opponents" E/W "bidders"'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("start of board"),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
"Board number 8. Dealer WEST. Neither vulnerable."),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
"WEST's cards: S A 8 6. H J 5 4 3. D K 6 4 2. C A 3."),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("NORTH PASSES"),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("EAST bids 1H"),
absltest.mock.call.read_line(),
absltest.mock.call.send_line("SOUTH PASSES"),
absltest.mock.call.read_line(),
])
def test_invalid_fixed_message(self):
game = pyspiel.load_game("bridge_uncontested_bidding")
mock_client = absltest.mock.Mock(
**{
"read_line.side_effect": [
'Connecting "WBridge5" as ANYPL using protocol version 18',
"WEST ready for cards",
]
})
bot = bluechip_bridge_uncontested_bidding.BlueChipBridgeBot(
game, 0, mock_client)
state = game.deserialize_state("A86.J543.K642.A3 J.KQ962.T953.J96")
with self.assertRaisesRegex(
ValueError,
"Received 'WEST ready for cards' but expected 'WEST ready for teams'"):
bot.step(state)
def test_invalid_variable_message(self):
game = pyspiel.load_game("bridge_uncontested_bidding")
mock_client = absltest.mock.Mock(
**{
"read_line.side_effect": [
'Connecting "WBridge5" as ANYPL using protocol version 18',
"WEST ready for teams",
"WEST ready to start",
"WEST ready for deal",
"WEST ready for cards",
"NORTH bids 1S",
]
})
bot = bluechip_bridge_uncontested_bidding.BlueChipBridgeBot(
game, 0, mock_client)
state = game.deserialize_state("A86.J543.K642.A3 J.KQ962.T953.J96")
with self.assertRaisesRegex(
ValueError,
"Received 'NORTH bids 1S' which does not match regex 'WEST"):
bot.step(state)
def test_string_to_action_to_string_roundtrip(self):
for level, trump in itertools.product(
range(1, 8), bluechip_bridge_uncontested_bidding._TRUMP_SUIT):
bid = str(level) + trump
action = bluechip_bridge_uncontested_bidding._string_to_action(bid)
self.assertEqual(
bid, bluechip_bridge_uncontested_bidding._action_to_string(action))
def test_action_to_string_to_action_roundtrip(self):
for action in range(1, 36):
bid = bluechip_bridge_uncontested_bidding._action_to_string(action)
self.assertEqual(
action, bluechip_bridge_uncontested_bidding._string_to_action(bid))
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/bots/bluechip_bridge_uncontested_bidding_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A bot that samples from legal actions based on a policy."""
import pyspiel
class PolicyBot(pyspiel.Bot):
"""Samples an action from action probabilities based on a policy.
This bot plays actions as specified by the underlying Policy. Problems may
occur if the policy assigns non-zero probability to invalid actions, or if the
policy is not complete, or if probabilities don't sum to 1.
"""
def __init__(self, player_id, rng, policy):
"""Initializes a policy bot.
Args:
player_id: The integer id of the player for this bot, e.g. `0` if acting
as the first player.
rng: A random number generator supporting a `choice` method, e.g.
`np.random`
policy: A policy to get action distributions
"""
pyspiel.Bot.__init__(self)
self._player_id = player_id
self._rng = rng
self._policy = policy
def player_id(self):
return self._player_id
def restart_at(self, state):
pass
def step_with_policy(self, state):
"""Returns the stochastic policy and selected action in the given state.
Args:
state: The current state of the game.
Returns:
A `(policy, action)` pair, where policy is a `list` of
`(action, probability)` pairs for each legal action, with
`probability` defined by the policy action probabilities.
The `action` is sampled from the distribution,
or `pyspiel.INVALID_ACTION` if there are no actions available.
"""
policy = self._policy.action_probabilities(state, self._player_id)
action_list = list(policy.keys())
    if not action_list:
return [], pyspiel.INVALID_ACTION
action = self._rng.choice(action_list, p=list(policy.values()))
return list(policy.items()), action
def step(self, state):
return self.step_with_policy(state)[1]
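# A minimal usage sketch: drive two PolicyBots with the library's uniform
# random policy on Kuhn poker. Assumes `open_spiel.python.policy` provides
# `UniformRandomPolicy`.
def _example_policy_bot_selfplay():
  """Illustrative only; plays one game of Kuhn poker and returns the payoffs."""
  # Local imports keep the illustration self-contained.
  import numpy as np  # pylint: disable=g-import-not-at-top
  from open_spiel.python import policy as policy_lib  # pylint: disable=g-import-not-at-top
  game = pyspiel.load_game("kuhn_poker")
  uniform_policy = policy_lib.UniformRandomPolicy(game)
  bots = [PolicyBot(i, np.random, uniform_policy)
          for i in range(game.num_players())]
  state = game.new_initial_state()
  while not state.is_terminal():
    if state.is_chance_node():
      outcomes, probs = zip(*state.chance_outcomes())
      state.apply_action(np.random.choice(outcomes, p=probs))
    else:
      state.apply_action(bots[state.current_player()].step(state))
  return state.returns()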
| open_spiel-master | open_spiel/python/bots/policy.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/bots/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A bot that asks the user which action to play."""
import math
import os
import pyspiel
_MAX_WIDTH = int(os.getenv("COLUMNS", "80")) # Get your TTY width.
def _print_columns(strings):
"""Prints a list of strings in columns."""
padding = 2
longest = max(len(s) for s in strings)
max_columns = math.floor((_MAX_WIDTH - 1) / (longest + 2 * padding))
rows = math.ceil(len(strings) / max_columns)
columns = math.ceil(len(strings) / rows) # Might not fill all max_columns.
for r in range(rows):
for c in range(columns):
i = r + c * rows
if i < len(strings):
print(" " * padding + strings[i].ljust(longest + padding), end="")
print()
class HumanBot(pyspiel.Bot):
"""Asks the user which action to play."""
def step_with_policy(self, state):
"""Returns the stochastic policy and selected action in the given state."""
legal_actions = state.legal_actions(state.current_player())
if not legal_actions:
return [], pyspiel.INVALID_ACTION
p = 1 / len(legal_actions)
policy = [(action, p) for action in legal_actions]
action_map = {
state.action_to_string(state.current_player(), action): action
for action in legal_actions
}
while True:
action_str = input("Choose an action (empty to print legal actions): ")
if not action_str:
print("Legal actions(s):")
longest_num = max(len(str(action)) for action in legal_actions)
_print_columns([
"{}: {}".format(str(action).rjust(longest_num), action_str)
for action_str, action in sorted(action_map.items())
])
continue
if action_str in action_map:
return policy, action_map[action_str]
try:
action = int(action_str)
except ValueError:
print("Could not parse the action:", action_str)
continue
if action in legal_actions:
return policy, action
print("Illegal action selected:", action_str)
def step(self, state):
return self.step_with_policy(state)[1]
def restart_at(self, state):
pass
| open_spiel-master | open_spiel/python/bots/human.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wraps third-party bridge bots to make them usable in OpenSpiel.
This code enables OpenSpiel interoperation for bots which implement the BlueChip
bridge protocol. This is widely used, e.g. in the World computer bridge
championships. For a rough outline of the protocol, see:
http://www.bluechipbridge.co.uk/protocol.htm
No formal specification is available. This implementation has been verified
to work correctly with WBridge5.
This bot controls a single player in the game of uncontested bridge bidding. It
chooses its actions by invoking an external bot which plays the full game of
bridge. This means that each time the bot is asked for an action, it sends up to
three actions (forced passes from both opponents, plus partner's most recent
action) to the external bridge bot, and obtains an action in return.
Since we are restricting ourselves to the uncontested bidding game, we have
no support for Doubling, Redoubling, or the play of the cards.
"""
import re
import pyspiel
# Example session:
#
# Recv: Connecting "WBridge5" as ANYPL using protocol version 18
# Send: WEST ("WBridge5") seated
# Recv: WEST ready for teams
# Send: Teams: N/S "silent" E/W "bidders"
# Recv: WEST ready to start
# Send: Start of board
# Recv: WEST ready for deal
# Send: Board number 8. Dealer WEST. Neither vulnerable.
# Recv: WEST ready for cards
# Send: WEST's cards: S A T 9 5. H K 6 5. D Q J 8 7 6. C 7.
# Recv: WEST PASSES
# Recv: WEST ready for NORTH's bid
# Send: EAST PASSES
# Recv: WEST ready for EAST's bid
# Send: EAST bids 1C
# Recv: WEST ready for SOUTH's bid
# Template regular expressions for messages we receive
_CONNECT = 'Connecting "(?P<client_name>.*)" as ANYPL using protocol version 18'
_SELF_BID_OR_PASS = "{seat} ((?P<pass>PASSES)|bids (?P<bid>[^ ]*))( Alert.)?"
# Templates for fixed messages we receive
_READY_FOR_TEAMS = "{seat} ready for teams"
_READY_TO_START = "{seat} ready to start"
_READY_FOR_DEAL = "{seat} ready for deal"
_READY_FOR_CARDS = "{seat} ready for cards"
_READY_FOR_BID = "{seat} ready for {other}'s bid"
# Templates for messages we send
_SEATED = '{seat} ("{client_name}") seated'
_TEAMS = 'Teams: N/S "opponents" E/W "bidders"'
_START_BOARD = "start of board"
# The board number is arbitrary, but "8" is consistent with the dealer and
# vulnerability we want (in the standard numbering). See Law 2:
# http://web2.acbl.org/documentlibrary/play/Laws-of-Duplicate-Bridge.pdf
_DEAL = "Board number 8. Dealer WEST. Neither vulnerable."
_CARDS = "{seat}'s cards: {hand}"
_OTHER_PLAYER_PASS = "{player} PASSES"
_OTHER_PLAYER_BID = "{player} bids {bid}"
# BlueChip bridge protocol message constants
_SEATS = ["WEST", "EAST"]
_OPPONENTS = ["NORTH", "SOUTH"]
_TRUMP_SUIT = ["C", "D", "H", "S", "NT"]
_NUMBER_TRUMP_SUITS = len(_TRUMP_SUIT)
_RANKS = ["2", "3", "4", "5", "6", "7", "8", "9", "T", "J", "Q", "K", "A"]
# OpenSpiel constants
_PASS_ACTION = 0
def _string_to_action(call_str):
"""Converts a BlueChip bid string to an OpenSpiel action id (an integer).
Args:
call_str: string representing a bid in the BlueChip format, i.e. "[level]
(as a digit) + [trump suit (S, H, D, C or NT)]", e.g. "1C".
Returns:
An integer action id - see `bridge_uncontested_bidding.cc`, functions
`Denomination` and `Level`.
0 is reserved for Pass, so bids are in order from 1 upwards: 1 = 1C,
2 = 1D, etc.
"""
level = int(call_str[0])
trumps = _TRUMP_SUIT.index(call_str[1:])
return (level - 1) * _NUMBER_TRUMP_SUITS + trumps + 1
def _action_to_string(action):
"""Converts OpenSpiel action id (an integer) to a BlueChip bid string.
Args:
action: an integer action id corresponding to a bid.
Returns:
A string in BlueChip format.
Inverse of `_string_to_action`. See documentation there.
"""
level = str((action - 1) // _NUMBER_TRUMP_SUITS + 1)
trumps = _TRUMP_SUIT[(action - 1) % _NUMBER_TRUMP_SUITS]
return level + trumps
def _expect_regex(client, regex):
"""Reads a line from the client, parses it using the regular expression."""
line = client.read_line()
match = re.match(regex, line)
if not match:
raise ValueError("Received '{}' which does not match regex '{}'".format(
line, regex))
return match.groupdict()
def _expect(client, expected):
"""Reads a line from the client, checks it matches expected line exactly."""
line = client.read_line()
if expected != line:
raise ValueError("Received '{}' but expected '{}'".format(line, expected))
def _hand_string(state_vec):
"""Returns the hand of the to-play player in the state in BlueChip format."""
# See UncontestedBiddingState::InformationStateTensor
# The first 52 elements are whether or not we hold the given card (cards
# ordered suit-by-suit, in ascending order of rank).
suits = []
for suit in reversed(range(4)):
cards = []
for rank in reversed(range(13)):
if state_vec[rank * 4 + suit]:
cards.append(_RANKS[rank])
suits.append(_TRUMP_SUIT[suit] + " " + (" ".join(cards) if cards else "-") +
".")
return " ".join(suits)
def _actions(state_vec):
"""Returns the player actions that have been taken in the game so far."""
# See UncontestedBiddingState::InformationStateTensor
# The first 52 elements are the cards held, then two elements for each
# possible action, specifying which of the two players has taken it (if
# either player has). Then two elements specifying which player we are.
actions = state_vec[52:-2]
return [index // 2 for index, value in enumerate(actions) if value]
def _connect(client, seat, state_vec):
"""Performs the initial handshake with a BlueChip bot."""
client.start()
client_name = _expect_regex(client, _CONNECT)["client_name"]
client.send_line(_SEATED.format(seat=seat, client_name=client_name))
_expect(client, _READY_FOR_TEAMS.format(seat=seat))
client.send_line(_TEAMS)
_expect(client, _READY_TO_START.format(seat=seat))
client.send_line(_START_BOARD)
_expect(client, _READY_FOR_DEAL.format(seat=seat))
client.send_line(_DEAL)
_expect(client, _READY_FOR_CARDS.format(seat=seat))
client.send_line(_CARDS.format(seat=seat, hand=_hand_string(state_vec)))
class BlueChipBridgeBot(pyspiel.Bot):
"""An OpenSpiel bot, wrapping a BlueChip bridge bot implementation."""
def __init__(self, game, player_id, client):
"""Initializes an OpenSpiel `Bot` wrapping a BlueChip-compatible bot.
Args:
game: The OpenSpiel game object, should be an instance of
bridge_uncontested_bidding, without forced actions.
player_id: The id of the player the bot will act as, 0 = West (dealer), 1
= East.
client: The BlueChip bot; must support methods `start`, `read_line`, and
`send_line`.
"""
pyspiel.Bot.__init__(self)
self._game = game
self._player_id = player_id
self._client = client
self._seat = _SEATS[player_id]
self._partner = _SEATS[1 - player_id]
self._left_hand_opponent = _OPPONENTS[player_id]
self._right_hand_opponent = _OPPONENTS[1 - player_id]
self._connected = False
def player_id(self):
return self._player_id
def restart(self):
"""Indicates that the next step may be from a non-sequential state."""
self._connected = False
def restart_at(self, state):
"""Indicates that the next step may be from a non-sequential state."""
self._connected = False
def step(self, state):
"""Returns the action and policy for the bot in this state."""
state_vec = state.information_state_tensor(self.player_id())
# Connect if necessary.
if not self._connected:
_connect(self._client, self._seat, state_vec)
self._connected = True
# Get the actions in the game so far.
actions = _actions(state_vec)
# Unless this is the first or second action in the game, our
# left-hand-opponent will have passed since our last turn.
if len(actions) > 1:
_expect(
self._client,
_READY_FOR_BID.format(
seat=self._seat, other=self._left_hand_opponent))
self._client.send_line(
_OTHER_PLAYER_PASS.format(player=self._left_hand_opponent))
# Unless there aren't any prior actions, our partner will have bid
# or passed since our last turn, and so we need to send partner's action
# to the bot.
if actions:
_expect(self._client,
_READY_FOR_BID.format(seat=self._seat, other=self._partner))
if actions[-1] == _PASS_ACTION:
self._client.send_line(_OTHER_PLAYER_PASS.format(player=self._partner))
else:
self._client.send_line(
_OTHER_PLAYER_BID.format(
player=self._partner, bid=_action_to_string(actions[-1])))
# Unless there aren't any prior actions, our right-hand-opponent will have
# passed since our last turn.
if actions:
_expect(
self._client,
_READY_FOR_BID.format(
seat=self._seat, other=self._right_hand_opponent))
self._client.send_line(
_OTHER_PLAYER_PASS.format(player=self._right_hand_opponent))
# Get our action from the bot.
our_action = _expect_regex(self._client,
_SELF_BID_OR_PASS.format(seat=self._seat))
action = 0 if our_action["pass"] else _string_to_action(our_action["bid"])
return (action, 1.0), action
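# A small sketch of the bid encoding implemented above: action 0 is Pass, and
# bids are numbered upwards from 1C=1, 1D=2, ... to 7NT=35. The helpers are
# module-private, so this is illustrative rather than a public API.
def _example_bid_encoding():
  """Illustrative only."""
  assert _string_to_action("1C") == 1
  assert _string_to_action("1D") == 2
  assert _string_to_action("7NT") == 35
  for action in range(1, 36):  # Round-trips for every bid.
    assert _string_to_action(_action_to_string(action)) == action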
| open_spiel-master | open_spiel/python/bots/bluechip_bridge_uncontested_bidding.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Wraps third-party bridge bots to make them usable in OpenSpiel.
This code enables OpenSpiel interoperation for bots which implement the BlueChip
bridge protocol. This is widely used, e.g. in the World computer bridge
championships. For a rough outline of the protocol, see:
http://www.bluechipbridge.co.uk/protocol.htm
No formal specification is available. This implementation has been verified
to work correctly with WBridge5.
This bot controls a single player in the full game of bridge, including both the
bidding and play phase. It chooses its actions by invoking an external bot which
plays the full game of bridge. This means that each time the bot is asked for an
action, it sends up to three actions (one for each other player) to the external
bridge bot, and obtains an action in return.
"""
import re
import pyspiel
# Example session:
#
# Recv: Connecting "WBridge5" as ANYPL using protocol version 18
# Send: WEST ("WBridge5") seated
# Recv: WEST ready for teams
# Send: Teams: N/S "silent" E/W "bidders"
# Recv: WEST ready to start
# Send: Start of board
# Recv: WEST ready for deal
# Send: Board number 8. Dealer WEST. Neither vulnerable.
# Recv: WEST ready for cards
# Send: WEST's cards: S A T 9 5. H K 6 5. D Q J 8 7 6. C 7.
# Recv: WEST PASSES
# Recv: WEST ready for NORTH's bid
# Send: EAST PASSES
# Recv: WEST ready for EAST's bid
# Send: EAST bids 1C
# Recv: WEST ready for SOUTH's bid
# The game we support
GAME_STR = "bridge(use_double_dummy_result=False)"
# Template regular expressions for messages we receive
_CONNECT = 'Connecting "(?P<client_name>.*)" as ANYPL using protocol version 18'
_PLAYER_ACTION = ("(?P<seat>NORTH|SOUTH|EAST|WEST) "
"((?P<pass>PASSES)|(?P<dbl>DOUBLES)|(?P<rdbl>REDOUBLES)|bids "
"(?P<bid>[^ ]*)|(plays (?P<play>[23456789tjqka][cdhs])))"
"(?P<alert> Alert.)?")
_READY_FOR_OTHER = ("{seat} ready for "
"(((?P<other>[^']*)'s ((bid)|(card to trick \\d+)))"
"|(?P<dummy>dummy))")
# Templates for fixed messages we receive
_READY_FOR_TEAMS = "{seat} ready for teams"
_READY_TO_START = "{seat} ready to start"
_READY_FOR_DEAL = "{seat} ready for deal"
_READY_FOR_CARDS = "{seat} ready for cards"
_READY_FOR_BID = "{seat} ready for {other}'s bid"
# Templates for messages we send
_SEATED = '{seat} ("{client_name}") seated'
_TEAMS = 'Teams: N/S "north-south" E/W "east-west"'
_START_BOARD = "start of board"
_DEAL = "Board number {board}. Dealer NORTH. Neither vulnerable."
_CARDS = "{seat}'s cards: {hand}"
_OTHER_PLAYER_ACTION = "{player} {action}"
_PLAYER_TO_LEAD = "{seat} to lead"
_DUMMY_CARDS = "Dummy's cards: {}"
# BlueChip bridge protocol message constants
_SEATS = ["NORTH", "EAST", "SOUTH", "WEST"]
_TRUMP_SUIT = ["C", "D", "H", "S", "NT"]
_NUMBER_TRUMP_SUITS = len(_TRUMP_SUIT)
_SUIT = _TRUMP_SUIT[:4]
_NUMBER_SUITS = len(_SUIT)
_RANKS = ["2", "3", "4", "5", "6", "7", "8", "9", "T", "J", "Q", "K", "A"]
_LSUIT = [x.lower() for x in _SUIT]
_LRANKS = [x.lower() for x in _RANKS]
# OpenSpiel action ids
_ACTION_PASS = 52
_ACTION_DBL = 53
_ACTION_RDBL = 54
_ACTION_BID = 55 # First bid, i.e. 1C
def _bid_to_action(action_str):
"""Returns an OpenSpiel action id (an integer) from a BlueChip bid string."""
level = int(action_str[0])
trumps = _TRUMP_SUIT.index(action_str[1:])
return _ACTION_BID + (level - 1) * _NUMBER_TRUMP_SUITS + trumps
def _play_to_action(action_str):
"""Returns an OpenSpiel action id (an integer) from a BlueChip card string."""
rank = _LRANKS.index(action_str[0])
suit = _LSUIT.index(action_str[1])
return rank * _NUMBER_SUITS + suit
def _action_to_string(action):
"""Converts OpenSpiel action id (an integer) to a BlueChip action string.
Args:
action: an integer action id corresponding to a bid.
Returns:
A string in BlueChip format, e.g. 'PASSES' or 'bids 1H', or 'plays ck'.
"""
if action == _ACTION_PASS:
return "PASSES"
elif action == _ACTION_DBL:
return "DOUBLES"
elif action == _ACTION_RDBL:
return "REDOUBLES"
elif action >= _ACTION_BID:
level = str((action - _ACTION_BID) // _NUMBER_TRUMP_SUITS + 1)
trumps = _TRUMP_SUIT[(action - _ACTION_BID) % _NUMBER_TRUMP_SUITS]
return "bids " + level + trumps
else:
rank = action // _NUMBER_SUITS
suit = action % _NUMBER_SUITS
return "plays " + _LRANKS[rank] + _LSUIT[suit]
def _expect_regex(controller, regex):
"""Reads a line from the controller, parses it using the regular expression."""
line = controller.read_line()
match = re.match(regex, line)
if not match:
raise ValueError("Received '{}' which does not match regex '{}'".format(
line, regex))
return match.groupdict()
def _expect(controller, expected):
"""Reads a line from the controller, checks it matches expected line exactly."""
line = controller.read_line()
if expected != line:
raise ValueError("Received '{}' but expected '{}'".format(line, expected))
def _hand_string(cards):
"""Returns the hand of the to-play player in the state in BlueChip format."""
if len(cards) != 13:
raise ValueError("Must have 13 cards")
suits = [[] for _ in range(4)]
for card in reversed(sorted(cards)):
suit = card % 4
rank = card // 4
suits[suit].append(_RANKS[rank])
for i in range(4):
if suits[i]:
suits[i] = _TRUMP_SUIT[i] + " " + " ".join(suits[i]) + "."
else:
suits[i] = _TRUMP_SUIT[i] + " -."
return " ".join(suits)
def _connect(controller, seat):
"""Performs the initial handshake with a BlueChip bot."""
client_name = _expect_regex(controller, _CONNECT)["client_name"]
controller.send_line(_SEATED.format(seat=seat, client_name=client_name))
_expect(controller, _READY_FOR_TEAMS.format(seat=seat))
controller.send_line(_TEAMS)
_expect(controller, _READY_TO_START.format(seat=seat))
def _new_deal(controller, seat, hand, board):
"""Informs a BlueChip bots that there is a new deal."""
controller.send_line(_START_BOARD)
_expect(controller, _READY_FOR_DEAL.format(seat=seat))
controller.send_line(_DEAL.format(board=board))
_expect(controller, _READY_FOR_CARDS.format(seat=seat))
controller.send_line(_CARDS.format(seat=seat, hand=hand))
class BlueChipBridgeBot(pyspiel.Bot):
"""An OpenSpiel bot, wrapping a BlueChip bridge bot implementation."""
def __init__(self, game, player_id, controller_factory):
"""Initializes an OpenSpiel `Bot` wrapping a BlueChip-compatible bot.
Args:
game: The OpenSpiel game object, should be an instance of
`bridge(use_double_dummy_result=false)`.
player_id: The id of the player the bot will act as, 0 = North (dealer), 1
= East, 2 = South, 3 = West.
controller_factory: Callable that returns new BlueChip controllers which
must support methods `read_line` and `send_line`, and `terminate`.
"""
pyspiel.Bot.__init__(self)
if str(game) != GAME_STR:
raise ValueError(f"BlueChipBridgeBot invoked with {game}")
self._game = game
self._player_id = player_id
self._controller_factory = controller_factory
self._seat = _SEATS[player_id]
self._num_actions = 52
self.dummy = None
self.is_play_phase = False
self.cards_played = 0
self._board = 0
self._state = self._game.new_initial_state()
self._controller = None
def player_id(self):
return self._player_id
def restart(self):
"""Indicates that we are starting a new episode."""
# If we already have a fresh state, there is nothing to do.
if not self._state.history():
return
self._num_actions = 52
self.dummy = None
self.is_play_phase = False
self.cards_played = 0
# We didn't see the end of the episode, so the external bot will still
# be expecting it. If we can autoplay other people's actions to the end
# (e.g. everyone passes or players play their last card), then do that.
if not self._state.is_terminal():
state = self._state.clone()
while (not state.is_terminal()
and state.current_player() != self._player_id):
legal_actions = state.legal_actions()
if _ACTION_PASS in legal_actions:
          state.apply_action(_ACTION_PASS)
elif len(legal_actions) == 1:
state.apply_action(legal_actions[0])
if state.is_terminal():
self.inform_state(state)
# Otherwise, we will have to restart the external bot, because
# the protocol makes no provision for this case.
if not self._state.is_terminal():
self._controller.terminate()
self._controller = None
self._state = self._game.new_initial_state()
def _update_for_state(self):
"""Called for all non-chance nodes, whether or not we have to act."""
# Get the actions in the game so far.
actions = self._state.history()
self.is_play_phase = (not self._state.is_terminal() and
max(self._state.legal_actions()) < 52)
self.cards_played = sum(1 if a < 52 else 0 for a in actions) - 52
# If this is the first time we've seen the deal, send our hand.
if len(actions) == 52:
self._board += 1
_new_deal(self._controller, self._seat,
_hand_string(actions[self._player_id:52:4]), self._board)
# Send actions since last `step` call.
for other_player_action in actions[self._num_actions:]:
other = _expect_regex(self._controller,
_READY_FOR_OTHER.format(seat=self._seat))
other_player = other["other"]
if other_player == "Dummy":
other_player = _SEATS[self.dummy]
self._controller.send_line(
_OTHER_PLAYER_ACTION.format(
player=other_player,
action=_action_to_string(other_player_action)))
self._num_actions = len(actions)
# If the opening lead has just been made, give the dummy.
if self.is_play_phase and self.cards_played == 1:
self.dummy = self._state.current_player() ^ 2
if self._player_id != self.dummy:
other = _expect_regex(self._controller,
_READY_FOR_OTHER.format(seat=self._seat))
dummy_cards = _hand_string(actions[self.dummy:52:4])
self._controller.send_line(_DUMMY_CARDS.format(dummy_cards))
# If the episode is terminal, send (fake) timing info.
if self._state.is_terminal():
self._controller.send_line(
"Timing - N/S : this board [1:15], total [0:11:23]. "
"E/W : this board [1:18], total [0:10:23]"
)
self.dummy = None
self.is_play_phase = False
self.cards_played = 0
def inform_action(self, state, player, action):
del player, action
self.inform_state(state)
def inform_state(self, state):
# Connect if we need to.
if self._controller is None:
self._controller = self._controller_factory()
_connect(self._controller, self._seat)
full_history = state.history()
known_history = self._state.history()
if full_history[:len(known_history)] != known_history:
raise ValueError(
"Supplied state is inconsistent with bot's internal state\n"
f"Supplied state:\n{state}\n"
f"Internal state:\n{self._state}\n")
for action in full_history[len(known_history):]:
self._state.apply_action(action)
if not self._state.is_chance_node():
self._update_for_state()
def step(self, state):
"""Returns an action for the given state."""
# Bring the external bot up-to-date.
self.inform_state(state)
# If we're on a new trick, tell the bot it is its turn.
if self.is_play_phase and self.cards_played % 4 == 0:
self._controller.send_line(_PLAYER_TO_LEAD.format(seat=self._seat))
# Get our action from the bot.
our_action = _expect_regex(self._controller, _PLAYER_ACTION)
self._num_actions += 1
if our_action["pass"]:
return _ACTION_PASS
elif our_action["dbl"]:
return _ACTION_DBL
elif our_action["rdbl"]:
return _ACTION_RDBL
elif our_action["bid"]:
return _bid_to_action(our_action["bid"])
elif our_action["play"]:
return _play_to_action(our_action["play"])
def terminate(self):
self._controller.terminate()
self._controller = None
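# A small sketch of the action encoding used above: actions 0-51 are card
# plays (rank * 4 + suit), 52 = Pass, 53 = Double, 54 = Redouble, and bids
# start at 55 (1C). The helpers are module-private; this is illustrative only.
def _example_action_encoding():
  """Illustrative only."""
  assert _bid_to_action("1C") == 55
  assert _bid_to_action("7NT") == 89
  assert _play_to_action("2c") == 0    # Two of clubs.
  assert _play_to_action("as") == 51   # Ace of spades.
  assert _action_to_string(_ACTION_PASS) == "PASSES"
  assert _action_to_string(0) == "plays 2c"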
| open_spiel-master | open_spiel/python/bots/bluechip_bridge.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for uniform random bot."""
import random
from absl.testing import absltest
from open_spiel.python.bots import uniform_random
import pyspiel
class BotTest(absltest.TestCase):
def test_policy_is_uniform(self):
game = pyspiel.load_game("leduc_poker")
bots = [
uniform_random.UniformRandomBot(0, random),
uniform_random.UniformRandomBot(1, random)
]
# deal each player a card
state = game.new_initial_state()
state.apply_action(2)
state.apply_action(4)
# p0 starts: uniform from [check, bet]
policy, _ = bots[0].step_with_policy(state)
self.assertCountEqual(policy, [(1, 0.5), (2, 0.5)])
    # After p0 bets, p1 chooses from [fold, call, raise]
state.apply_action(2)
policy, _ = bots[1].step_with_policy(state)
self.assertCountEqual(policy, [(0, 1 / 3), (1, 1 / 3), (2, 1 / 3)])
def test_no_legal_actions(self):
game = pyspiel.load_game("kuhn_poker")
bot = uniform_random.UniformRandomBot(0, random)
state = game.new_initial_state()
state.apply_action(2) # deal
state.apply_action(1) # deal
state.apply_action(1) # bet
state.apply_action(0) # fold
bot.restart_at(state)
policy, action = bot.step_with_policy(state)
self.assertEqual(policy, [])
self.assertEqual(action, pyspiel.INVALID_ACTION)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/bots/uniform_random_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A bot that chooses uniformly at random from legal actions."""
import pyspiel
class UniformRandomBot(pyspiel.Bot):
"""Chooses uniformly at random from the available legal actions."""
def __init__(self, player_id, rng):
"""Initializes a uniform-random bot.
Args:
player_id: The integer id of the player for this bot, e.g. `0` if acting
as the first player.
rng: A random number generator supporting a `choice` method, e.g.
`np.random`
"""
pyspiel.Bot.__init__(self)
self._player_id = player_id
self._rng = rng
def restart_at(self, state):
pass
def player_id(self):
return self._player_id
def provides_policy(self):
return True
def step_with_policy(self, state):
"""Returns the stochastic policy and selected action in the given state.
Args:
state: The current state of the game.
Returns:
A `(policy, action)` pair, where policy is a `list` of
`(action, probability)` pairs for each legal action, with
`probability = 1/num_actions`
The `action` is selected uniformly at random from the legal actions,
or `pyspiel.INVALID_ACTION` if there are no legal actions available.
"""
legal_actions = state.legal_actions(self._player_id)
if not legal_actions:
return [], pyspiel.INVALID_ACTION
p = 1 / len(legal_actions)
policy = [(action, p) for action in legal_actions]
action = self._rng.choice(legal_actions)
return policy, action
def step(self, state):
return self.step_with_policy(state)[1]
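# A minimal usage sketch: two uniform-random bots play a full game of
# tic-tac-toe via the `evaluate_bots` helper used elsewhere in this package.
def _example_random_selfplay():
  """Illustrative only; returns the final returns of one random game."""
  # Local imports keep the illustration self-contained.
  import numpy as np  # pylint: disable=g-import-not-at-top
  from open_spiel.python.algorithms import evaluate_bots  # pylint: disable=g-import-not-at-top
  game = pyspiel.load_game("tic_tac_toe")
  bots = [UniformRandomBot(i, np.random) for i in range(game.num_players())]
  return evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)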
| open_spiel-master | open_spiel/python/bots/uniform_random.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A bot that uses an external agent over the Go Text Protocol."""
import subprocess
import time
import pyspiel
class CommandError(Exception):
"""An error message returned from the GTP bot."""
class GTPBot(pyspiel.Bot):
"""A bot that uses an external agent over GTP to get the action to play.
  The Go Text Protocol, GTP, is a text-based protocol for communication with
computer Go programs (https://www.lysator.liu.se/~gunnar/gtp/). It has also
been adopted by agents in other games including Hex and Havannah. If you need
to configure your agent in some specific way (eg time/resource limits), you
can use `gtp_cmd` to send raw commands to it.
"""
def __init__(self, game, exec_path, player_colors=("b", "w"),
suppress_stderr=True):
"""Create a Bot that runs an external binary using GTP.
Args:
game: A Game object to pull the configuration (boardsize)
exec_path: A string or list to be passed to popen to launch the binary.
player_colors: A list or tuple of names to be passed to gtp's `play`
command to tell it which player made the move.
suppress_stderr: Whether to suppress stderr from the binary.
"""
pyspiel.Bot.__init__(self)
self._process = subprocess.Popen(
exec_path, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=(subprocess.DEVNULL if suppress_stderr else None))
self._game = game
params = game.get_parameters()
if "board_size" in params:
self.gtp_cmd("boardsize", str(params["board_size"]))
if len(player_colors) != game.num_players():
raise ValueError(
("player_colors has the wrong number of players for this game. Got "
"{}, expected {}").format(len(player_colors), game.num_players()))
self._player_colors = player_colors
def __del__(self):
self.close()
def close(self):
"""Tell the game to quit and wait for it to do so, killing eventually."""
# We support closing an already closed instance, as __del__ will be called
# when the object is deleted, thus closing a potentially already closed obj.
# The hasattr is in case Popen raises and self._process doesn't exist.
if hasattr(self, "_process") and self._process is not None:
if self.running:
try:
self.gtp_cmd("quit")
except (CommandError, IOError):
pass
self._process.stdin.close()
self._process.stdout.close()
_shutdown_proc(self._process, 3)
self._process = None
def gtp_cmd(self, *args):
"""Send commands directly to the game, and get the response as a string."""
cmd = " ".join([str(a) for a in args]).encode()
self._process.stdin.write(cmd + b"\n")
response = ""
while True:
line = self._process.stdout.readline().decode()
if not line:
raise IOError("Engine closed the connection.")
if line == "\n":
if response:
break # A blank line signifies end of response.
else:
continue # Ignore leading newlines, possibly left from prev response.
response += line
if response.startswith("="):
return response[1:].strip()
else:
raise CommandError(response[1:].strip())
def inform_action(self, state, player_id, action):
"""Let the bot know of the other agent's actions."""
self.gtp_cmd("play", self._player_colors[player_id],
state.action_to_string(action))
def step(self, state):
"""Returns the selected action and steps the internal state forward."""
return state.string_to_action(self.gtp_cmd(
"genmove", self._player_colors[state.current_player()]))
def restart(self):
self.gtp_cmd("clear_board")
def restart_at(self, state):
self.restart()
new_state = self._game.new_initial_state()
for action in state.history():
self.inform_action(new_state, new_state.current_player(),
new_state.action_to_string(action))
new_state.apply_action(action)
@property
def name(self):
"""The name reported by the agent."""
return self.gtp_cmd("name")
@property
def version(self):
"""The version reported by the agent."""
return self.gtp_cmd("version")
@property
def running(self):
"""Whether the agent binary is still running."""
# poll returns None if it's running, otherwise the exit code.
return self._process and (self._process.poll() is None)
@property
def pid(self):
"""The pid of the agent binary."""
return self._process.pid if self.running else None
def _shutdown_proc(p, timeout):
"""Waits for a proc to shut down; terminates or kills it after `timeout`."""
freq = 10 # how often to check per second
for _ in range(1 + timeout * freq):
p.terminate()
ret = p.poll()
if ret is not None:
return ret
time.sleep(1 / freq)
p.kill()
return p.wait()
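# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows how the bot above can drive an external GTP engine. The engine binary
# ("gnugo"), its "--mode gtp" flag, and the "go" game parameters are
# assumptions for this example only; any GTP-speaking binary should work. It
# also assumes the bot class defined above is exposed as `GTPBot`.
def _example_gtp_session():
  game = pyspiel.load_game("go", {"board_size": 9})
  bot = GTPBot(game, exec_path=["gnugo", "--mode", "gtp"])
  print("Engine:", bot.name, bot.version)  # GTP "name" / "version" queries.
  bot.gtp_cmd("komi", "7.5")            # Raw command for engine-specific setup.
  state = game.new_initial_state()
  action = bot.step(state)              # Asks the engine for a move ("genmove").
  state.apply_action(action)
  bot.close()                           # Sends "quit" and shuts the process down.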
| open_spiel-master | open_spiel/python/bots/gtp.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for Information Set MCTS bot.
This test mimics the basic C++ tests in algorithms/is_mcts_test.cc.
"""
# pylint: disable=g-unreachable-test-method
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import evaluate_bots
import pyspiel
SEED = 12983641
class ISMCTSBotTest(absltest.TestCase):
def ismcts_play_game(self, game):
evaluator = pyspiel.RandomRolloutEvaluator(1, SEED)
for final_policy_type in [
pyspiel.ISMCTSFinalPolicyType.NORMALIZED_VISIT_COUNT,
pyspiel.ISMCTSFinalPolicyType.MAX_VISIT_COUNT,
pyspiel.ISMCTSFinalPolicyType.MAX_VALUE
]:
bot = pyspiel.ISMCTSBot(SEED, evaluator, 5.0, 1000, -1, final_policy_type,
False, False)
bots = [bot] * game.num_players()
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
bot = pyspiel.ISMCTSBot(SEED, evaluator, 5.0, 1000, 10, final_policy_type,
False, False)
bots = [bot] * game.num_players()
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
bot = pyspiel.ISMCTSBot(SEED, evaluator, 5.0, 1000, 10, final_policy_type,
True, True)
bots = [bot] * game.num_players()
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
def test_basic_sim_kuhn(self):
game = pyspiel.load_game("kuhn_poker")
self.ismcts_play_game(game)
game = pyspiel.load_game("kuhn_poker(players=3)")
self.ismcts_play_game(game)
def test_basic_sim_leduc(self):
game = pyspiel.load_game("leduc_poker")
self.ismcts_play_game(game)
game = pyspiel.load_game("leduc_poker(players=3)")
self.ismcts_play_game(game)
if __name__ == "__main__":
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/bots/is_mcts_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.bots.bluechip_bridge_uncontested_bidding."""
from absl.testing import absltest
from open_spiel.python.bots import bluechip_bridge
import pyspiel
class BluechipBridgeWrapperTest(absltest.TestCase):
def test_complete_deal_east(self):
# Plays a complete deal, with the mock external bot playing East.
# The deal is as follows:
#
# Vul: None
# S AKJ8
# H 4
# D JT9532
# C 32
# S 3 S Q9
# H KQJ8762 H AT5
# D K4 D A87
# C KQ4 C AJT96
# S T76542
# H 93
# D Q6
# C 875
#
# West North East South
# Pass 1N Pass
# 2D Pass 2H Pass
# 3S Dbl 4C Pass
# 4D Pass 4N Pass
# 5D Pass 6H Pass
# Pass Pass
#
# N E S W N E S
# S7 S3 SK S9
# DJ D8 D6 DK
# H2 H4 HT H9
# H5 H3 H6 C3
# C4 C2 CT C5
# C6 C7 CQ D2
# CK D3 CJ C8
# D4 D5 DA DQ
# C9 S2 H7 S8
# HK SJ HA S4
# CA S5 H8 D9
# HQ DT D7 S6
# HJ SA SQ ST
#
# Declarer tricks: 12
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
mock_client = absltest.mock.Mock(
**{
'read_line.side_effect': [
'Connecting "WBridge5" as ANYPL using protocol version 18',
'EAST ready for teams',
'EAST ready to start',
'EAST ready for deal',
'EAST ready for cards',
"EAST ready for NORTH's bid",
'EAST bids 1NT',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
'EAST bids 2H',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
'EAST bids 4C Alert.',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
'EAST bids 4NT',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
'EAST bids 6H',
"EAST ready for SOUTH's bid",
"EAST ready for WEST's bid",
"EAST ready for NORTH's bid",
"EAST ready for SOUTH's card to trick 1",
'EAST ready for dummy',
'WEST plays 3s',
"EAST ready for NORTH's card to trick 1",
'EAST plays 9s',
"EAST ready for NORTH's card to trick 2",
'EAST plays 8d',
"EAST ready for SOUTH's card to trick 2",
'WEST plays kd',
'WEST plays 2h',
"EAST ready for NORTH's card to trick 3",
'EAST plays th',
"EAST ready for SOUTH's card to trick 3",
'EAST plays 5h',
"EAST ready for SOUTH's card to trick 4",
'WEST plays 6h',
"EAST ready for NORTH's card to trick 4",
'WEST plays 4c',
"EAST ready for NORTH's card to trick 5",
'EAST plays tc',
"EAST ready for SOUTH's card to trick 5",
'EAST plays 6c',
"EAST ready for SOUTH's card to trick 6",
'WEST plays qc',
"EAST ready for NORTH's card to trick 6",
'WEST plays kc',
"EAST ready for NORTH's card to trick 7",
'EAST plays jc',
"EAST ready for SOUTH's card to trick 7",
'WEST plays 4d',
"EAST ready for NORTH's card to trick 8",
'EAST plays ad',
"EAST ready for SOUTH's card to trick 8",
'EAST plays 9c',
"EAST ready for SOUTH's card to trick 9",
'WEST plays 7h',
"EAST ready for NORTH's card to trick 9",
'WEST plays kh',
"EAST ready for NORTH's card to trick 10",
'EAST plays ah',
"EAST ready for SOUTH's card to trick 10",
'EAST plays ac',
"EAST ready for SOUTH's card to trick 11",
'WEST plays 8h',
"EAST ready for NORTH's card to trick 11",
'WEST plays qh',
"EAST ready for NORTH's card to trick 12",
'EAST plays 7d',
"EAST ready for SOUTH's card to trick 12",
'WEST plays jh',
"EAST ready for NORTH's card to trick 13",
'EAST plays qs',
]
})
bot = bluechip_bridge.BlueChipBridgeBot(game, 1, lambda: mock_client)
state = game.new_initial_state()
history = [
33, 25, 3, 44, 47, 28, 23, 46, 1, 43, 30, 26, 29, 48, 24, 42, 13, 21,
17, 8, 5, 34, 6, 7, 37, 49, 11, 38, 51, 32, 20, 9, 0, 14, 35, 22, 10,
50, 15, 45, 39, 16, 12, 18, 27, 31, 41, 40, 4, 36, 19, 2, 52, 59, 52,
61, 52, 62, 52, 68, 53, 70, 52, 71, 52, 74, 52, 76, 52, 82, 52, 52, 52,
23, 7, 47, 31, 37, 25, 17, 45, 2, 10, 34, 30, 14, 6, 18, 4, 8, 0, 32,
12, 16, 20, 40, 1, 44, 5, 36, 24, 9, 13, 49, 41, 28, 3, 22, 27, 46, 39,
50, 11, 48, 15, 26, 29, 42, 33, 21, 19, 38, 51, 43, 35
]
# Check the bot provides the expected actions
for action in history:
if state.current_player() == 1:
bot_action = bot.step(state)
self.assertEqual(action, bot_action)
state.apply_action(action)
# Check the session went as expected; send_line calls are us sending
# data to the (mock) external bot.
mock_client.assert_has_calls([
absltest.mock.call.read_line(),
absltest.mock.call.send_line('EAST ("WBridge5") seated'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
'Teams: N/S "north-south" E/W "east-west"'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('start of board'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
'Board number 1. Dealer NORTH. Neither vulnerable.'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
"EAST's cards: C A J T 9 6. D A 8 7. H A T 5. S Q 9."),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST bids 2D'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST bids 3S'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH DOUBLES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST bids 4D'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST bids 5D'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('WEST PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH PASSES'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 7s'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line(
"Dummy's cards: C K Q 4. D K 4. H K Q J 8 7 6 2. S 3."),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays ks'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays jd'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 6d'),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 4h'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 9h'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 3h'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 3c'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 2c'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 5c'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 7c'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 2d'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 3d'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 8c'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 5d'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays qd'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 2s'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 8s'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays js'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 4s'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 5s'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays 9d'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays td'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('SOUTH plays 6s'),
absltest.mock.call.send_line('EAST to lead'),
absltest.mock.call.read_line(),
absltest.mock.call.read_line(),
absltest.mock.call.send_line('NORTH plays as'),
absltest.mock.call.read_line(),
])
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/bots/bluechip_bridge_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements a generic environment for iterated normal form games.
It does so with automatic vectorization. Along with the environment, it also
provides pre-defined factory functions for common games such as the iterated
prisoner's dilemma and the iterated matching pennies.
"""
import numpy as np
from pyspiel import PlayerId
from open_spiel.python.rl_environment import Environment
from open_spiel.python.rl_environment import StepType
from open_spiel.python.rl_environment import TimeStep
class IteratedMatrixGame(Environment):
"""Environment for iterated normal form games.
Supports automatic vectorization.
"""
def __init__(
self,
payoff_matrix: np.ndarray,
iterations: int,
batch_size=1,
include_remaining_iterations=True,
):
# pylint: disable=super-init-not-called
self._payoff_matrix = np.array(payoff_matrix, dtype=np.float32)
self._iterations = iterations
self._num_players = payoff_matrix.ndim - 1
self._batch_size = batch_size
self._include_remaining_iterations = include_remaining_iterations
self._t = 0
self._actions = np.arange(
np.prod(self.action_spec()['num_actions'])
).reshape(*[payoff_matrix.shape[p] for p in range(self._num_players)])
def one_hot(self, x, n):
return np.eye(n)[x]
@property
def num_players(self):
return self._num_players
def observation_spec(self):
info_state_spec, legal_actions_spec = [], []
for i in range(self._num_players):
num_actions = np.prod(self._payoff_matrix.shape[:-1]) + 1
if self._include_remaining_iterations:
num_actions += 1
info_state_spec.append([num_actions])
legal_actions_spec.append(self._payoff_matrix.shape[i])
return {
'info_state': tuple(info_state_spec),
'legal_actions': tuple(legal_actions_spec),
'current_player': (),
}
def action_spec(self):
num_actions, mins, maxs = [], [], []
for i in range(self._num_players):
num_actions.append(self._payoff_matrix.shape[i])
mins.append(0)
maxs.append(self._payoff_matrix.shape[i] - 1)
return {
'num_actions': tuple(num_actions),
'min': tuple(mins),
'max': tuple(maxs),
'dtype': int,
}
def step(self, actions: np.ndarray):
if actions.ndim == 1:
actions = actions[None, :]
payoffs = self._payoff_matrix[tuple(actions.T)]
s1 = self.one_hot(
self._actions[tuple(actions.T)] + 1, n=np.max(self._actions) + 2
)
s2 = self.one_hot(
self._actions[tuple(actions[..., ::-1].T)] + 1,
n=np.max(self._actions) + 2,
)
rewards = [
np.squeeze(p)
for p in np.split(
payoffs, indices_or_sections=self._num_players, axis=1
)
]
discounts = [np.ones_like(r) for r in rewards]
if self._t == self._iterations - 1:
step_type = StepType.LAST
else:
step_type = StepType.MID
self._t += 1
remaining_iters = float((self._iterations - self._t)) / self._iterations
info_state = [s1, s2]
if self._include_remaining_iterations:
info_state = np.concatenate(
[
info_state,
np.full((self._batch_size, 1), fill_value=remaining_iters),
],
axis=-1,
)
legal_actions = self._get_legal_actions()
return TimeStep(
observations={
'info_state': info_state,
'legal_actions': legal_actions,
'batch_size': actions.shape[0],
'current_player': PlayerId.SIMULTANEOUS,
},
rewards=rewards,
discounts=discounts,
step_type=step_type,
)
def _get_legal_actions(self):
legal_actions = []
for p in range(self.num_players):
actions = np.arange(self.action_spec()['num_actions'][p])
legal_actions.append([actions] * self._batch_size)
return np.array(legal_actions)
def reset(self):
self._t = 0
info_state = np.zeros((
self.num_players,
self._batch_size,
*self.observation_spec()['info_state'][0],
))
info_state[..., 0] = 1.0
if self._include_remaining_iterations:
info_state[..., -1] = 1.0
rewards = np.squeeze(np.zeros((self.num_players, self._batch_size)))
discounts = np.squeeze(np.ones((self.num_players, self._batch_size)))
return TimeStep(
observations={
'info_state': [
np.squeeze(s).astype(np.float32) for s in info_state
],
'legal_actions': self._get_legal_actions(),
'batch_size': self._batch_size,
'current_player': PlayerId.SIMULTANEOUS,
},
rewards=[np.squeeze(a).astype(np.float32) for a in rewards],
discounts=[np.squeeze(a).astype(np.float32) for a in discounts],
step_type=StepType.FIRST,
)
def IteratedPrisonersDilemma(iterations: int, batch_size=1):
return IteratedMatrixGame(
payoff_matrix=np.array([[[-1, -1], [-3, 0]], [[0, -3], [-2, -2]]]),
iterations=iterations,
batch_size=batch_size,
include_remaining_iterations=False,
)
def IteratedMatchingPennies(iterations: int, batch_size=1):
return IteratedMatrixGame(
payoff_matrix=np.array([[[1, -1], [-1, 1]], [[-1, 1], [1, -1]]]),
iterations=iterations,
batch_size=batch_size,
include_remaining_iterations=False,
)
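# --- Usage sketch (added for illustration; not part of the original module) ---
# One vectorized step of the iterated prisoner's dilemma with a batch of two
# independent games. Actions are indexed [batch, player]; with the payoff
# matrix above, action 0 is "cooperate" and action 1 is "defect".
def _example_prisoners_dilemma_step():
  env = IteratedPrisonersDilemma(iterations=5, batch_size=2)
  env.reset()
  actions = np.array([
      [0, 1],  # Game 1: player 0 cooperates, player 1 defects.
      [1, 1],  # Game 2: both players defect.
  ])
  timestep = env.step(actions)
  # rewards is a per-player list of per-batch arrays:
  # player 0 -> [-3., -2.], player 1 -> [0., -2.]
  print(timestep.rewards)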
| open_spiel-master | open_spiel/python/environments/iterated_matrix_game.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.environment.catch."""
import random
from absl.testing import absltest
from open_spiel.python import rl_environment
from open_spiel.python.environments import catch
def _select_random_legal_action(time_step):
cur_legal_actions = time_step.observations["legal_actions"][0]
action = random.choice(cur_legal_actions)
return action
class CatchEnvTest(absltest.TestCase):
def test_obs_spec(self):
env = catch.Environment()
obs_specs = env.observation_spec()
self.assertLen(obs_specs, 3)
self.assertCountEqual(obs_specs.keys(),
["current_player", "info_state", "legal_actions"])
def test_action_spec(self):
env = catch.Environment()
action_spec = env.action_spec()
self.assertLen(action_spec, 4)
self.assertCountEqual(action_spec.keys(),
["dtype", "max", "min", "num_actions"])
self.assertEqual(action_spec["num_actions"], 3)
self.assertEqual(action_spec["dtype"], int)
def test_action_interfaces(self):
env = catch.Environment(height=2)
time_step = env.reset()
# Singleton list works
action_list = [0]
time_step = env.step(action_list)
self.assertEqual(time_step.step_type, rl_environment.StepType.MID)
# Integer works
action_int = 0
time_step = env.step(action_int)
self.assertEqual(time_step.step_type, rl_environment.StepType.LAST)
def test_many_runs(self):
random.seed(123)
for _ in range(20):
height = random.randint(2, 10)
env = catch.Environment(height=height)
time_step = env.reset()
self.assertEqual(time_step.step_type, rl_environment.StepType.FIRST)
self.assertIsNone(time_step.rewards)
action_int = _select_random_legal_action(time_step)
time_step = env.step(action_int)
self.assertEqual(time_step.step_type, rl_environment.StepType.MID)
self.assertEqual(time_step.rewards, [0])
for _ in range(1, height):
action_int = _select_random_legal_action(time_step)
time_step = env.step(action_int)
self.assertEqual(time_step.step_type, rl_environment.StepType.LAST)
self.assertIn(time_step.rewards[0], [-1, 0, 1])
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/environments/catch_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/environments/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.environment.cliff_walking."""
import random
from absl.testing import absltest
from open_spiel.python import rl_environment
from open_spiel.python.environments import cliff_walking
def _select_random_legal_action(time_step):
cur_legal_actions = time_step.observations["legal_actions"][0]
action = random.choice(cur_legal_actions)
return action
class CliffWalkingEnvTest(absltest.TestCase):
def test_obs_spec(self):
env = cliff_walking.Environment()
obs_specs = env.observation_spec()
self.assertLen(obs_specs, 3)
self.assertCountEqual(obs_specs.keys(),
["current_player", "info_state", "legal_actions"])
self.assertEqual(obs_specs["info_state"], (2,))
def test_action_spec(self):
env = cliff_walking.Environment()
action_spec = env.action_spec()
self.assertLen(action_spec, 4)
self.assertCountEqual(action_spec.keys(),
["dtype", "max", "min", "num_actions"])
self.assertEqual(action_spec["num_actions"], 4)
self.assertEqual(action_spec["dtype"], int)
def test_action_interfaces(self):
env = cliff_walking.Environment()
time_step = env.reset()
# Singleton list works
action_list = [cliff_walking.UP]
time_step = env.step(action_list)
self.assertEqual(time_step.step_type, rl_environment.StepType.MID)
# Integer works
action_int = cliff_walking.UP
time_step = env.step(action_int)
self.assertEqual(time_step.step_type, rl_environment.StepType.MID)
def test_many_runs(self):
random.seed(1234)
for _ in range(30):
height = random.randint(3, 10)
width = random.randint(3, 10)
env = cliff_walking.Environment(height=height, width=width)
time_step = env.reset()
self.assertEqual(time_step.step_type, rl_environment.StepType.FIRST)
self.assertIsNone(time_step.rewards)
action_int = cliff_walking.UP
time_step = env.step(action_int)
self.assertEqual(time_step.step_type, rl_environment.StepType.MID)
self.assertEqual(time_step.rewards, [-1.0])
action_int = cliff_walking.RIGHT
for _ in range(1, width):
time_step = env.step(action_int)
self.assertEqual(time_step.step_type, rl_environment.StepType.MID)
self.assertEqual(time_step.rewards, [-1.0])
action_int = cliff_walking.DOWN
time_step = env.step(action_int)
self.assertEqual(time_step.step_type, rl_environment.StepType.LAST)
self.assertEqual(time_step.rewards, [-1.0])
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/environments/cliff_walking_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A cliff walking single agent reinforcement learning environment."""
import numpy as np
from open_spiel.python import rl_environment
# Actions
RIGHT, UP, LEFT, DOWN = range(4)
class Environment(object):
r"""A cliff walking reinforcement learning environment.
This is a deterministic environment that can be used to test RL algorithms.
  Note there are *no illegal moves* in this environment--if the agent is on the
  edge of the grid and takes an action which would yield an invalid position,
  the action is ignored (as if there were walls surrounding the grid).
Cliff example for height=3 and width=5:
| | | | | |
| | | | | |
| S | x | x | x | G |
where `S` is always the starting position, `G` is always the goal and `x`
represents the zone of high negative reward to be avoided. For this instance,
the optimum policy is depicted as follows:
| | | | | |
|-->|-->|-->|-->|\|/|
|/|\| x | x | x | G |
yielding a reward of -6 (minus 1 per time step).
  See page 132 of Rich Sutton's book for details:
http://www.incompleteideas.net/book/bookdraft2018mar21.pdf
"""
def __init__(self, height=4, width=8, discount=1.0, max_t=100):
if height < 2 or width < 3:
raise ValueError("height must be >= 2 and width >= 3.")
self._height = height
self._width = width
self._legal_actions = [RIGHT, UP, LEFT, DOWN]
self._should_reset = True
self._max_t = max_t
# Discount returned at non-initial steps.
self._discounts = [discount] * self.num_players
def reset(self):
"""Resets the environment."""
self._should_reset = False
self._time_counter = 0
self._state = np.array([self._height - 1, 0])
observations = {
"info_state": [self._state.copy()],
"legal_actions": [self._legal_actions],
"current_player": 0,
}
return rl_environment.TimeStep(
observations=observations,
rewards=None,
discounts=None,
step_type=rl_environment.StepType.FIRST)
def step(self, actions):
"""Updates the environment according to `actions` and returns a `TimeStep`.
Args:
actions: A singleton list with an integer, or an integer, representing the
action the agent took.
Returns:
A `rl_environment.TimeStep` namedtuple containing:
observation: singleton list of dicts containing player observations,
each corresponding to `observation_spec()`.
reward: singleton list containing the reward at this timestep, or None
if step_type is `rl_environment.StepType.FIRST`.
discount: singleton list containing the discount in the range [0, 1], or
None if step_type is `rl_environment.StepType.FIRST`.
step_type: A `rl_environment.StepType` value.
"""
if self._should_reset:
return self.reset()
self._time_counter += 1
if isinstance(actions, list):
action = actions[0]
elif isinstance(actions, int):
action = actions
else:
raise ValueError("Action not supported.", actions)
dx = 0
dy = 0
if action == LEFT:
dx -= 1
elif action == RIGHT:
dx += 1
if action == UP:
dy -= 1
elif action == DOWN:
dy += 1
self._state += np.array([dy, dx])
self._state = self._state.clip(0, [self._height - 1, self._width - 1])
done = self._is_pit(self._state) or self._is_goal(self._state)
done = done or self._time_counter >= self._max_t
# Return observation
step_type = (
rl_environment.StepType.LAST if done else rl_environment.StepType.MID)
self._should_reset = step_type == rl_environment.StepType.LAST
observations = {
"info_state": [self._state.copy()],
"legal_actions": [self._legal_actions],
"current_player": 0,
}
return rl_environment.TimeStep(
observations=observations,
rewards=[self._get_reward(self._state)],
discounts=self._discounts,
step_type=step_type)
def _is_goal(self, pos):
"""Check if position is bottom right corner of grid."""
return pos[0] == self._height - 1 and pos[1] == self._width - 1
def _is_pit(self, pos):
"""Check if position is in bottom row between start and goal."""
return (pos[1] > 0 and pos[1] < self._width - 1 and
pos[0] == self._height - 1)
def _get_reward(self, pos):
if self._is_pit(pos):
return -100.0
else:
return -1.0
def observation_spec(self):
"""Defines the observation provided by the environment.
Each dict member will contain its expected structure and shape.
Returns:
A specification dict describing the observation fields and shapes.
"""
return dict(
info_state=tuple([2]),
legal_actions=(len(self._legal_actions),),
current_player=(),
)
def action_spec(self):
"""Defines action specifications.
Specifications include action boundaries and their data type.
Returns:
A specification dict containing action properties.
"""
return dict(
num_actions=len(self._legal_actions),
min=min(self._legal_actions),
max=max(self._legal_actions),
dtype=int,
)
@property
def num_players(self):
return 1
@property
def is_turn_based(self):
return False
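# --- Usage sketch (added for illustration; not part of the original module) ---
# Replays the optimal policy from the class docstring on a height=3, width=5
# grid: step up off the start, walk right along the middle row, then step down
# onto the goal, for a total return of -6 (one -1 reward per time step).
def _example_optimal_walk():
  env = Environment(height=3, width=5)
  env.reset()
  total_return = 0.0
  for action in [UP, RIGHT, RIGHT, RIGHT, RIGHT, DOWN]:
    timestep = env.step(action)
    total_return += timestep.rewards[0]
  print(total_return)  # -6.0
  print(timestep.step_type == rl_environment.StepType.LAST)  # True: goal reached.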
| open_spiel-master | open_spiel/python/environments/cliff_walking.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Catch reinforcement learning environment."""
import collections
import numpy as np
from open_spiel.python import rl_environment
# Actions
NOOP = 0
LEFT = 1
RIGHT = 2
_Point = collections.namedtuple("Point", ["x", "y"])
class Environment(object):
"""A catch reinforcement learning environment.
  The implementation enforces legal actions: trying to move the paddle towards
  a wall it is already next to is an invalid action, and an error is purposely
  raised.
"""
def __init__(self, discount=1.0, width=5, height=10, seed=None):
self._rng = np.random.RandomState(seed)
self._width = width
self._height = height
self._should_reset = True
self._num_actions = 3
# Discount returned at non-initial steps.
self._discounts = [discount] * self.num_players
def reset(self):
"""Resets the environment."""
self._should_reset = False
self._ball_pos = _Point(x=self._rng.randint(0, self._width - 1), y=0)
self._paddle_pos = _Point(
x=self._rng.randint(0, self._width - 1), y=self._height - 1)
legal_actions = [NOOP]
if self._paddle_pos.x > 0:
legal_actions.append(LEFT)
if self._paddle_pos.x < self._width - 1:
legal_actions.append(RIGHT)
observations = {
"info_state": [self._get_observation()],
"legal_actions": [legal_actions],
"current_player": 0,
}
return rl_environment.TimeStep(
observations=observations,
rewards=None,
discounts=None,
step_type=rl_environment.StepType.FIRST)
def step(self, actions):
"""Updates the environment according to `actions` and returns a `TimeStep`.
Args:
actions: A singleton list with an integer, or an integer, representing the
action the agent took.
Returns:
A `rl_environment.TimeStep` namedtuple containing:
observation: singleton list of dicts containing player observations,
each corresponding to `observation_spec()`.
reward: singleton list containing the reward at this timestep, or None
if step_type is `rl_environment.StepType.FIRST`.
discount: singleton list containing the discount in the range [0, 1], or
None if step_type is `rl_environment.StepType.FIRST`.
step_type: A `rl_environment.StepType` value.
"""
if self._should_reset:
return self.reset()
if isinstance(actions, list):
action = actions[0]
elif isinstance(actions, int):
action = actions
else:
raise ValueError("Action not supported.", actions)
# Update paddle position
x, y = self._paddle_pos.x, self._paddle_pos.y
if action == LEFT:
x -= 1
elif action == RIGHT:
x += 1
elif action != NOOP:
raise ValueError("unrecognized action ", action)
assert 0 <= x < self._width, (
"Illegal action detected ({}), new state: ({},{})".format(action, x, y))
self._paddle_pos = _Point(x, y)
# Update ball position
x, y = self._ball_pos.x, self._ball_pos.y
if y == self._height - 1:
done = True
reward = 1.0 if x == self._paddle_pos.x else -1.0
else:
done = False
y += 1
reward = 0.0
self._ball_pos = _Point(x, y)
# Return observation
step_type = (
rl_environment.StepType.LAST if done else rl_environment.StepType.MID)
self._should_reset = step_type == rl_environment.StepType.LAST
legal_actions = [NOOP]
if self._paddle_pos.x > 0:
legal_actions.append(LEFT)
if self._paddle_pos.x < self._width - 1:
legal_actions.append(RIGHT)
observations = {
"info_state": [self._get_observation()],
"legal_actions": [legal_actions],
"current_player": 0,
}
return rl_environment.TimeStep(
observations=observations,
rewards=[reward],
discounts=self._discounts,
step_type=step_type)
def _get_observation(self):
board = np.zeros((self._height, self._width), dtype=np.float32)
board[self._ball_pos.y, self._ball_pos.x] = 1.0
board[self._paddle_pos.y, self._paddle_pos.x] = 1.0
return board.flatten()
def observation_spec(self):
"""Defines the observation provided by the environment.
Each dict member will contain its expected structure and shape.
Returns:
A specification dict describing the observation fields and shapes.
"""
return dict(
info_state=tuple([self._height * self._width]),
legal_actions=(self._num_actions,),
current_player=(),
)
def action_spec(self):
"""Defines action specifications.
Specifications include action boundaries and their data type.
Returns:
A specification dict containing action properties.
"""
return dict(num_actions=self._num_actions, min=0, max=2, dtype=int)
@property
def num_players(self):
return 1
@property
def is_turn_based(self):
return False
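# --- Usage sketch (added for illustration; not part of the original module) ---
# Plays one episode with uniformly random *legal* actions. The legal actions
# are re-read every step because, as noted in the class docstring, moving into
# a wall here raises an error instead of being ignored.
def _example_random_episode():
  env = Environment(height=10, width=5, seed=42)
  timestep = env.reset()
  while timestep.step_type != rl_environment.StepType.LAST:
    legal_actions = timestep.observations["legal_actions"][0]
    timestep = env.step([np.random.choice(legal_actions)])
  print(timestep.rewards[0])  # 1.0 if the paddle caught the ball, else -1.0.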
| open_spiel-master | open_spiel/python/environments/catch.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.double_oracle."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import double_oracle
import pyspiel
class DoubleOracleTest(absltest.TestCase):
def test_rock_paper_scissors(self):
game = pyspiel.load_matrix_game("matrix_rps")
solver = double_oracle.DoubleOracleSolver(game)
solution, iteration, value = solver.solve(initial_strategies=[[0], [0]])
np.testing.assert_allclose(solution[0], np.ones(3)/3.)
np.testing.assert_allclose(solution[1], np.ones(3)/3.)
self.assertEqual(iteration, 3)
self.assertAlmostEqual(value, 0.0)
def test_single_step(self):
game = pyspiel.load_matrix_game("matrix_rps")
solver = double_oracle.DoubleOracleSolver(game)
solver.subgame_strategies = [[0], [0]]
best_response, best_response_utility = solver.step()
self.assertListEqual(best_response, [1, 1])
self.assertListEqual(best_response_utility, [1.0, 1.0])
def test_kuhn_poker(self):
game = pyspiel.extensive_to_matrix_game(pyspiel.load_game("kuhn_poker"))
solver = double_oracle.DoubleOracleSolver(game)
solution, iteration, value = solver.solve(initial_strategies=[[0], [0]])
# check if solution is Nash
exp_utilty = solution[0] @ solver.payoffs @ solution[1]
self.assertAlmostEqual(max(solver.payoffs[0] @ solution[1]), exp_utilty[0])
self.assertAlmostEqual(max(solution[0] @ solver.payoffs[1]), exp_utilty[1])
self.assertEqual(iteration, 8)
self.assertAlmostEqual(value, 0.0)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/double_oracle_test.py |