python_code | repo_name | file_path
---|---|---|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that Mean Field Games are implemented properly.
These tests are intended to help developers to write mean field games that
satisfy most of the unspecified constraints assumed by the following algorithms:
- python/mfg/algorithms/policy_value.py
- python/mfg/algorithms/nash_conv.py
- python/mfg/algorithms/mirror_descent.py
- python/mfg/algorithms/fictitious_play.py
- python/mfg/algorithms/distribution.py
- python/mfg/algorithms/best_response_value.py
- python/rl_environment.py
These tests are not exhaustive and will be updated with time.
"""
import random
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.mfg import games as mfg_games # pylint:disable=unused-import
from open_spiel.python.mfg.algorithms import distribution
import pyspiel
FLAGS = flags.FLAGS
# Use a small depth limit to keep the length of the test reasonable.
flags.DEFINE_integer(
'get_all_states_depth_limit', 10,
'Depth limit of getting all the states (-1 for unlimited)')
flags.DEFINE_integer('rl_env_simulations', 10,
'Number of simulations for the RL environment tests')
def _get_next_states(state, next_states, to_string):
"""Extract non-chance states for a subgame into the all_states dict."""
is_mean_field = state.current_player() == pyspiel.PlayerId.MEAN_FIELD
if state.is_chance_node():
# Add only if not already present
for action, _ in state.chance_outcomes():
next_state = state.child(action)
state_str = to_string(next_state)
if state_str not in next_states:
next_states[state_str] = next_state
if is_mean_field:
support = state.distribution_support()
next_state = state.clone()
support_length = len(support)
# update with a dummy distribution
next_state.update_distribution(
[1.0 / support_length for _ in range(support_length)])
state_str = to_string(next_state)
if state_str not in next_states:
next_states[state_str] = next_state
if int(state.current_player()) >= 0:
for action in state.legal_actions():
next_state = state.child(action)
state_str = to_string(next_state)
if state_str not in next_states:
next_states[state_str] = next_state
def _next_states(states, to_string):
next_states = {}
for state in states:
_get_next_states(state, next_states, to_string)
return set(next_states.keys()), set(next_states.values())
def type_from_states(states):
"""Get node type of a list of states and assert they are the same."""
types = [state.get_type() for state in states]
assert len(set(types)) == 1
return types[0]
class FiniteHorizonTest(parameterized.TestCase):
@parameterized.parameters(
{'game_name': 'python_mfg_crowd_modelling'},
{'game_name': 'mfg_crowd_modelling'},
{'game_name': 'mfg_garnet'},
{'game_name': 'mfg_crowd_modelling_2d'},
{'game_name': 'python_mfg_periodic_aversion'},
{'game_name': 'python_mfg_predator_prey'},
)
def test_is_finite_horizon(self, game_name):
"""Check that the game has no loop."""
game = pyspiel.load_game(game_name)
states = set(game.new_initial_states())
def to_string(s):
return s.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)
all_states_key = set(to_string(state) for state in states)
while type_from_states(states) != pyspiel.StateType.TERMINAL:
new_states_key, states = _next_states(states, to_string)
self.assertEmpty(all_states_key.intersection(new_states_key))
all_states_key.update(new_states_key)
@parameterized.parameters(
{'game_name': 'python_mfg_crowd_modelling'},
{'game_name': 'mfg_crowd_modelling'},
{'game_name': 'mfg_garnet'},
{'game_name': 'mfg_crowd_modelling_2d'},
{'game_name': 'python_mfg_periodic_aversion'},
{'game_name': 'python_mfg_predator_prey'},
)
def test_has_at_least_an_action(self, game_name):
"""Check that all population's state have at least one action."""
game = pyspiel.load_game(game_name)
def to_string(s):
return s.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)
states = get_all_states.get_all_states(
game,
depth_limit=FLAGS.get_all_states_depth_limit,
include_terminals=False,
include_chance_states=False,
include_mean_field_states=False,
to_string=to_string)
for state in states.values():
self.assertNotEmpty(state.legal_actions())
@parameterized.parameters(
{'game_name': 'python_mfg_crowd_modelling'},
{'game_name': 'mfg_crowd_modelling'},
{'game_name': 'mfg_garnet'},
{'game_name': 'mfg_crowd_modelling_2d'},
{'game_name': 'python_mfg_periodic_aversion'},
{'game_name': 'python_mfg_predator_prey'},
)
def test_rl_environment(self, game_name):
"""Check that the RL environment runs for a few trajectories."""
game = pyspiel.load_game(game_name)
uniform_policy = policy.UniformRandomPolicy(game)
mfg_dist = distribution.DistributionPolicy(game, uniform_policy)
envs = [
rl_environment.Environment(
game, mfg_distribution=mfg_dist, mfg_population=p)
for p in range(game.num_players())
]
for p, env in enumerate(envs):
for _ in range(FLAGS.rl_env_simulations):
time_step = env.reset()
while not time_step.last():
a = random.choice(time_step.observations['legal_actions'][p])
time_step = env.step([a])
env = envs[0]
self.assertEqual(env.mfg_distribution, mfg_dist)
# Update the distribution.
new_mfg_dist = distribution.DistributionPolicy(game, uniform_policy)
env.update_mfg_distribution(new_mfg_dist)
self.assertEqual(env.mfg_distribution, new_mfg_dist)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/tests/mfg_implementation_test/mfg_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tabular representation of a distribution for a game."""
from typing import Dict, Optional
from open_spiel.python.mfg import distribution
import pyspiel
DistributionDict = Dict[str, float]
class TabularDistribution(distribution.ParametricDistribution):
"""Distribution that uses a dictionary to store the values of the states."""
def __init__(self, game: pyspiel.Game):
self._distribution: DistributionDict = {}
super().__init__(game)
def value(self, state: pyspiel.State) -> float:
return self.value_str(self.state_to_str(state))
def value_str(self,
state_str: str,
default_value: Optional[float] = None) -> float:
"""Returns the probability of the distribution on the state string given.
Args:
state_str: A string.
default_value: If not None, return this value if the state is not in the
support of the distribution.
Returns:
A `float`.
Raises:
ValueError: If the state has not been seen by the distribution and no
default value has been passed to the method.
"""
if default_value is None:
try:
return self._distribution[state_str]
except KeyError as e:
raise ValueError(
f"Distribution not computed for state {state_str}") from e
return self._distribution.get(state_str, default_value)
def get_params(self) -> DistributionDict:
return self._distribution
def set_params(self, params: DistributionDict):
self._distribution = params
def state_to_str(self, state: pyspiel.State) -> str:
# TODO(author15): Consider switching to
# state.mean_field_population(). For now, this does not matter in
# practice since games don't have different observation strings for
# different player IDs.
return state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)
@property
def distribution(self) -> DistributionDict:
return self._distribution
| open_spiel-master | open_spiel/python/mfg/tabular_distribution.py |
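# Usage sketch for the TabularDistribution module above. It relies on
# DistributionPolicy (defined later in this dump) to populate the table and
# assumes the registered 'mfg_crowd_modelling' game with a uniform policy, as
# used in the tests elsewhere in this repository.
import pyspiel
from open_spiel.python import policy
from open_spiel.python.mfg.algorithms import distribution

game = pyspiel.load_game("mfg_crowd_modelling")
dist = distribution.DistributionPolicy(game, policy.UniformRandomPolicy(game))
state = game.new_initial_state()
# The distribution can be queried with a state or with its observation string.
p_state = dist.value(state)
p_str = dist.value_str(dist.state_to_str(state))
# With a default value, unseen state strings do not raise a ValueError.
p_missing = dist.value_str("some unseen state string", default_value=0.0)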
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/mfg/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Representation of a distribution for a game.
This is a standard representation for passing distributions into algorithms.
The main way of using a distribution is to call `value(state)`.
"""
import abc
from typing import Any, Optional
import pyspiel
class Distribution(abc.ABC):
"""Base class for distributions.
This represents a probability distribution over the states of a game.
Attributes:
game: the game from which this distribution is derived
"""
def __init__(self, game: pyspiel.Game):
"""Initializes a distribution.
Args:
game: the game from which this distribution is derived
"""
self.game = game
@abc.abstractmethod
def value(self, state: pyspiel.State) -> float:
"""Returns the probability of the distribution on the state.
Args:
state: A `pyspiel.State` object.
Returns:
A `float`.
"""
raise NotImplementedError()
@abc.abstractmethod
def value_str(self,
state_str: str,
default_value: Optional[float] = None) -> float:
"""Returns the probability of the distribution on the state string given.
Args:
state_str: A string.
default_value: If not None, return this value if the state is not in the
support of the distribution.
Returns:
A `float`.
"""
raise NotImplementedError()
def __call__(self, state: pyspiel.State) -> float:
"""Turns the distribution into a callable.
Args:
state: The current state of the game.
Returns:
Float: probability.
"""
return self.value(state)
class ParametricDistribution(Distribution):
"""A parametric distribution."""
@abc.abstractmethod
def get_params(self) -> Any:
"""Returns the distribution parameters."""
@abc.abstractmethod
def set_params(self, params: Any):
"""Sets the distribution parameters."""
| open_spiel-master | open_spiel/python/mfg/distribution.py |
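# Minimal sketch of a concrete subclass of the abstract Distribution above: a
# purely illustrative distribution that assigns the same mass to every state.
# It is not part of the library; it only shows which methods a subclass must
# provide.
from typing import Optional

import pyspiel
from open_spiel.python.mfg import distribution


class ConstantDistribution(distribution.Distribution):
  """Assigns a fixed probability mass to every state."""

  def __init__(self, game: pyspiel.Game, constant: float = 1.0):
    super().__init__(game)
    self._constant = constant

  def value(self, state: pyspiel.State) -> float:
    return self._constant

  def value_str(self, state_str: str,
                default_value: Optional[float] = None) -> float:
    del default_value  # Every state string gets the same mass.
    return self._constant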
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MFG utilities."""
import pickle
from open_spiel.python.utils import gfile
from open_spiel.python.mfg import distribution
def save_parametric_distribution(dist: distribution.ParametricDistribution,
filename: str):
"""Saves the parametric distribution to a Pickle file."""
with gfile.Open(filename, "wb") as f:
pickle.dump(dist.get_params(), f, protocol=pickle.DEFAULT_PROTOCOL)
| open_spiel-master | open_spiel/python/mfg/utils.py |
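# Round-trip sketch for save_parametric_distribution above. The save side
# writes only the distribution parameters (a dict for tabular distributions);
# the load side shown here is an assumption based on that format and restores
# the parameters into a fresh TabularDistribution via set_params.
import pickle

import pyspiel
from open_spiel.python import policy
from open_spiel.python.mfg import tabular_distribution
from open_spiel.python.mfg import utils
from open_spiel.python.mfg.algorithms import distribution

game = pyspiel.load_game("mfg_crowd_modelling")
dist = distribution.DistributionPolicy(game, policy.UniformRandomPolicy(game))
utils.save_parametric_distribution(dist, "/tmp/mfg_distribution.pkl")

# Assumed loading pattern: unpickle the params and inject them.
with open("/tmp/mfg_distribution.pkl", "rb") as f:
  params = pickle.load(f)
restored = tabular_distribution.TabularDistribution(game)
restored.set_params(params)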
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Representation of a value for a game.
This is a standard representation for passing value functions into algorithms.
The main way of using a value is to call `value(state)`
or `value(state, action)`.
Calling the value of a state-action pair is not allowed at a MEAN_FIELD state.
The state can be a pyspiel.State object or its string representation. For a
particular ValueFunction instance, you should use only one or the other. The
behavior may be undefined for mixed usage depending on the implementation.
"""
import collections
from typing import Union
import pyspiel
ValueFunctionState = Union[pyspiel.State, str]
class ValueFunction(object):
"""Base class for values.
A ValueFunction is something that returns a value given
a state of the world or a state and an action.
Attributes:
game: the game from which this ValueFunction is derived
"""
def __init__(self, game):
"""Initializes a value.
Args:
game: the game from which this value is derived
"""
self.game = game
def value(self, state: ValueFunctionState, action=None) -> float:
"""Returns a float representing a value.
Args:
state: A `pyspiel.State` object or its string representation.
action: may be None or a legal action
Returns:
A value for the state (or for the state-action pair, if an action is given).
"""
raise NotImplementedError()
def __call__(self, state: ValueFunctionState, action=None) -> float:
"""Turns the value into a callable.
Args:
state: A `pyspiel.State` object or its string representation.
action: may be None or a legal action
Returns:
Float: the value of the state or the state action pair.
"""
return self.value(state, action=action)
def set_value(self, state: ValueFunctionState, value: float, action=None):
"""Sets the value of the state.
Args:
state: A `pyspiel.State` object or its string representation.
value: Value of the state.
action: may be None or a legal action
"""
raise NotImplementedError()
def has(self, state: ValueFunctionState, action=None) -> bool:
"""Returns true if state(-action) has an explicit value.
Args:
state: A `pyspiel.State` object or its string representation.
action: may be None or a legal action
Returns:
True if there is an explicitly specified value.
"""
raise NotImplementedError()
def add_value(self, state, value: float, action=None):
"""Adds the value to the current value of the state.
Args:
state: A `pyspiel.State` object or its string representation.
value: Value to add.
action: may be None or a legal action
"""
self.set_value(
state, self.value(state, action=action) + value, action=action)
class TabularValueFunction(ValueFunction):
"""Tabular value function backed by a dictionary."""
def __init__(self, game):
super().__init__(game)
self._values = collections.defaultdict(float)
def value(self, state: ValueFunctionState, action=None):
return self._values[(state, action)]
def set_value(self, state: ValueFunctionState, value: float, action=None):
self._values[(state, action)] = value
def has(self, state: ValueFunctionState, action=None):
return (state, action) in self._values
| open_spiel-master | open_spiel/python/mfg/value.py |
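# Small sketch of the TabularValueFunction above. Keys may be states or their
# string representations; unset entries read as 0.0 because the backing store
# is a defaultdict(float).
import pyspiel
from open_spiel.python.mfg import value

game = pyspiel.load_game("mfg_crowd_modelling")
v = value.TabularValueFunction(game)
key = "some state string"   # illustrative key, not a real game state
v.set_value(key, 1.5)
v.add_value(key, 0.5)       # 1.5 + 0.5 = 2.0
assert v(key) == 2.0
assert v.has(key)
assert not v.has(key, action=0)  # this state-action pair was never set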
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for best_response_value."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
class BestResponseTest(parameterized.TestCase):
@parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'),
('cpp', 'mfg_crowd_modelling'))
def test_best_response(self, name):
"""Checks if the value of a policy computation works."""
game = pyspiel.load_game(name)
uniform_policy = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform_policy)
br_value = best_response_value.BestResponse(
game, dist, value.TabularValueFunction(game))
br_val = br_value(game.new_initial_state())
self.assertAlmostEqual(br_val, 30.029387484327486)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/best_response_value_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple benchmark for MFG algorithms and environments."""
import itertools
import time
from typing import Sequence
from absl import app
from absl import flags
from open_spiel.python.mfg import games # pylint: disable=unused-import
from open_spiel.python.mfg.algorithms import fictitious_play
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_list('games',
['python_mfg_crowd_modelling', 'mfg_crowd_modelling'],
'List of games to benchmark.')
flags.DEFINE_list(
'parameters', ['size:10;100', 'horizon:10;100'],
'List of parameters to sweep on (see default flag value for '
'syntax).')
def convert_param_spec(param_spec):
"""Converts 'size:10;200' into ('size', [10, 200])."""
split = param_spec.split(':', 2)
return split[0], [int(v) for v in split[1].split(';')]
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
param_names, param_values = zip(
*[convert_param_spec(spec) for spec in FLAGS.parameters])
header = (['game_name'] + list(param_names) +
['fictitious_play_iteration_time'])
timing_results = []
for game_name in FLAGS.games:
for param_tuple in itertools.product(*param_values):
result_line = [game_name] + [str(p) for p in param_tuple]
print('Computing timings for:', ' '.join(result_line))
param_dict = dict(zip(param_names, param_tuple))
game = pyspiel.load_game(game_name, param_dict)
t0 = time.time()
fp = fictitious_play.FictitiousPlay(game)
fp.iteration()
elapsed = time.time() - t0
result_line.append(f'{elapsed:.4f}s')
print(' '.join(result_line))
timing_results.append(result_line)
print('\nRESULTS:')
print(' '.join(header))
for line in timing_results:
print(' '.join([str(v) for v in line]))
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/mfg/algorithms/benchmark.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes a greedy policy from a value."""
import numpy as np
from open_spiel.python import policy as policy_std
from open_spiel.python.mfg import value
class GreedyPolicy(policy_std.Policy):
"""Computes the greedy policy of a value."""
def __init__(self, game, player_ids, state_action_value: value.ValueFunction):
"""Initializes the greedy policy.
Args:
game: The game to analyze.
player_ids: list of player ids for which this policy applies; each should
be in the range 0..game.num_players()-1.
state_action_value: A state-action value function.
"""
super(GreedyPolicy, self).__init__(game, player_ids)
self._state_action_value = state_action_value
def action_probabilities(self, state, player_id=None):
q = [
self._state_action_value(state, action)
for action in state.legal_actions()
]
amax_q = [0.0 for _ in state.legal_actions()]
amax_q[np.argmax(q)] = 1.0
return dict(zip(state.legal_actions(), amax_q))
| open_spiel-master | open_spiel/python/mfg/algorithms/greedy_policy.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for policy_value."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
class PolicyValueTest(parameterized.TestCase):
@parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'),
('cpp', 'mfg_crowd_modelling'))
def test_policy_value(self, name):
"""Checks if the value of a policy computation works.
Args:
name: Name of the game.
"""
game = pyspiel.load_game(name)
uniform_policy = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform_policy)
py_value = policy_value.PolicyValue(game, dist, uniform_policy,
value.TabularValueFunction(game))
py_val = py_value(game.new_initial_state())
self.assertAlmostEqual(py_val, 27.215850929940448)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/policy_value_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for deep average-network fictitious play."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.jax import dqn
from open_spiel.python.mfg.algorithms import average_network_fictitious_play
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
from open_spiel.python.utils import training
class AverageNetworkFictitiousPlayTest(parameterized.TestCase):
@parameterized.named_parameters(('cpp', 'mfg_crowd_modelling'),
('python', 'python_mfg_crowd_modelling'))
def test_train(self, name):
"""Checks that the training works."""
game = pyspiel.load_game(name)
assert game.num_players() == 1
uniform_policy = policy.UniformRandomPolicy(game)
uniform_dist = distribution.DistributionPolicy(game, uniform_policy)
env = rl_environment.Environment(
game, mfg_distribution=uniform_dist, mfg_population=0)
info_state_size = env.observation_spec()['info_state'][0]
num_actions = env.action_spec()['num_actions']
np.random.seed(0)
dqn_args = {
'batch_size': 32,
'epsilon_end': 0.1,
'epsilon_start': 0.1,
'hidden_layers_sizes': [128],
'learn_every': 32,
'learning_rate': 0.01,
'min_buffer_size_to_learn': 32,
'optimizer_str': 'adam',
'replay_buffer_capacity': 2000,
'update_target_network_every': 32,
}
br_agent = dqn.DQN(0, info_state_size, num_actions, **dqn_args)
args = {
'batch_size': 32,
'hidden_layers_sizes': [128],
'reservoir_buffer_capacity': 100000,
'learning_rate': 0.01,
'min_buffer_size_to_learn': 32,
'optimizer_str': 'adam',
'seed': 0,
'tau': 1.0,
}
fp = average_network_fictitious_play.AverageNetworkFictitiousPlay(
game, [env], [br_agent],
num_episodes_per_iteration=50,
num_training_steps_per_iteration=10,
**args)
# Run several iterations.
for _ in range(5):
training.run_episodes([env], [br_agent],
num_episodes=50,
is_evaluation=False)
fp.iteration()
# Just sanity check.
nash_conv_fp = nash_conv.NashConv(game, fp.policy)
self.assertLessEqual(nash_conv_fp.nash_conv(), 15)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/average_network_fictitious_play_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/mfg/algorithms/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Fictitious Play from Perrin & al.
Reference: https://arxiv.org/abs/2007.03458.
As presented, the Fictitious Play algorithm provides a robust approximation
scheme for Nash equilibrium by iteratively computing the best response
against the distribution induced by the average of the past best responses.
The provided formulation of Deep Fictitious Play mirrors this procedure,
but substitutes out the exact best response computation with an approximation
of best response values through a Reinforcement Learning approach (where
the RL method in question is a user-determined parameter for each iteration).
Policy is initialized to uniform policy.
Each iteration:
1. Compute best response against policy
2. Update policy as weighted average of best response and current policy
(the default learning rate at step n is 1 / (n + 1)).
To use fictitious play one should initialize it and run multiple iterations:
fp = FictitiousPlay(game)
for _ in range(num_iterations):
fp.iteration()
policy = fp.get_policy()
"""
import math
from typing import List, Optional
from open_spiel.python import policy as policy_std
from open_spiel.python.mfg import distribution as distribution_std
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import greedy_policy
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.algorithms import softmax_policy
import pyspiel
class MergedPolicy(policy_std.Policy):
"""Merge several policies."""
def __init__(
self,
game,
player_ids: List[int],
policies: List[policy_std.Policy],
distributions: List[distribution_std.Distribution],
weights: List[float],
):
"""Initializes the merged policy.
Args:
game: The game to analyze.
player_ids: list of player ids for which this policy applies; each should
be in the range 0..game.num_players()-1.
policies: A `List[policy_std.Policy]` object.
distributions: A `List[distribution_std.Distribution]` object.
weights: A `List[float]` object. The elements should sum to 1.
"""
super().__init__(game, player_ids)
self._policies = policies
self._distributions = distributions
self._weights = weights
assert len(policies) == len(distributions), (
f'Length mismatch {len(policies)} != {len(distributions)}')
assert len(policies) == len(weights), (
f'Length mismatch {len(policies)} != {len(weights)}')
assert math.isclose(
sum(weights),
1.0), (f'Weights should sum to 1, but instead sum to {sum(weights)}')
def action_probabilities(self, state, player_id=None):
action_prob = []
legal = state.legal_actions()
num_legal = len(legal)
for a in legal:
merged_pi = 0.0
norm_merged_pi = 0.0
for p, d, w in zip(self._policies, self._distributions, self._weights):
try:
merged_pi += w * d(state) * p(state)[a]
norm_merged_pi += w * d(state)
except (KeyError, ValueError):
# This happens when the state was not observed in the merged
# distributions or policies.
pass
if norm_merged_pi > 0.0:
action_prob.append((a, merged_pi / norm_merged_pi))
else:
action_prob.append((a, 1.0 / num_legal))
return dict(action_prob)
class FictitiousPlay(object):
"""Computes the value of a specified strategy."""
def __init__(self,
game: pyspiel.Game,
lr: Optional[float] = None,
temperature: Optional[float] = None):
"""Initializes the greedy policy.
Args:
game: The game to analyze.
lr: The learning rate of mirror descent. If None, at iteration i it will
be set to 1/i.
temperature: If set, then instead of the greedy policy a softmax policy
with the specified temperature will be used to update the policy at each
iteration.
"""
self._game = game
self._lr = lr
self._temperature = temperature
self._policy = policy_std.UniformRandomPolicy(self._game)
self._fp_step = 0
def get_policy(self):
return self._policy
def iteration(self, br_policy=None, learning_rate=None):
"""Returns a new `TabularPolicy` equivalent to this policy.
Args:
br_policy: Policy to compute the best response value for each iteration.
If none provided, the exact value is computed.
learning_rate: The learning rate.
"""
self._fp_step += 1
distrib = distribution.DistributionPolicy(self._game, self._policy)
if br_policy:
br_value = policy_value.PolicyValue(self._game, distrib, br_policy)
else:
br_value = best_response_value.BestResponse(
self._game, distrib, value.TabularValueFunction(self._game))
# Policy is either greedy or softmax with respect to the best response if
# temperature is specified.
player_ids = list(range(self._game.num_players()))
if self._temperature is None:
pi = greedy_policy.GreedyPolicy(self._game, player_ids, br_value)
else:
pi = softmax_policy.SoftmaxPolicy(self._game, player_ids,
self._temperature, br_value)
pi = pi.to_tabular()
distrib_pi = distribution.DistributionPolicy(self._game, pi)
if learning_rate:
weight = learning_rate
else:
weight = self._lr if self._lr else 1.0 / (self._fp_step + 1)
if math.isclose(weight, 1.0):
self._policy = pi
else:
self._policy = MergedPolicy(self._game, player_ids, [self._policy, pi],
[distrib, distrib_pi],
[1.0 - weight, weight]).to_tabular()
| open_spiel-master | open_spiel/python/mfg/algorithms/fictitious_play.py |
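# End-to-end sketch for the FictitiousPlay class above, evaluated with
# NashConv as in the tests in this repository. The game name and the number of
# iterations are arbitrary choices for illustration.
import pyspiel
from open_spiel.python.mfg.algorithms import fictitious_play
from open_spiel.python.mfg.algorithms import nash_conv

game = pyspiel.load_game("mfg_crowd_modelling")
fp = fictitious_play.FictitiousPlay(game)
for _ in range(5):
  fp.iteration()  # best response, then merge with weight 1 / (step + 1)
exploitability = nash_conv.NashConv(game, fp.get_policy()).nash_conv()
print(f"NashConv after 5 iterations: {exploitability:.4f}")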
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for softmax_policy."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.algorithms import softmax_policy
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
class SoftmaxPolicyTest(parameterized.TestCase):
@parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'),
('cpp', 'mfg_crowd_modelling'))
def test_softmax(self, name):
"""Check if the softmax policy works as expected.
The test checks that:
- uniform prior policy gives the same results as no prior.
- very high temperature gives almost a uniform policy.
- very low temperature gives almost a deterministic policy for the best
action.
Args:
name: Name of the game.
"""
game = pyspiel.load_game(name)
uniform_policy = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform_policy)
br_value = best_response_value.BestResponse(
game, dist, value.TabularValueFunction(game))
br_init_val = br_value(game.new_initial_state())
# uniform prior policy gives the same results as no prior.
softmax_pi_uniform_prior = softmax_policy.SoftmaxPolicy(
game, None, 1.0, br_value, uniform_policy).to_tabular()
softmax_pi_uniform_prior_value = policy_value.PolicyValue(
game, dist, softmax_pi_uniform_prior, value.TabularValueFunction(game))
softmax_pi_uniform_prior_init_val = softmax_pi_uniform_prior_value(
game.new_initial_state())
softmax_pi_no_prior = softmax_policy.SoftmaxPolicy(game, None, 1.0,
br_value, None)
softmax_pi_no_prior_value = policy_value.PolicyValue(
game, dist, softmax_pi_no_prior, value.TabularValueFunction(game))
softmax_pi_no_prior_init_val = softmax_pi_no_prior_value(
game.new_initial_state())
self.assertAlmostEqual(softmax_pi_uniform_prior_init_val,
softmax_pi_no_prior_init_val)
# very high temperature gives almost a uniform policy.
uniform_policy = uniform_policy.to_tabular()
uniform_value = policy_value.PolicyValue(game, dist, uniform_policy,
value.TabularValueFunction(game))
uniform_init_val = uniform_value(game.new_initial_state())
softmax_pi_no_prior = softmax_policy.SoftmaxPolicy(game, None, 100000000,
br_value, None)
softmax_pi_no_prior_value = policy_value.PolicyValue(
game, dist, softmax_pi_no_prior, value.TabularValueFunction(game))
softmax_pi_no_prior_init_val = softmax_pi_no_prior_value(
game.new_initial_state())
self.assertAlmostEqual(uniform_init_val, softmax_pi_no_prior_init_val)
# very low temperature gives almost a best response policy.
softmax_pi_no_prior = softmax_policy.SoftmaxPolicy(game, None, 0.0001,
br_value, None)
softmax_pi_no_prior_value = policy_value.PolicyValue(
game, dist, softmax_pi_no_prior, value.TabularValueFunction(game))
softmax_pi_no_prior_init_val = softmax_pi_no_prior_value(
game.new_initial_state())
self.assertAlmostEqual(br_init_val, softmax_pi_no_prior_init_val)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/softmax_policy_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for greedy_policy."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import greedy_policy
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
class GreedyPolicyTest(parameterized.TestCase):
@parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'),
('cpp', 'mfg_crowd_modelling'))
def test_greedy(self, name):
"""Check if the greedy policy works as expected.
The test checks that a greedy policy with respect to an optimal value is
an optimal policy.
Args:
name: Name of the game.
"""
game = pyspiel.load_game(name)
uniform_policy = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform_policy)
br_value = best_response_value.BestResponse(
game, dist, value.TabularValueFunction(game))
br_val = br_value(game.new_initial_state())
greedy_pi = greedy_policy.GreedyPolicy(game, None, br_value)
greedy_pi = greedy_pi.to_tabular()
pybr_value = policy_value.PolicyValue(game, dist, greedy_pi,
value.TabularValueFunction(game))
pybr_val = pybr_value(game.new_initial_state())
self.assertAlmostEqual(br_val, pybr_val)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/greedy_policy_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nash conv."""
from absl.testing import absltest
from open_spiel.python import policy
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.games import crowd_modelling
import pyspiel
class BestResponseTest(absltest.TestCase):
def test_python_game(self):
"""Checks if the NashConv is consistent through time."""
game = crowd_modelling.MFGCrowdModellingGame()
uniform_policy = policy.UniformRandomPolicy(game)
nash_conv_fp = nash_conv.NashConv(game, uniform_policy)
self.assertAlmostEqual(nash_conv_fp.nash_conv(), 2.8135365543870385)
def test_cpp_game(self):
"""Checks if the NashConv is consistent through time."""
game = pyspiel.load_game("mfg_crowd_modelling")
uniform_policy = policy.UniformRandomPolicy(game)
nash_conv_fp = nash_conv.NashConv(game, uniform_policy)
self.assertAlmostEqual(nash_conv_fp.nash_conv(), 2.8135365543870385)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/nash_conv_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes the distribution of a policy."""
import collections
from typing import List, Tuple
from open_spiel.python import policy as policy_module
from open_spiel.python.mfg import tabular_distribution
from open_spiel.python.mfg.tabular_distribution import DistributionDict
import pyspiel
def type_from_states(states):
"""Get node type of a list of states and assert they are the same."""
types = [state.get_type() for state in states]
assert len(set(types)) == 1, f"types: {types}"
return types[0]
def _check_distribution_sum(distribution: DistributionDict, expected_sum: int):
"""Sanity check that the distribution sums to a given value."""
sum_state_probabilities = sum(distribution.values())
assert abs(sum_state_probabilities - expected_sum) < 1e-4, (
"Sum of probabilities of all possible states should be the number of "
f"population, it is {sum_state_probabilities}.")
class DistributionPolicy(tabular_distribution.TabularDistribution):
"""Computes the distribution of a specified strategy."""
def __init__(self,
game: pyspiel.Game,
policy: policy_module.Policy,
root_state: pyspiel.State = None):
"""Initializes the distribution calculation.
Args:
game: The game to analyze.
policy: The policy we compute the distribution of.
root_state: The state of the game at which to start analysis. If `None`,
the game root states are used.
"""
super().__init__(game)
self._policy = policy
if root_state is None:
self._root_states = game.new_initial_states()
else:
self._root_states = [root_state]
self.evaluate()
def evaluate(self):
"""Evaluate the distribution over states of self._policy."""
# List of all game states that have a non-zero probability at the current
# timestep and player ID.
current_states = self._root_states.copy()
# Distribution at the current timestep. Maps state strings to
# floats. For each group of states for a given population, these
# floats represent a probability distribution.
current_distribution = {
self.state_to_str(state): 1 for state in current_states
}
# List of all distributions computed so far.
all_distributions = [current_distribution]
while type_from_states(current_states) != pyspiel.StateType.TERMINAL:
new_states, new_distribution = self._one_forward_step(
current_states, current_distribution, self._policy)
_check_distribution_sum(new_distribution, self.game.num_players())
current_distribution = new_distribution
current_states = new_states
all_distributions.append(new_distribution)
# Merge all per-timestep distributions into `self.distribution`.
for dist in all_distributions:
for state_str, prob in dist.items():
if state_str in self.distribution:
raise ValueError(
f"{state_str} has already been seen in distribution.")
self.distribution[state_str] = prob
def _forward_actions(
self, current_states: List[pyspiel.State], distribution: DistributionDict,
actions_and_probs_fn) -> Tuple[List[pyspiel.State], DistributionDict]:
"""Applies one action to each current state.
Args:
current_states: The states to apply actions on.
distribution: Current distribution.
actions_and_probs_fn: Function that maps one state to the corresponding
list of (action, proba). For decision nodes, this should be the policy,
and for chance nodes, this should be chance outcomes.
Returns:
A pair:
- new_states: List of new states after applying one action on
each input state.
- new_distribution: Probabilities for each of these states.
"""
new_states = []
new_distribution = collections.defaultdict(float)
for state in current_states:
state_str = self.state_to_str(state)
for action, prob in actions_and_probs_fn(state):
new_state = state.child(action)
new_state_str = self.state_to_str(new_state)
if new_state_str not in new_distribution:
new_states.append(new_state)
new_distribution[new_state_str] += prob * distribution[state_str]
return new_states, new_distribution
def _one_forward_step(self, current_states: List[pyspiel.State],
distribution: DistributionDict,
policy: policy_module.Policy):
"""Performs one step of the forward equation.
Namely, this takes as input a list of current state, the current
distribution, and performs one step of the forward equation, using
actions coming from the policy or from the chance node
probabilities, or propagating the distribution to the MFG nodes.
Args:
current_states: The states to perform the forward step on. All states are
assumed to be of the same type.
distribution: Current distribution.
policy: Policy used to select actions at decision nodes.
Returns:
A pair:
- new_states: List of new states after applying one step of the
forward equation (either performing one action or doing one
distribution update).
- new_distribution: Probabilities for each of these states.
"""
state_types = type_from_states(current_states)
if state_types == pyspiel.StateType.CHANCE:
return self._forward_actions(current_states, distribution,
lambda state: state.chance_outcomes())
if state_types == pyspiel.StateType.MEAN_FIELD:
new_states = []
new_distribution = {}
for state in current_states:
dist = [
# We need to default to 0, since the support requested by
# the state in `state.distribution_support()` might have
# states that we might not have reached yet. A probability
# of 0. should be given for them.
distribution.get(str_state, 0.)
for str_state in state.distribution_support()
]
new_state = state.clone()
new_state.update_distribution(dist)
new_state_str = self.state_to_str(new_state)
if new_state_str not in new_distribution:
new_states.append(new_state)
new_distribution[new_state_str] = 0.0
new_distribution[new_state_str] += distribution.get(
self.state_to_str(state), 0)
return new_states, new_distribution
if state_types == pyspiel.StateType.DECISION:
return self._forward_actions(
current_states, distribution,
lambda state: policy.action_probabilities(state).items())
raise ValueError(
f"Unpexpected state_stypes: {state_types}, states: {current_states}")
| open_spiel-master | open_spiel/python/mfg/algorithms/distribution.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Boltzmann Policy Iteration."""
from open_spiel.python import policy as policy_lib
from open_spiel.python.mfg.algorithms import mirror_descent
class BoltzmannPolicyIteration(mirror_descent.MirrorDescent):
"""Boltzmann Policy Iteration algorithm.
In this algorithm, at each iteration, we update the policy by first computing
the Q-function that evaluates the current policy, and then take a softmax.
This corresponds to using the Online Mirror Descent algorithm without summing
Q-functions but simply taking the latest Q-function.
"""
def get_projected_policy(self) -> policy_lib.Policy:
"""Returns the projected policy."""
return mirror_descent.ProjectedPolicy(
self._game,
list(range(self._game.num_players())),
self._state_value,
coeff=self._lr)
| open_spiel-master | open_spiel/python/mfg/algorithms/boltzmann_policy_iteration.py |
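# Usage sketch for BoltzmannPolicyIteration above. The constructor arguments
# mirror those of mirror_descent.MirrorDescent as used in
# mirror_descent_test.py earlier in this dump (a game plus a tabular value
# function); the game and iteration count are arbitrary.
import pyspiel
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import boltzmann_policy_iteration
from open_spiel.python.mfg.algorithms import nash_conv

game = pyspiel.load_game("mfg_crowd_modelling")
bpi = boltzmann_policy_iteration.BoltzmannPolicyIteration(
    game, value.TabularValueFunction(game))
for _ in range(10):
  bpi.iteration()
print(nash_conv.NashConv(game, bpi.get_policy()).nash_conv())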
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes a softmax policy from a value function."""
from typing import Optional
import numpy as np
from open_spiel.python import policy
from open_spiel.python.mfg import value
class SoftmaxPolicy(policy.Policy):
"""Computes the softmax policy of a value function."""
def __init__(self,
game,
player_ids,
temperature: float,
state_action_value: value.ValueFunction,
prior_policy: Optional[policy.Policy] = None):
"""Initializes the softmax policy.
Args:
game: The game to analyze.
player_ids: list of player ids for which this policy applies; each
should be in the range 0..game.num_players()-1.
temperature: float to scale the values (multiplied by 1/temperature).
state_action_value: A state-action value function.
prior_policy: Optional argument. Prior policy to scale the softmax
policy.
"""
super(SoftmaxPolicy, self).__init__(game, player_ids)
self._state_action_value = state_action_value
self._prior_policy = prior_policy
self._temperature = temperature
def action_probabilities(self, state, player_id=None):
legal_actions = state.legal_actions()
max_q = np.max(
[self._state_action_value(state, action) for action in legal_actions])
exp_q = [
np.exp((self._state_action_value(state, action) - max_q) /
self._temperature) for action in legal_actions
]
if self._prior_policy is not None:
prior_probs = self._prior_policy.action_probabilities(state)
exp_q = [
prior_probs.get(action, 0) * exp_q[i]
for i, action in enumerate(legal_actions)
]
denom = sum(exp_q)
smax_q = exp_q if denom == 0 else exp_q / denom
return dict(zip(legal_actions, smax_q))
| open_spiel-master | open_spiel/python/mfg/algorithms/softmax_policy.py |
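# Tiny numeric illustration of the softmax computed in SoftmaxPolicy above
# (standalone, no game needed): two actions with Q-values 1.0 and 2.0 at
# temperature 0.5. Lower temperatures concentrate mass on the best action.
import numpy as np

q_values = np.array([1.0, 2.0])
temperature = 0.5
exp_q = np.exp((q_values - q_values.max()) / temperature)
probs = exp_q / exp_q.sum()  # approx. [0.119, 0.881]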
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mirror descent."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
class MirrorDescentTest(parameterized.TestCase):
@parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'),
('cpp', 'mfg_crowd_modelling'))
def test_fp(self, name):
"""Checks if mirror descent works."""
game = pyspiel.load_game(name)
md = mirror_descent.MirrorDescent(game, value.TabularValueFunction(game))
for _ in range(10):
md.iteration()
md_policy = md.get_policy()
nash_conv_md = nash_conv.NashConv(game, md_policy)
self.assertAlmostEqual(nash_conv_md.nash_conv(), 2.2730324915546056)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/mirror_descent_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Does a backward pass to output the value of a policy."""
from typing import Optional
from open_spiel.python import policy as policy_std
from open_spiel.python.mfg import distribution as distribution_std
from open_spiel.python.mfg import value
import pyspiel
class PolicyValue(value.ValueFunction):
"""Computes the value of a specified strategy."""
def __init__(self,
game,
distribution: distribution_std.Distribution,
policy: policy_std.Policy,
state_value: Optional[value.ValueFunction] = None,
root_state=None):
"""Initializes the value calculation.
Args:
game: The game to analyze.
distribution: A `distribution.Distribution` object.
policy: A `policy.Policy` object.
state_value: A state value function. Defaults to Tabular.
root_state: The state of the game at which to start. If `None`, the game
root state is used.
"""
super(PolicyValue, self).__init__(game)
if root_state is None:
self._root_states = game.new_initial_states()
else:
self._root_states = [root_state]
self._distribution = distribution
self._policy = policy
self._state_value = (state_value if state_value is not None
else value.TabularValueFunction(game))
self.evaluate()
def eval_state(self, state):
"""Evaluate the value of a state."""
state_str = state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)
if self._state_value.has(state_str):
return self._state_value(state_str)
elif state.is_terminal():
self._state_value.set_value(
state_str,
state.rewards()[state.mean_field_population()])
return self._state_value(state_str)
elif state.current_player() == pyspiel.PlayerId.CHANCE:
self._state_value.set_value(state_str, 0.0)
for action, prob in state.chance_outcomes():
new_state = state.child(action)
self._state_value.add_value(state_str,
prob * self.eval_state(new_state))
return self._state_value(state_str)
elif state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
dist_to_register = state.distribution_support()
dist = [
self._distribution.value_str(str_state, 0.)
for str_state in dist_to_register
]
new_state = state.clone()
new_state.update_distribution(dist)
self._state_value.set_value(
state_str,
state.rewards()[state.mean_field_population()] +
self.eval_state(new_state))
return self._state_value(state_str)
else:
assert int(state.current_player()) >= 0, "The player id should be >= 0"
v = 0.0
for action, prob in self._policy.action_probabilities(state).items():
new_state = state.child(action)
v += prob * self.eval_state(new_state)
self._state_value.set_value(
state_str,
state.rewards()[state.mean_field_population()] + v)
return self._state_value(state_str)
def evaluate(self):
"""Evaluate the value over states of self._policy."""
for state in self._root_states:
self.eval_state(state)
def value(self, state, action=None):
if action is None:
return self._state_value(
state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID))
new_state = state.child(action)
return state.rewards()[state.mean_field_population()] + self._state_value(
new_state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID))
| open_spiel-master | open_spiel/python/mfg/algorithms/policy_value.py |
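# Usage sketch for PolicyValue above, including the state-action form of
# value(). It assumes the registered 'mfg_crowd_modelling' game and mirrors
# policy_value_test.py earlier in this dump.
import pyspiel
from open_spiel.python import policy
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import policy_value

game = pyspiel.load_game("mfg_crowd_modelling")
uniform = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform)
pv = policy_value.PolicyValue(game, dist, uniform,
                              value.TabularValueFunction(game))
state = game.new_initial_state()
print(pv(state))  # value of the initial state under the uniform policy
# State-action values are only meaningful at decision nodes.
if int(state.current_player()) >= 0:
  q = {a: pv(state, a) for a in state.legal_actions()}
  print(q)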
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(sertan): Add link to the reference paper.
"""Munchausen DQN Agent and deep online mirror descent implementation."""
import collections
from typing import Any, Callable, Dict, Optional, Tuple
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from open_spiel.python import rl_agent
from open_spiel.python import rl_agent_policy
from open_spiel.python.mfg.algorithms import distribution as distribution_std
from open_spiel.python.utils.replay_buffer import ReplayBuffer
Transition = collections.namedtuple(
"Transition",
"info_state action legal_one_hots reward next_info_state is_final_step "
"next_legal_one_hots")
# Penalty for illegal actions in action selection. In epsilon-greedy, this will
# prevent them from being selected and in soft-max the probabilities will be
# (close to) 0.
ILLEGAL_ACTION_PENALTY = -1e9
# Lower bound for action probabilities to prevent NaNs in log terms.
MIN_ACTION_PROB = 1e-6
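# For illustration (a sketch, not used by the code): with q_values = [1.0, 2.0]
# and only action 0 legal, the masked values are [1.0, 2.0 - 1e9], so argmax
# selects action 0 and softmax assigns action 1 a probability of (close to) 0.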
def _copy_params(params):
"""Returns a copy of the params."""
return jax.tree_map(lambda x: x.copy(), params)
class MunchausenDQN(rl_agent.AbstractAgent):
"""Munchausen DQN Agent implementation in JAX."""
def __init__(
self,
player_id,
state_representation_size,
num_actions,
# Training options.
batch_size: int = 128,
learn_every: int = 64,
epsilon_start: float = 0.1,
epsilon_end: float = 0.1,
epsilon_decay_duration: int = int(20e6),
epsilon_power: float = 1.0,
discount_factor: float = 1.0,
# Replay buffer options.
replay_buffer_capacity: int = int(2e5),
min_buffer_size_to_learn: int = 1000,
replay_buffer_class=ReplayBuffer,
# Loss and optimizer options.
optimizer: str = "sgd",
learning_rate: float = 0.01,
loss: str = "mse",
huber_loss_parameter: float = 1.0,
# Network options.
update_target_network_every: int = 19200,
hidden_layers_sizes=128,
qnn_params_init=None,
# Munchausen options.
tau=0.05,
alpha=0.9,
reset_replay_buffer_on_update: bool = True,
gradient_clipping: Optional[float] = None,
with_munchausen: bool = True,
seed: int = 42):
"""Initialize the Munchausen DQN agent."""
self.player_id = int(player_id)
self._num_actions = num_actions
self._batch_size = batch_size
self._learn_every = learn_every
self._epsilon_start = epsilon_start
self._epsilon_end = epsilon_end
self._epsilon_decay_duration = epsilon_decay_duration
self._epsilon_power = epsilon_power
self._discount_factor = discount_factor
self._reset_replay_buffer_on_update = reset_replay_buffer_on_update
self._tau = tau
self._alpha = alpha
# If true, the target uses Munchausen penalty terms.
self._with_munchausen = with_munchausen
self._prev_action = None
self._prev_legal_action = None
self._prev_time_step = None
# Used to select actions.
self._rs = np.random.RandomState(seed)
# Step counter to keep track of learning, eps decay and target network.
self._step_counter = 0
# Keep track of the last training loss achieved in an update step.
self._last_loss_value = None
# Create the replay buffer.
if not isinstance(replay_buffer_capacity, int):
raise ValueError("Replay buffer capacity not an integer.")
self._replay_buffer = replay_buffer_class(replay_buffer_capacity)
self._min_buffer_size_to_learn = min_buffer_size_to_learn
# Create the Q-network.
self._update_target_network_every = update_target_network_every
if isinstance(hidden_layers_sizes, int):
hidden_layers_sizes = [hidden_layers_sizes]
def network(x):
mlp = hk.nets.MLP(hidden_layers_sizes + [num_actions])
return mlp(x)
self.hk_network = hk.without_apply_rng(hk.transform(network))
self.hk_network_apply = jax.jit(self.hk_network.apply)
if qnn_params_init:
self._params_q_network = _copy_params(qnn_params_init)
self._params_target_q_network = _copy_params(qnn_params_init)
self._params_prev_q_network = _copy_params(qnn_params_init)
else:
rng = jax.random.PRNGKey(seed)
x = jnp.ones([1, state_representation_size])
self._params_q_network = self.hk_network.init(rng, x)
self._params_target_q_network = self.hk_network.init(rng, x)
self._params_prev_q_network = self.hk_network.init(rng, x)
# Create the loss function and the optimizer.
if loss == "mse":
self._loss_func = lambda x: jnp.mean(x**2)
elif loss == "huber":
self._loss_func = lambda x: jnp.mean( # pylint: disable=g-long-lambda
rlax.huber_loss(x, huber_loss_parameter))
else:
raise ValueError("Not implemented, choose from 'mse', 'huber'.")
if optimizer == "adam":
optimizer = optax.adam(learning_rate)
elif optimizer == "sgd":
optimizer = optax.sgd(learning_rate)
else:
raise ValueError("Not implemented, choose from 'adam' and 'sgd'.")
    # Clipping the gradients prevents divergence and allows more stable
    # training.
if gradient_clipping:
optimizer = optax.chain(optimizer,
optax.clip_by_global_norm(gradient_clipping))
opt_init, opt_update = optimizer.init, optimizer.update
def _stochastic_gradient_descent(params, opt_state, gradient):
updates, opt_state = opt_update(gradient, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
self._opt_update_fn = _stochastic_gradient_descent
self._opt_state = opt_init(self._params_q_network)
self._loss_and_grad = jax.value_and_grad(self._loss, has_aux=False)
self._jit_update = jax.jit(self._get_update())
def step(self,
time_step,
is_evaluation=False,
add_transition_record=True,
use_softmax=False,
tau: Optional[float] = None):
"""Returns the action to be taken and updates the Q-network if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
add_transition_record: Whether to add to the replay buffer on this step.
use_softmax: Uses soft-max action selection.
tau: Tau for soft-max action selection. If None, then the training value
will be used.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
    # Act step: don't act at terminal info states or if it's not our turn.
if (not time_step.last()) and (time_step.is_simultaneous_move() or
self.player_id == int(
time_step.current_player())):
# Act according to epsilon-greedy or soft-max for current Q-network.
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
if use_softmax:
action, probs = self._softmax(info_state, legal_actions,
self._tau if tau is None else tau)
else:
epsilon = self._get_epsilon(is_evaluation)
action, probs = self._epsilon_greedy(info_state, legal_actions, epsilon)
else:
action = None
probs = []
    # Don't update the agent's internal state during evaluation.
if not is_evaluation:
self._step_counter += 1
if self._step_counter % self._learn_every == 0:
self._last_loss_value = self.learn()
if self._step_counter % self._update_target_network_every == 0:
self._params_target_q_network = _copy_params(self._params_q_network)
if self._prev_time_step and add_transition_record:
# We may omit record adding here if it's done elsewhere.
self.add_transition(self._prev_time_step, self._prev_action,
self._prev_legal_action, time_step)
if time_step.last(): # prepare for the next episode.
self._prev_time_step = None
self._prev_action = None
self._prev_legal_action = None
else:
self._prev_time_step = time_step
self._prev_action = action
self._prev_legal_action = legal_actions
return rl_agent.StepOutput(action=action, probs=probs)
def add_transition(self, prev_time_step, prev_action, prev_legal_actions,
time_step):
"""Adds the new transition using `time_step` to the replay buffer.
    Adds the transition from `prev_time_step` to `time_step`, taken via
    `prev_action`.
Args:
prev_time_step: prev ts, an instance of rl_environment.TimeStep.
prev_action: int, action taken at `prev_time_step`.
prev_legal_actions: Previous legal actions.
time_step: current ts, an instance of rl_environment.TimeStep.
"""
assert prev_time_step is not None
next_legal_actions = (
time_step.observations["legal_actions"][self.player_id])
next_legal_one_hots = self._to_one_hot(next_legal_actions)
# Added for deep OMD: keep previous action mask.
prev_legal_one_hots = self._to_one_hot(prev_legal_actions)
transition = Transition(
info_state=(
prev_time_step.observations["info_state"][self.player_id][:]),
action=prev_action,
legal_one_hots=prev_legal_one_hots,
reward=time_step.rewards[self.player_id],
next_info_state=time_step.observations["info_state"][self.player_id][:],
is_final_step=float(time_step.last()),
next_legal_one_hots=next_legal_one_hots)
self._replay_buffer.add(transition)
def _get_action_probs(self, params, info_states, legal_one_hots):
"""Returns the soft-max action probability distribution."""
q_values = self.hk_network.apply(params, info_states)
legal_q_values = q_values + (1 - legal_one_hots) * ILLEGAL_ACTION_PENALTY
return jax.nn.softmax(legal_q_values / self._tau)
def _loss(self, params, params_target, params_prev, info_states, actions,
legal_one_hots, rewards, next_info_states, are_final_steps,
next_legal_one_hots):
"""Returns the Munchausen loss."""
# Target with 2 parts: reward and value for next state; each part is
# modified according to the Munchausen trick.
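    # Sketch of the target implemented below (assuming pi_prev is the softmax
    # policy induced by the previous Q-network and gamma = discount_factor):
    #   target = r + alpha * tau * log pi_prev(a|s)
    #            + gamma * sum_{a'} pi_prev(a'|s') *
    #                      (Q_target(s', a') - tau * log pi_prev(a'|s')),
    # with the second term dropped at final steps.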
q_values = self.hk_network.apply(params, info_states)
target_q_values = self.hk_network.apply(params_target, next_info_states)
r_term = rewards
if self._with_munchausen:
probs = self._get_action_probs(params_prev, info_states, legal_one_hots)
prob_prev_action = jnp.sum(probs * actions, axis=-1)
penalty_pi = jnp.log(jnp.clip(prob_prev_action, MIN_ACTION_PROB))
r_term += self._alpha * self._tau * penalty_pi
if self._with_munchausen:
# Average value over actions + extra log term.
# We clip the probabilities to avoid NaNs in the log term.
next_probs = self._get_action_probs(params_prev, next_info_states,
next_legal_one_hots)
q_term_values = next_probs * (
target_q_values -
self._tau * jnp.log(jnp.clip(next_probs, MIN_ACTION_PROB)))
q_term = jnp.sum(q_term_values, axis=-1)
else:
# Maximum value.
max_next_q = jnp.max(
target_q_values + (1 - legal_one_hots) * ILLEGAL_ACTION_PENALTY,
axis=-1)
max_next_q = jax.numpy.where(
1 - are_final_steps, x=max_next_q, y=jnp.zeros_like(max_next_q))
q_term = max_next_q
target = (r_term + (1 - are_final_steps) * self._discount_factor * q_term)
target = jax.lax.stop_gradient(target)
predictions = jnp.sum(q_values * actions, axis=-1)
return self._loss_func(predictions - target)
def _get_update(self):
"""Returns the gradient update function."""
def update(params, params_target, params_prev, opt_state, info_states,
actions, legal_one_hots, rewards, next_info_states,
are_final_steps, next_legal_one_hots):
loss_val, grad_val = self._loss_and_grad(params, params_target,
params_prev, info_states,
actions, legal_one_hots, rewards,
next_info_states,
are_final_steps,
next_legal_one_hots)
new_params, new_opt_state = self._opt_update_fn(params, opt_state,
grad_val)
return new_params, new_opt_state, loss_val
return update
def _to_one_hot(self, a, value=1.0):
"""Returns the one-hot encoding of the action."""
a_one_hot = np.zeros(self._num_actions)
a_one_hot[a] = value
return a_one_hot
def learn(self):
"""Compute the loss on sampled transitions and perform a Q-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Returns:
The average loss obtained on this batch of transitions or `None`.
"""
if (len(self._replay_buffer) < self._batch_size or
len(self._replay_buffer) < self._min_buffer_size_to_learn):
return None
transitions = self._replay_buffer.sample(self._batch_size)
info_states = np.asarray([t.info_state for t in transitions])
actions = np.asarray([self._to_one_hot(t.action) for t in transitions])
legal_one_hots = np.asarray([t.legal_one_hots for t in transitions])
rewards = np.asarray([t.reward for t in transitions])
next_info_states = np.asarray([t.next_info_state for t in transitions])
are_final_steps = np.asarray([t.is_final_step for t in transitions])
next_legal_one_hots = np.asarray(
[t.next_legal_one_hots for t in transitions])
self._params_q_network, self._opt_state, loss_val = self._jit_update(
self._params_q_network, self._params_target_q_network,
self._params_prev_q_network, self._opt_state, info_states, actions,
legal_one_hots, rewards, next_info_states, are_final_steps,
next_legal_one_hots)
return loss_val
def _epsilon_greedy(self, info_state, legal_actions, epsilon):
"""Returns a valid epsilon-greedy action and action probabilities.
Args:
info_state: hashable representation of the information state.
legal_actions: list of legal actions at `info_state`.
epsilon: float, probability of taking an exploratory action.
Returns:
A valid epsilon-greedy action and action probabilities.
"""
if self._rs.rand() < epsilon:
action = self._rs.choice(legal_actions)
probs = self._to_one_hot(legal_actions, value=1.0 / len(legal_actions))
return action, probs
info_state = np.reshape(info_state, [1, -1])
q_values = self.hk_network_apply(self._params_q_network, info_state)[0]
legal_one_hot = self._to_one_hot(legal_actions)
legal_q_values = q_values + (1 - legal_one_hot) * ILLEGAL_ACTION_PENALTY
action = int(np.argmax(legal_q_values))
probs = self._to_one_hot(action)
return action, probs
def _get_epsilon(self, is_evaluation):
"""Returns the evaluation or decayed epsilon value."""
if is_evaluation:
return 0.0
decay_steps = min(self._step_counter, self._epsilon_decay_duration)
decayed_epsilon = (
self._epsilon_end + (self._epsilon_start - self._epsilon_end) *
(1 - decay_steps / self._epsilon_decay_duration)**self._epsilon_power)
return decayed_epsilon
def _softmax(self, info_state, legal_actions,
tau: float) -> Tuple[int, np.ndarray]:
"""Returns a valid soft-max action and action probabilities."""
info_state = np.reshape(info_state, [1, -1])
q_values = self.hk_network_apply(self._params_q_network, info_state)[0]
legal_one_hot = self._to_one_hot(legal_actions)
legal_q_values = q_values + (1 - legal_one_hot) * ILLEGAL_ACTION_PENALTY
# Apply temperature and subtract the maximum value for numerical stability.
temp = legal_q_values / tau
unnormalized = np.exp(temp - np.amax(temp))
probs = unnormalized / unnormalized.sum()
action = self._rs.choice(legal_actions, p=probs[legal_actions])
return action, probs
def update_prev_q_network(self):
"""Updates the parameters of the previous Q-network."""
self._params_prev_q_network = _copy_params(self._params_q_network)
if self._reset_replay_buffer_on_update:
# Also reset the replay buffer to avoid having transitions from the
# previous policy.
self._replay_buffer.reset()
@property
def loss(self):
return self._last_loss_value
class SoftMaxMunchausenDQN(rl_agent.AbstractAgent):
"""Wraps a Munchausen DQN agent to use soft-max action selection."""
def __init__(self, agent: MunchausenDQN, tau: Optional[float] = None):
self._agent = agent
self._tau = tau
def step(self, time_step, is_evaluation=False):
return self._agent.step(
time_step, is_evaluation=is_evaluation, use_softmax=True, tau=self._tau)
class DeepOnlineMirrorDescent(object):
"""The deep online mirror descent algorithm."""
def __init__(self,
game,
envs,
agents,
eval_every=200,
num_episodes_per_iteration=1000,
logging_fn: Optional[Callable[[int, int, Dict[str, Any]],
None]] = None):
"""Initializes mirror descent.
Args:
      game: The game to analyze.
envs: RL environment for each player.
agents: Munchausen DQN agents for each player.
eval_every: Number of training episodes between two evaluations.
      num_episodes_per_iteration: Number of training episodes for each
        iteration.
logging_fn: Callable for logging the metrics. The arguments will be the
current iteration, episode and a dictionary of metrics to log.
"""
assert len(envs) == len(agents)
# Make sure that the agents are all MunchausenDQN.
for agent in agents:
assert isinstance(agent, MunchausenDQN)
self._game = game
self._eval_every = eval_every
self._num_episodes_per_iteration = num_episodes_per_iteration
self._envs = envs
self._agents = agents
self._use_observation = envs[0].use_observation
self._iteration = 0
if logging_fn is None:
logging_fn = lambda it, ep, vals: logging.info("%d/%d %r", it, ep, vals)
self._logging_fn = logging_fn
# Set the initial policy and distribution.
self._update_policy_and_distribution()
def _train_agents(self):
"""Trains the agents.
    This will evaluate the Q-network for the current policy and distribution.
"""
for ep in range(self._num_episodes_per_iteration):
for env, agent in zip(self._envs, self._agents):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step, use_softmax=False)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
agent.step(time_step, use_softmax=False)
if (ep + 1) % self._eval_every == 0:
metrics = {}
for i, agent in enumerate(self._agents):
metrics[f"agent{i}/loss"] = agent.loss
self._logging_fn(self._iteration, ep + 1, metrics)
def _update_policy_and_distribution(self):
"""Updates the current soft-max policy and the distribution."""
self._policy = self.get_softmax_policy()
self._distribution = distribution_std.DistributionPolicy(
self._game, self._policy)
def get_softmax_policy(self,
tau: Optional[float] = None
) -> rl_agent_policy.JointRLAgentPolicy:
"""Returns the softmax policy with the specified tau.
Args:
tau: Tau for soft-max action selection, or None to use the value set in
the MunchausenDQN agents.
Returns:
A JointRLAgentPolicy.
"""
return rl_agent_policy.JointRLAgentPolicy(
self._game, {
idx: SoftMaxMunchausenDQN(agent, tau=tau)
for idx, agent in enumerate(self._agents)
}, self._use_observation)
def iteration(self):
"""An iteration of Mirror Descent."""
self._train_agents()
self._update_policy_and_distribution()
self._iteration += 1
# Update the distributions of the environments and the previous Q-networks
# of the agents.
for env, agent in zip(self._envs, self._agents):
env.update_mfg_distribution(self.distribution)
agent.update_prev_q_network()
@property
def policy(self):
return self._policy
@property
def distribution(self):
return self._distribution
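# Example usage (a hedged sketch based on the accompanying test,
# munchausen_deep_mirror_descent_test.py; `game`, `env`, `info_state_size` and
# `num_actions` are assumed to be set up from an rl_environment.Environment):
#
#   agent = MunchausenDQN(0, info_state_size, num_actions)
#   md = DeepOnlineMirrorDescent(game, [env], [agent],
#                                num_episodes_per_iteration=100)
#   for _ in range(10):
#     md.iteration()
#   # md.policy and md.distribution hold the current iterates.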
| open_spiel-master | open_spiel/python/mfg/algorithms/munchausen_deep_mirror_descent.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Fixed Point."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.mfg.algorithms import fixed_point
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
class FixedPointTest(parameterized.TestCase):
@parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'),
('cpp', 'mfg_crowd_modelling'))
def test_run(self, name):
"""Checks if the algorithm works."""
game = pyspiel.load_game(name)
fixed_p = fixed_point.FixedPoint(game)
for _ in range(10):
fixed_p.iteration()
fixed_p_policy = fixed_p.get_policy()
nash_conv_fixed_p = nash_conv.NashConv(game, fixed_p_policy)
self.assertAlmostEqual(nash_conv_fixed_p.nash_conv(), 55.745, places=3)
@parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'),
('cpp', 'mfg_crowd_modelling'))
def test_softmax(self, name):
"""Checks the softmax policy."""
game = pyspiel.load_game(name)
fixed_p = fixed_point.FixedPoint(game, temperature=10.0)
for _ in range(10):
fixed_p.iteration()
fixed_p_policy = fixed_p.get_policy()
nash_conv_fixed_p = nash_conv.NashConv(game, fixed_p_policy)
self.assertAlmostEqual(nash_conv_fixed_p.nash_conv(), 2.421, places=3)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/fixed_point_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mirror Descent (https://arxiv.org/pdf/2103.00623.pdf)."""
from typing import Dict, List, Optional
import numpy as np
from open_spiel.python import policy as policy_lib
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import distribution
import pyspiel
def softmax_projection(logits):
max_l = max(logits)
exp_l = [np.exp(l - max_l) for l in logits]
norm_exp = sum(exp_l)
return [l / norm_exp for l in exp_l]
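# Example (sketch): softmax_projection([0.0, np.log(3.0)]) ~= [0.25, 0.75].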
class ProjectedPolicy(policy_lib.Policy):
"""Project values on the policy simplex."""
def __init__(
self,
game: pyspiel.Game,
player_ids: List[int],
state_value: value.ValueFunction,
coeff: float = 1.0,
):
"""Initializes the projected policy.
Args:
game: The game to analyze.
player_ids: list of player ids for which this policy applies; each should
be in the range 0..game.num_players()-1.
state_value: The (cumulative) state value to project.
coeff: Coefficient for the values of the states.
"""
super(ProjectedPolicy, self).__init__(game, player_ids)
self._state_value = state_value
self._coeff = coeff
def value(self, state: pyspiel.State, action: Optional[int] = None) -> float:
if action is None:
return self._state_value(
state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID))
else:
new_state = state.child(action)
return state.rewards()[0] + self._state_value(
new_state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID))
def action_probabilities(self,
state: pyspiel.State,
player_id: Optional[int] = None) -> Dict[int, float]:
del player_id
action_logit = [(a, self._coeff * self.value(state, action=a))
for a in state.legal_actions()]
action, logit = zip(*action_logit)
return dict(zip(action, softmax_projection(logit)))
class MirrorDescent(object):
"""The mirror descent algorithm."""
def __init__(self,
game: pyspiel.Game,
state_value: Optional[value.ValueFunction] = None,
lr: float = 0.01,
root_state: Optional[pyspiel.State] = None):
"""Initializes mirror descent.
Args:
      game: The game to analyze.
      state_value: A state value function. Defaults to TabularValueFunction.
      lr: The learning rate of mirror descent.
root_state: The state of the game at which to start. If `None`, the game
root state is used.
"""
self._game = game
if root_state is None:
self._root_states = game.new_initial_states()
else:
self._root_states = [root_state]
self._policy = policy_lib.UniformRandomPolicy(game)
self._distribution = distribution.DistributionPolicy(game, self._policy)
self._md_step = 0
self._lr = lr
self._state_value = (
state_value if state_value else value.TabularValueFunction(game))
self._cumulative_state_value = value.TabularValueFunction(game)
def get_state_value(self, state: pyspiel.State,
learning_rate: float) -> float:
"""Returns the value of the state."""
if state.is_terminal():
return state.rewards()[state.mean_field_population()]
if state.current_player() == pyspiel.PlayerId.CHANCE:
v = 0.0
for action, prob in state.chance_outcomes():
new_state = state.child(action)
v += prob * self.eval_state(new_state, learning_rate)
return v
if state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
dist_to_register = state.distribution_support()
dist = [
self._distribution.value_str(str_state, 0.0)
for str_state in dist_to_register
]
new_state = state.clone()
new_state.update_distribution(dist)
return (state.rewards()[state.mean_field_population()] +
self.eval_state(new_state, learning_rate))
assert int(state.current_player()) >= 0, "The player id should be >= 0"
v = 0.0
for action, prob in self._policy.action_probabilities(state).items():
new_state = state.child(action)
v += prob * self.eval_state(new_state, learning_rate)
return state.rewards()[state.mean_field_population()] + v
def eval_state(self, state: pyspiel.State, learning_rate: float) -> float:
"""Evaluate the value of a state and update the cumulative sum."""
state_str = state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)
# Return the already calculated value if present.
if self._state_value.has(state_str):
return self._state_value(state_str)
# Otherwise, calculate the value of the state.
v = self.get_state_value(state, learning_rate)
self._state_value.set_value(state_str, v)
# Update the cumulative value of the state.
self._cumulative_state_value.add_value(state_str, learning_rate * v)
return v
def get_projected_policy(self) -> policy_lib.Policy:
"""Returns the projected policy."""
return ProjectedPolicy(self._game, list(range(self._game.num_players())),
self._cumulative_state_value)
def iteration(self, learning_rate: Optional[float] = None):
"""An iteration of Mirror Descent."""
self._md_step += 1
# TODO(sertan): Fix me.
self._state_value = value.TabularValueFunction(self._game)
for state in self._root_states:
self.eval_state(state, learning_rate if learning_rate else self._lr)
self._policy = self.get_projected_policy()
self._distribution = distribution.DistributionPolicy(
self._game, self._policy)
def get_policy(self) -> policy_lib.Policy:
return self._policy
@property
def distribution(self) -> distribution.DistributionPolicy:
return self._distribution
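# Example usage (a hedged sketch mirroring the tests in this directory):
#
#   game = pyspiel.load_game("python_mfg_crowd_modelling")
#   md = MirrorDescent(game, lr=0.01)
#   for _ in range(10):
#     md.iteration()
#   md_policy = md.get_policy()  # Policy projected from the cumulative values.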
| open_spiel-master | open_spiel/python/mfg/algorithms/mirror_descent.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the Deep Average-network Fictitious Play.
Deep Average-network Fictitious Play is coupled with agents that compute a
best response (BR) at each iteration. Instead of keeping all the BRs from past
iterations in memory, it learns along the way the policy that generates the
average distribution. This is done by keeping a buffer of state-action pairs
generated by past BRs and learning the average policy (represented by a neural
network) by minimizing a categorical loss. This approach is inspired by the
Neural Fictitious Self-Play (NFSP) method (Heinrich & Silver, 2016), developed
initially for imperfect-information games with a finite number of players, and
adapted here to the MFG setting.
"""
import dataclasses
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
from open_spiel.python import rl_agent
from open_spiel.python import rl_agent_policy
from open_spiel.python import rl_environment
from open_spiel.python.mfg.algorithms import distribution
import pyspiel
from open_spiel.python.utils import reservoir_buffer
from open_spiel.python.utils import training
@dataclasses.dataclass
class Transition:
"""Transitions stored in the reservoir buffer."""
info_state: np.ndarray
action_probs: np.ndarray
legal_actions_mask: np.ndarray
class AveragePolicy(rl_agent.AbstractAgent):
"""NFSP-like agent that learns an average policy using a single network."""
def __init__(self,
player_id: int,
br_rl_agent: rl_agent.AbstractAgent,
state_representation_size: int,
num_actions: int,
hidden_layers_sizes: List[int],
params_avg_network: Optional[jnp.ndarray] = None,
reservoir_buffer_capacity: int = 100000,
batch_size: int = 128,
learning_rate: float = 0.01,
min_buffer_size_to_learn: int = 1000,
optimizer_str: str = 'sgd',
gradient_clipping: Optional[float] = None,
seed: int = 42,
tau: float = 1.0):
"""Initialize the AveragePolicy agent."""
self._br_rl_agent = br_rl_agent
self._player_id = player_id
self._num_actions = num_actions
self._batch_size = batch_size
self._min_buffer_size_to_learn = min_buffer_size_to_learn
self._reservoir_buffer = reservoir_buffer.ReservoirBuffer(
reservoir_buffer_capacity)
# Keep track of the last training loss achieved in an update step.
self._last_loss_value = None
# Average policy network.
def network(x):
mlp = hk.nets.MLP(hidden_layers_sizes + [num_actions])
return mlp(x)
self.avg_network = hk.without_apply_rng(hk.transform(network))
def avg_network_policy(param, info_state):
action_values = self.avg_network.apply(param, info_state)
return jax.nn.softmax(action_values / tau, axis=1)
self._avg_network_policy = jax.jit(avg_network_policy)
rng = jax.random.PRNGKey(seed)
x = jnp.ones([1, state_representation_size])
# Use the specified parameters if any, or initialize the network with random
# weights.
if params_avg_network is None:
self._params_avg_network = self.avg_network.init(rng, x)
else:
self._params_avg_network = jax.tree_map(lambda x: x.copy(),
params_avg_network)
self._params_avg_network = jax.device_put(self._params_avg_network)
if optimizer_str == 'adam':
optimizer = optax.adam(learning_rate)
elif optimizer_str == 'sgd':
optimizer = optax.sgd(learning_rate)
else:
raise ValueError('Not implemented, choose from "adam" and "sgd".')
if gradient_clipping:
optimizer = optax.chain(optimizer,
optax.clip_by_global_norm(gradient_clipping))
opt_init, opt_update = optimizer.init, optimizer.update
def opt_update_fn(params, opt_state, gradient):
"""Learning rule (stochastic gradient descent)."""
updates, opt_state = opt_update(gradient, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
self._opt_update_fn = opt_update_fn
self._opt_state = opt_init(self._params_avg_network)
self._loss_and_grad = jax.value_and_grad(self._loss_avg, has_aux=False)
self._jit_update = jax.jit(self._get_update_fn())
def _get_update_fn(self):
"""Returns the function that updates the parameters."""
def update(param_avg, opt_state_avg, info_states, action_probs):
loss_val, grad_val = self._loss_and_grad(param_avg, info_states,
action_probs)
new_param_avg, new_opt_state_avg = self._opt_update_fn(
param_avg, opt_state_avg, grad_val)
return new_param_avg, new_opt_state_avg, loss_val
return update
def _act(self, info_state, legal_actions) -> Tuple[int, np.ndarray]:
"""Returns an action and the action probabilities."""
info_state = np.reshape(info_state, [1, -1])
action_probs = self._avg_network_policy(self._params_avg_network,
info_state)
# Remove illegal actions and normalize probs
probs = np.zeros(self._num_actions)
action_probs = np.asarray(action_probs)
probs[legal_actions] = action_probs[0][legal_actions]
probs /= sum(probs)
action = np.random.choice(len(probs), p=probs)
return action, probs
@property
def loss(self) -> Optional[float]:
"""Return the latest loss."""
return self._last_loss_value
def step(self,
time_step: rl_environment.TimeStep,
is_evaluation: bool = True) -> Optional[rl_agent.StepOutput]:
"""Returns the action to be taken by following the average network policy.
Note that unlike most other algorithms, this method doesn't train the agent.
Instead, we add new samples to the reservoir buffer and the training happens
at a later stage.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
# Prepare for the next episode.
if time_step.last():
return
if is_evaluation:
# Use the average policy network.
info_state = time_step.observations['info_state'][self._player_id]
legal_actions = time_step.observations['legal_actions'][self._player_id]
action, probs = self._act(info_state, legal_actions)
return rl_agent.StepOutput(action=action, probs=probs)
# Use the best response agent and add the transition in the reservoir
# buffer.
br_agent_output = self._br_rl_agent.step(time_step, is_evaluation=True)
self._add_transition(time_step, br_agent_output)
return br_agent_output
def _add_transition(self, time_step, agent_output):
"""Adds the new transition using `time_step` to the reservoir buffer.
Transitions are in the form (time_step, agent_output.probs, legal_mask).
Args:
time_step: an instance of rl_environment.TimeStep.
agent_output: an instance of rl_agent.StepOutput.
"""
legal_actions = time_step.observations['legal_actions'][self._player_id]
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
transition = Transition(
info_state=(time_step.observations['info_state'][self._player_id][:]),
action_probs=agent_output.probs,
legal_actions_mask=legal_actions_mask)
self._reservoir_buffer.add(transition)
def _loss_avg(self, param_avg, info_states, action_probs):
avg_logit = self.avg_network.apply(param_avg, info_states)
loss_value = -jnp.sum(
action_probs * jax.nn.log_softmax(avg_logit)) / avg_logit.shape[0]
return loss_value
def learn(self) -> Optional[float]:
"""Compute the loss on sampled transitions and perform a avg-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Returns:
The average loss obtained on this batch of transitions or `None`.
"""
if (len(self._reservoir_buffer) < self._batch_size or
len(self._reservoir_buffer) < self._min_buffer_size_to_learn):
return None
transitions = self._reservoir_buffer.sample(self._batch_size)
info_states = np.asarray([t.info_state for t in transitions])
action_probs = np.asarray([t.action_probs for t in transitions])
self._params_avg_network, self._opt_state, loss_val_avg = self._jit_update(
self._params_avg_network, self._opt_state, info_states, action_probs)
self._last_loss_value = float(loss_val_avg)
return loss_val_avg
class AverageNetworkFictitiousPlay(object):
"""Deep Average-network Fictitious Play.
See the file description for more information.
"""
def __init__(self,
game: pyspiel.Game,
envs: Sequence[rl_environment.Environment],
br_rl_agents: Sequence[rl_agent.AbstractAgent],
num_episodes_per_iteration: int,
num_training_steps_per_iteration: int,
eval_every: int = 200,
logging_fn: Optional[Callable[[int, int, Dict[str, Any]],
None]] = None,
**kwargs):
"""Initializes the greedy policy.
Args:
game: The game to analyze.
envs: RL environment for each player.
br_rl_agents: Best response, e.g. DQN, agents for each player.
num_episodes_per_iteration: Number of episodes to collect samples that are
added to the reservoir buffer.
num_training_steps_per_iteration: Number of steps to train the average
policy in each iteration.
eval_every: Number of training steps between two evaluations.
logging_fn: Callable for logging the metrics. The arguments will be the
current iteration, episode and a dictionary of metrics to log.
**kwargs: kwargs passed to the AveragePolicy() constructor.
"""
self._game = game
self._envs = envs
self._num_episodes_per_iteration = num_episodes_per_iteration
self._num_training_steps_per_iteration = num_training_steps_per_iteration
self._eval_every = eval_every
self._logging_fn = logging_fn
self._num_players = game.num_players()
self._fp_iteration = 0
env = self._envs[0]
info_state_size = env.observation_spec()['info_state'][0]
num_actions = env.action_spec()['num_actions']
self._avg_rl_agents = [
AveragePolicy(p, br_rl_agents[p], info_state_size, num_actions,
**kwargs) for p in range(self._num_players)
]
self._policy = rl_agent_policy.JointRLAgentPolicy(
self._game,
{idx: agent for idx, agent in enumerate(self._avg_rl_agents)},
use_observation=env.use_observation)
self._update_distribution()
def _update_distribution(self):
"""Calculates the current distribution and updates the environments."""
self._distribution = distribution.DistributionPolicy(
self._game, self._policy)
for env in self._envs:
env.update_mfg_distribution(self._distribution)
@property
def policy(self) -> rl_agent_policy.JointRLAgentPolicy:
return self._policy
def iteration(self):
"""An average-network fictitious play step."""
# Generate samples using latest best-response and add them to the reservoir
# buffer. Note that the algorithm is agnostic to the best-response policies
# as we only use them to collect new samples. They can be approximate (e.g.
# backed by a deep algorithm) or exact.
training.run_episodes(
self._envs,
self._avg_rl_agents,
num_episodes=self._num_episodes_per_iteration,
is_evaluation=False)
# Train the average policy.
for step in range(self._num_training_steps_per_iteration):
for avg_rl_agent in self._avg_rl_agents:
avg_rl_agent.learn()
if self._logging_fn and (step + 1) % self._eval_every == 0:
self._logging_fn(
self._fp_iteration, step, {
f'avg_agent{i}/loss': float(agent.loss)
for i, agent in enumerate(self._avg_rl_agents)
})
# Update the distribution.
self._update_distribution()
self._fp_iteration += 1
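# Example usage (a hedged sketch; the per-population RL environments `envs`,
# the best-response agents `br_rl_agents` (e.g. DQN) and `num_iterations` are
# assumed to be defined elsewhere):
#
#   fp = AverageNetworkFictitiousPlay(
#       game, envs, br_rl_agents,
#       num_episodes_per_iteration=100,
#       num_training_steps_per_iteration=50)
#   for _ in range(num_iterations):
#     fp.iteration()  # Collects BR samples and trains the average network.
#   avg_policy = fp.policy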
| open_spiel-master | open_spiel/python/mfg/algorithms/average_network_fictitious_play.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Munchausen deep online mirror descent."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import munchausen_deep_mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
class DeepOnlineMirrorDescentTest(parameterized.TestCase):
@parameterized.named_parameters(('cpp', 'mfg_crowd_modelling'),
('python', 'python_mfg_crowd_modelling'))
def test_train(self, name):
"""Checks that the training works."""
game = pyspiel.load_game(name)
assert game.num_players() == 1
uniform_policy = policy.UniformRandomPolicy(game)
uniform_dist = distribution.DistributionPolicy(game, uniform_policy)
env = rl_environment.Environment(
game, mfg_distribution=uniform_dist, mfg_population=0)
info_state_size = env.observation_spec()['info_state'][0]
num_actions = env.action_spec()['num_actions']
np.random.seed(0)
args = {
'alpha': 0.9,
'batch_size': 128,
'discount_factor': 1.0,
'epsilon_decay_duration': 20000000,
'epsilon_end': 0.1,
'epsilon_start': 0.1,
'gradient_clipping': 40,
'hidden_layers_sizes': [128, 128],
'learn_every': 64,
'learning_rate': 0.01,
'loss': 'mse',
'min_buffer_size_to_learn': 500,
'optimizer': 'adam',
'replay_buffer_capacity': 2000,
'tau': 10,
'update_target_network_every': 50
}
agent = munchausen_deep_mirror_descent.MunchausenDQN(
0, info_state_size, num_actions, **args)
md = munchausen_deep_mirror_descent.DeepOnlineMirrorDescent(
game, [env], [agent], num_episodes_per_iteration=100)
for _ in range(10):
md.iteration()
nash_conv_md = nash_conv.NashConv(game, md.policy)
self.assertLessEqual(nash_conv_md.nash_conv(), 3)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/munchausen_deep_mirror_descent_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Nash Conv metric for a policy.
In the context of mean field games, the Nash Conv is the difference between:
- the value of a policy against the distribution of that policy,
- and the best response against the distribution of the policy.
"""
from open_spiel.python import policy as policy_std
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import policy_value
class NashConv(object):
"""Computes the Nash Conv of a policy."""
def __init__(self, game, policy: policy_std.Policy, root_state=None):
"""Initializes the nash conv.
Args:
game: The game to analyze.
policy: A `policy.Policy` object.
root_state: The state of the game at which to start. If `None`, the game
root state is used.
"""
self._game = game
self._policy = policy
if root_state is None:
self._root_states = game.new_initial_states()
else:
self._root_states = [root_state]
self._distrib = distribution.DistributionPolicy(
self._game, self._policy, root_state=root_state)
self._pi_value = policy_value.PolicyValue(
self._game,
self._distrib,
self._policy,
value.TabularValueFunction(self._game),
root_state=root_state)
self._br_value = best_response_value.BestResponse(
self._game,
self._distrib,
value.TabularValueFunction(self._game),
root_state=root_state)
def nash_conv(self):
"""Returns the nash conv.
Returns:
A float representing the nash conv for the policy.
"""
return sum([
self._br_value.eval_state(state) - self._pi_value.eval_state(state)
for state in self._root_states
])
def br_values(self):
"""Returns the best response values to the policy distribution.
Returns:
A List[float] representing the best response values for a policy
distribution.
"""
return [self._br_value.eval_state(state) for state in self._root_states]
@property
def distribution(self) -> distribution.DistributionPolicy:
return self._distrib
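# Example usage (a hedged sketch, as in the tests of this directory):
#
#   game = pyspiel.load_game("python_mfg_crowd_modelling")
#   pi = policy_std.UniformRandomPolicy(game)
#   exploitability = NashConv(game, pi).nash_conv()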
| open_spiel-master | open_spiel/python/mfg/algorithms/nash_conv.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Munchausen Online Mirror Descent."""
from typing import Dict, List, Optional
import numpy as np
from open_spiel.python import policy as policy_lib
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import mirror_descent
import pyspiel
class ProjectedPolicyMunchausen(mirror_descent.ProjectedPolicy):
"""Project values on the policy simplex."""
def __init__(
self,
game: pyspiel.Game,
player_ids: List[int],
state_value: value.ValueFunction,
learning_rate: float,
policy: policy_lib.Policy,
):
"""Initializes the projected policy.
Args:
game: The game to analyze.
player_ids: list of player ids for which this policy applies; each should
be in the range 0..game.num_players()-1.
state_value: The state value to project.
learning_rate: The learning rate.
policy: The policy to project.
"""
super().__init__(game, player_ids, state_value)
self._learning_rate = learning_rate
self._policy = policy
def action_probabilities(self,
state: pyspiel.State,
player_id: Optional[int] = None) -> Dict[int, float]:
del player_id
action_logit = [
(a, self._learning_rate * self.value(state, action=a) + np.log(p))
for a, p in self._policy.action_probabilities(state).items()
]
action, logit = zip(*action_logit)
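    # The resulting policy is proportional to pi_prev(a|s) * exp(lr * Q(s, a)),
    # i.e. a Munchausen (KL-regularized) update around the previous policy.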
return dict(zip(action, mirror_descent.softmax_projection(logit)))
class MunchausenMirrorDescent(mirror_descent.MirrorDescent):
"""Munchausen Online Mirror Descent algorithm.
This algorithm is equivalent to the online mirror descent algorithm but
instead of summing value functions, it directly computes the cumulative
Q-function using a penalty with respect to the previous policy.
"""
def eval_state(self, state: pyspiel.State, learning_rate: float):
"""Evaluate the value of a state."""
state_str = state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)
# Return the already calculated value if present.
if self._state_value.has(state_str):
return self._state_value(state_str)
# Otherwise, calculate the value of the state.
v = self.get_state_value(state, learning_rate)
self._state_value.set_value(state_str, v)
return v
def get_projected_policy(self) -> policy_lib.Policy:
"""Returns the projected policy."""
return ProjectedPolicyMunchausen(self._game,
list(range(self._game.num_players())),
self._state_value, self._lr, self._policy)
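# Usage is the same as for mirror_descent.MirrorDescent (a hedged sketch;
# `game` and `num_iterations` are assumed to be defined elsewhere):
#
#   momd = MunchausenMirrorDescent(game, value.TabularValueFunction(game))
#   for _ in range(num_iterations):
#     momd.iteration()
#   momd_policy = momd.get_policy()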
| open_spiel-master | open_spiel/python/mfg/algorithms/munchausen_mirror_descent.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Does a backward pass to output a value of a best response policy."""
from typing import Optional
from open_spiel.python.mfg import distribution as distribution_std
from open_spiel.python.mfg import value
import pyspiel
class BestResponse(value.ValueFunction):
"""Computes a best response value."""
def __init__(self,
game,
distribution: distribution_std.Distribution,
state_value: Optional[value.ValueFunction] = None,
root_state=None):
"""Initializes the best response calculation.
Args:
game: The game to analyze.
distribution: A `distribution_std.Distribution` object.
      state_value: A state value function. Defaults to TabularValueFunction.
root_state: The state of the game at which to start. If `None`, the game
root state is used.
"""
super().__init__(game)
if root_state is None:
self._root_states = game.new_initial_states()
else:
self._root_states = [root_state]
self._distribution = distribution
self._state_value = (state_value if state_value
else value.TabularValueFunction(game))
self.evaluate()
def eval_state(self, state):
"""Evaluate the value of a state.
Args:
state: a game state.
Returns:
the optimal value of the state
Recursively computes the value of the optimal policy given the fixed state
distribution. `self._state_value` is used as a cache for pre-computed
values.
"""
state_str = state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID)
if self._state_value.has(state_str):
return self._state_value(state_str)
if state.is_terminal():
self._state_value.set_value(
state_str,
state.rewards()[state.mean_field_population()])
return self._state_value(state_str)
if state.current_player() == pyspiel.PlayerId.CHANCE:
self._state_value.set_value(state_str, 0.0)
for action, prob in state.chance_outcomes():
new_state = state.child(action)
self._state_value.add_value(state_str,
prob * self.eval_state(new_state))
return self._state_value(state_str)
if state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
dist = [
# We need to default to 0, because
# `state.distribution_support()` might contain states that
# we did not reach yet. These states should be given a
# probability of 0.
self._distribution.value_str(str_state, 0.)
for str_state in state.distribution_support()
]
new_state = state.clone()
new_state.update_distribution(dist)
self._state_value.set_value(
state_str,
state.rewards()[state.mean_field_population()] +
self.eval_state(new_state))
return self._state_value(state_str)
else:
assert int(state.current_player()) >= 0, "The player id should be >= 0"
max_q = max(
self.eval_state(state.child(action))
for action in state.legal_actions())
self._state_value.set_value(
state_str,
state.rewards()[state.mean_field_population()] + max_q)
return self._state_value(state_str)
def evaluate(self):
"""Evaluate the best response value on all states."""
for state in self._root_states:
self.eval_state(state)
def value(self, state, action=None):
if action is None:
return self._state_value(
state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID))
new_state = state.child(action)
return state.rewards()[state.mean_field_population()] + self._state_value(
new_state.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID))
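# Example usage (a hedged sketch, following fictitious_play_test.py):
#
#   from open_spiel.python import policy
#   from open_spiel.python.mfg.algorithms import distribution
#
#   uniform_policy = policy.UniformRandomPolicy(game)
#   mfg_dist = distribution.DistributionPolicy(game, uniform_policy)
#   br_value = BestResponse(game, mfg_dist, value.TabularValueFunction(game))
#   br_value(game.new_initial_state())  # Optimal value from the root state.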
| open_spiel-master | open_spiel/python/mfg/algorithms/best_response_value.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for distribution."""
from absl.testing import absltest
from open_spiel.python import policy
from open_spiel.python.mfg import games # pylint: disable=unused-import
from open_spiel.python.mfg.algorithms import distribution
import pyspiel
class DistributionTest(absltest.TestCase):
def test_basic(self):
game = pyspiel.load_game("python_mfg_crowd_modelling")
uniform_policy = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform_policy)
state = game.new_initial_state().child(0)
self.assertAlmostEqual(dist.value(state), 1 / game.size)
def test_state_support_outside_distrib(self):
game = pyspiel.load_game("mfg_crowd_modelling_2d", {
"initial_distribution": "[0|0]",
"initial_distribution_value": "[1.]",
})
uniform_policy = policy.UniformRandomPolicy(game)
_ = distribution.DistributionPolicy(game, uniform_policy)
def test_multi_pop(self):
game = pyspiel.load_game("python_mfg_predator_prey")
self.assertEqual(game.num_players(), 3)
uniform_policy = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform_policy)
for pop in range(3):
self.assertAlmostEqual(
dist.value(game.new_initial_state_for_population(pop)), 1.)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/distribution_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fictitious play."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python import rl_agent_policy
from open_spiel.python import rl_environment
from open_spiel.python.jax import dqn
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import fictitious_play
from open_spiel.python.mfg.algorithms import greedy_policy
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import crowd_modelling
import pyspiel
class FictitiousPlayTest(parameterized.TestCase):
@parameterized.named_parameters(("python", "python_mfg_crowd_modelling"),
("cpp", "mfg_crowd_modelling"))
def test_run(self, name: str):
"""Checks if fictitious play works."""
game = pyspiel.load_game(name)
fp = fictitious_play.FictitiousPlay(game)
for _ in range(10):
fp.iteration()
fp_policy = fp.get_policy()
nash_conv_fp = nash_conv.NashConv(game, fp_policy)
self.assertAlmostEqual(nash_conv_fp.nash_conv(), 0.991, places=3)
@parameterized.named_parameters(("at_init", True), ("at_each_step", False))
def test_learning_rate(self, at_init: bool):
"""Checks if learning rate works."""
game = crowd_modelling.MFGCrowdModellingGame()
lr = 1.0
fp = fictitious_play.FictitiousPlay(game, lr=lr if at_init else None)
for _ in range(10):
fp.iteration(learning_rate=None if at_init else lr)
fp_policy = fp.get_policy()
nash_conv_fp = nash_conv.NashConv(game, fp_policy)
self.assertAlmostEqual(nash_conv_fp.nash_conv(), 55.745, places=3)
def test_soft_max(self):
"""Checks if soft-max policy works."""
game = crowd_modelling.MFGCrowdModellingGame()
fp = fictitious_play.FictitiousPlay(game, temperature=1)
for _ in range(10):
fp.iteration()
fp_policy = fp.get_policy()
nash_conv_fp = nash_conv.NashConv(game, fp_policy)
self.assertAlmostEqual(nash_conv_fp.nash_conv(), 1.062, places=3)
@parameterized.named_parameters(("python", "python_mfg_crowd_modelling"),
("cpp", "mfg_crowd_modelling"))
def test_dqn(self, name):
"""Checks if fictitious play with DQN-based value function works."""
game = pyspiel.load_game(name)
dfp = fictitious_play.FictitiousPlay(game)
uniform_policy = policy.UniformRandomPolicy(game)
dist = distribution.DistributionPolicy(game, uniform_policy)
envs = [
rl_environment.Environment(
game, mfg_distribution=dist, mfg_population=p)
for p in range(game.num_players())
]
dqn_agent = dqn.DQN(
0,
state_representation_size=envs[0].observation_spec()["info_state"][0],
num_actions=envs[0].action_spec()["num_actions"],
hidden_layers_sizes=[256, 128, 64],
replay_buffer_capacity=100,
batch_size=5,
epsilon_start=0.02,
epsilon_end=0.01)
br_policy = rl_agent_policy.RLAgentPolicy(
game, dqn_agent, 0, use_observation=True)
for _ in range(10):
dfp.iteration(br_policy=br_policy)
dfp_policy = dfp.get_policy()
nash_conv_dfp = nash_conv.NashConv(game, dfp_policy)
self.assertAlmostEqual(nash_conv_dfp.nash_conv(), 1.056, places=3)
def test_average(self):
"""Test the average of policies.
Here we test that the average of values is the value of the average policy.
"""
game = crowd_modelling.MFGCrowdModellingGame()
uniform_policy = policy.UniformRandomPolicy(game)
mfg_dist = distribution.DistributionPolicy(game, uniform_policy)
br_value = best_response_value.BestResponse(
game, mfg_dist, value.TabularValueFunction(game))
py_value = policy_value.PolicyValue(game, mfg_dist, uniform_policy,
value.TabularValueFunction(game))
greedy_pi = greedy_policy.GreedyPolicy(game, None, br_value)
greedy_pi = greedy_pi.to_tabular()
merged_pi = fictitious_play.MergedPolicy(
game, list(range(game.num_players())), [uniform_policy, greedy_pi],
[mfg_dist, distribution.DistributionPolicy(game, greedy_pi)],
[0.5, 0.5])
merged_pi_value = policy_value.PolicyValue(game, mfg_dist, merged_pi,
value.TabularValueFunction(game))
self.assertAlmostEqual(
merged_pi_value(game.new_initial_state()),
(br_value(game.new_initial_state()) +
py_value(game.new_initial_state())) / 2)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/fictitious_play_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fixed Point."""
from typing import Optional
from open_spiel.python import policy as policy_lib
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import greedy_policy
from open_spiel.python.mfg.algorithms import softmax_policy
import pyspiel
class FixedPoint(object):
"""The fixed point algorithm.
This algorithm is based on Banach-Picard iterations for the fixed point
  operator characterizing the Nash equilibrium. At each iteration, the policy
  is updated by computing a best response against the current mean field (or a
  regularized version obtained by taking a softmax with respect to the optimal
  Q-function), and the mean field is updated to the distribution induced by the
  current policy.
"""
def __init__(self, game: pyspiel.Game, temperature: Optional[float] = None):
"""Initializes the algorithm.
Args:
game: The game to analyze.
temperature: If set, then instead of the greedy policy a softmax policy
with the specified temperature will be used to update the policy at each
iteration.
"""
self._game = game
self._temperature = temperature
self._policy = policy_lib.UniformRandomPolicy(self._game)
self._distribution = distribution.DistributionPolicy(game, self._policy)
def iteration(self):
"""An itertion of Fixed Point."""
# Calculate the current distribution and the best response.
distrib = distribution.DistributionPolicy(self._game, self._policy)
br_value = best_response_value.BestResponse(
self._game, distrib, value.TabularValueFunction(self._game))
    # The new policy is greedy with respect to the best response value, or a
    # softmax of it if a temperature is specified.
player_ids = list(range(self._game.num_players()))
if self._temperature is None:
self._policy = greedy_policy.GreedyPolicy(self._game, player_ids,
br_value)
else:
self._policy = softmax_policy.SoftmaxPolicy(self._game, player_ids,
self._temperature, br_value)
self._distribution = distribution.DistributionPolicy(
self._game, self._policy)
def get_policy(self) -> policy_lib.Policy:
return self._policy
@property
def distribution(self) -> distribution.DistributionPolicy:
return self._distribution
| open_spiel-master | open_spiel/python/mfg/algorithms/fixed_point.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Boltzmann Policy Iteration."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import boltzmann_policy_iteration
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
class BoltzmannPolicyIterationTest(parameterized.TestCase):
@parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'),
('cpp', 'mfg_crowd_modelling'))
def test_run(self, name):
"""Checks if the algorithm works."""
game = pyspiel.load_game(name)
bpi = boltzmann_policy_iteration.BoltzmannPolicyIteration(
game, value.TabularValueFunction(game))
for _ in range(10):
bpi.iteration()
bpi_policy = bpi.get_policy()
nash_conv_bpi = nash_conv.NashConv(game, bpi_policy)
self.assertAlmostEqual(nash_conv_bpi.nash_conv(), 2.75428, places=5)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/boltzmann_policy_iteration_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Munchausen Online Mirror Descent."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.mfg import value
from open_spiel.python.mfg.algorithms import munchausen_mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.games import crowd_modelling # pylint: disable=unused-import
import pyspiel
class MunchausenMirrorDescentTest(parameterized.TestCase):
@parameterized.named_parameters(('python', 'python_mfg_crowd_modelling'),
('cpp', 'mfg_crowd_modelling'))
def test_run(self, name):
"""Checks if the algorithm works."""
game = pyspiel.load_game(name)
md = munchausen_mirror_descent.MunchausenMirrorDescent(
game, value.TabularValueFunction(game))
for _ in range(10):
md.iteration()
md_policy = md.get_policy()
nash_conv_md = nash_conv.NashConv(game, md_policy)
self.assertAlmostEqual(nash_conv_md.nash_conv(), 2.27366, places=5)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/algorithms/munchausen_mirror_descent_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run deep online mirror descent algorithm with Munchausen DQN agents."""
import os
from typing import Sequence
from absl import flags
import jax
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.mfg import utils
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import munchausen_deep_mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import factory
from open_spiel.python.utils import app
from open_spiel.python.utils import metrics
FLAGS = flags.FLAGS
flags.DEFINE_string("game_name", "mfg_crowd_modelling_2d", "Name of the game.")
_ENV_SETTING = flags.DEFINE_string(
"env_setting",
"crowd_modelling_2d_four_rooms",
"Name of the game settings. If None, the game name will be used.",
)
# Training options.
_BATCH_SIZE = flags.DEFINE_integer(
"batch_size", 128, "Number of transitions to sample at each learning step."
)
_LEARN_EVERY = flags.DEFINE_integer(
"learn_every", 64, "Number of steps between learning updates."
)
_NUM_EPISODES_PER_ITERATION = flags.DEFINE_integer(
"num_episodes_per_iteration",
1000,
"Number of training eepisodes for each iteration.",
)
flags.DEFINE_integer("num_iterations", 100, "Number of iterations.")
_EPSILON_DECAY_DURATION = flags.DEFINE_integer(
"epsilon_decay_duration",
100000,
"Number of game steps over which epsilon is decayed.",
)
flags.DEFINE_float("epsilon_power", 1, "Power for the epsilon decay.")
flags.DEFINE_float("epsilon_start", 0.1, "Starting exploration parameter.")
flags.DEFINE_float("epsilon_end", 0.1, "Final exploration parameter.")
_DISCOUNT_FACTOR = flags.DEFINE_float(
"discount_factor", 1.0, "Discount factor for future rewards."
)
_RESET_REPLAY_BUFFER_ON_UPDATE = flags.DEFINE_bool(
"reset_replay_buffer_on_update",
False,
"Reset the replay buffer when the softmax policy is updated.",
)
flags.DEFINE_integer("seed", 42, "Training seed.")
# Evaluation options.
_EVAL_EVERY = flags.DEFINE_integer(
"eval_every", 200, "Episode frequency at which the agents are evaluated."
)
# Network options.
_HIDDEN_LAYERS_SIZES = flags.DEFINE_list(
"hidden_layers_sizes",
["128", "128"],
"Number of hidden units in the avg-net and Q-net.",
)
_UPDATE_TARGET_NETWORK_EVERY = flags.DEFINE_integer(
"update_target_network_every",
200,
"Number of steps between DQN target network updates.",
)
# Replay buffer options.
_REPLAY_BUFFER_CAPACITY = flags.DEFINE_integer(
"replay_buffer_capacity", 40000, "Size of the replay buffer."
)
_MIN_BUFFER_SIZE_TO_LEARN = flags.DEFINE_integer(
"min_buffer_size_to_learn",
1000,
"Number of samples in buffer before learning begins.",
)
# Loss and optimizer options.
flags.DEFINE_enum("optimizer", "adam", ["sgd", "adam"], "Optimizer.")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate for inner rl agent.")
flags.DEFINE_enum("loss", "mse", ["mse", "huber"], "Loss function.")
flags.DEFINE_float("huber_loss_parameter", 1.0, "Parameter for Huber loss.")
flags.DEFINE_float("gradient_clipping", None, "Value to clip the gradient to.")
# Munchausen options.
flags.DEFINE_float("tau", 10, "Temperature parameter in Munchausen target.")
flags.DEFINE_float("alpha", 0.99, "Alpha parameter in Munchausen target.")
_WITH_MUNCHAUSEN = flags.DEFINE_bool(
"with_munchausen", True, "If true, target uses Munchausen penalty terms."
)
# Logging options.
flags.DEFINE_bool("use_checkpoints", False, "Save/load neural network weights.")
_CHECKPOINT_DIR = flags.DEFINE_string(
"checkpoint_dir", "/tmp/dqn_test", "Directory to save/load the agent."
)
_LOGDIR = flags.DEFINE_string(
"logdir",
None,
"Logging dir to use for TF summary files. If None, the metrics will only "
"be logged to stderr.",
)
_LOG_DISTRIBUTION = flags.DEFINE_bool(
"log_distribution", False, "Enables logging of the distribution."
)
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
game = factory.create_game_with_setting(FLAGS.game_name, _ENV_SETTING.value)
num_players = game.num_players()
# Create the environments with uniform initial policy.
uniform_policy = policy.UniformRandomPolicy(game)
uniform_dist = distribution.DistributionPolicy(game, uniform_policy)
envs = [
rl_environment.Environment( # pylint: disable=g-complex-comprehension
game,
mfg_distribution=uniform_dist,
mfg_population=p,
observation_type=rl_environment.ObservationType.OBSERVATION,
)
for p in range(num_players)
]
env = envs[0]
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
# Create the agents.
kwargs = {
"alpha": FLAGS.alpha,
"batch_size": _BATCH_SIZE.value,
"discount_factor": _DISCOUNT_FACTOR.value,
"epsilon_decay_duration": _EPSILON_DECAY_DURATION.value,
"epsilon_end": FLAGS.epsilon_end,
"epsilon_power": FLAGS.epsilon_power,
"epsilon_start": FLAGS.epsilon_start,
"gradient_clipping": FLAGS.gradient_clipping,
"hidden_layers_sizes": [int(l) for l in _HIDDEN_LAYERS_SIZES.value],
"huber_loss_parameter": FLAGS.huber_loss_parameter,
"learn_every": _LEARN_EVERY.value,
"learning_rate": FLAGS.learning_rate,
"loss": FLAGS.loss,
"min_buffer_size_to_learn": _MIN_BUFFER_SIZE_TO_LEARN.value,
"optimizer": FLAGS.optimizer,
"replay_buffer_capacity": _REPLAY_BUFFER_CAPACITY.value,
"reset_replay_buffer_on_update": _RESET_REPLAY_BUFFER_ON_UPDATE.value,
"seed": FLAGS.seed,
"tau": FLAGS.tau,
"update_target_network_every": _UPDATE_TARGET_NETWORK_EVERY.value,
"with_munchausen": _WITH_MUNCHAUSEN.value,
}
agents = [
munchausen_deep_mirror_descent.MunchausenDQN(
p, info_state_size, num_actions, **kwargs
)
for p in range(num_players)
]
# Metrics writer will also log the metrics to stderr.
just_logging = _LOGDIR.value is None or jax.host_id() > 0
writer = metrics.create_default_writer(
logdir=_LOGDIR.value, just_logging=just_logging
)
  # Save the parameters.
writer.write_hparams(kwargs)
def logging_fn(it, episode, vals):
writer.write_scalars(it * num_episodes_per_iteration + episode, vals)
num_episodes_per_iteration = _NUM_EPISODES_PER_ITERATION.value
md = munchausen_deep_mirror_descent.DeepOnlineMirrorDescent(
game,
envs,
agents,
eval_every=_EVAL_EVERY.value,
num_episodes_per_iteration=num_episodes_per_iteration,
logging_fn=logging_fn,
)
def log_metrics(it):
"""Logs the training metrics for each iteration."""
initial_states = game.new_initial_states()
pi_value = policy_value.PolicyValue(game, md.distribution, md.policy)
m = {
f"best_response/{state}": pi_value.eval_state(state)
for state in initial_states
}
nash_conv_md = nash_conv.NashConv(game, md.policy).nash_conv()
m["nash_conv_md"] = nash_conv_md
if _LOG_DISTRIBUTION.value and _LOGDIR.value:
# We log distribution directly to a Pickle file as it may be large for
# logging as a metric.
filename = os.path.join(_LOGDIR.value, f"distribution_{it}.pkl")
utils.save_parametric_distribution(md.distribution, filename)
logging_fn(it, 0, m)
log_metrics(0)
for it in range(1, FLAGS.num_iterations + 1):
md.iteration()
log_metrics(it)
# Make sure all values were written.
writer.flush()
if __name__ == "__main__":
jax.config.parse_flags_with_absl()
app.run(main)
| open_spiel-master | open_spiel/python/mfg/examples/mfg_munchausen_domd_jax.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs Deep Average-network Fictitious Play with DQN agents."""
import os
from typing import Sequence
from absl import flags
import jax
from open_spiel.python import policy as policy_std
from open_spiel.python import rl_environment
from open_spiel.python.jax import dqn
from open_spiel.python.mfg import utils
from open_spiel.python.mfg.algorithms import average_network_fictitious_play
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import factory
from open_spiel.python.utils import app
from open_spiel.python.utils import metrics
from open_spiel.python.utils import training
_GAME_NAME = flags.DEFINE_string('game_name', 'mfg_crowd_modelling_2d',
'Name of the game.')
_ENV_SETTING = flags.DEFINE_string(
'env_setting', 'mfg_crowd_modelling_2d',
'Name of the game settings. If None, the game name will be used.')
_LOGDIR = flags.DEFINE_string(
'logdir', None,
'Logging dir to use for TF summary files. If None, the metrics will only '
'be logged to stderr.')
_LOG_DISTRIBUTION = flags.DEFINE_bool('log_distribution', False,
'Enables logging of the distribution.')
_NUM_ITERATIONS = flags.DEFINE_integer('num_iterations', 100,
'Number of iterations.')
_EVAL_EVERY = flags.DEFINE_integer(
'eval_every', 200, 'Episode frequency at which the agents are evaluated.')
# Flags for best response RL (DQN) agent.
# Training options.
_BATCH_SIZE = flags.DEFINE_integer(
'batch_size', 128, 'Number of transitions to sample at each learning step.')
_LEARN_EVERY = flags.DEFINE_integer(
'learn_every', 40, 'Number of steps between learning updates.')
_NUM_DQN_EPISODES_PER_ITERATION = flags.DEFINE_integer(
'num_dqn_episodes_per_iteration', 3000,
'Number of DQN training episodes for each iteration.')
_EPSILON_DECAY_DURATION = flags.DEFINE_integer(
'epsilon_decay_duration', int(20e6),
'Number of game steps over which epsilon is decayed.')
_EPSILON_START = flags.DEFINE_float('epsilon_start', 0.1,
'Starting exploration parameter.')
_EPSILON_END = flags.DEFINE_float('epsilon_end', 0.1,
'Final exploration parameter.')
_DISCOUNT_FACTOR = flags.DEFINE_float('discount_factor', 1.0,
'Discount factor for future rewards.')
_SEED = flags.DEFINE_integer('seed', 42, 'Training seed.')
# Network options.
_HIDDEN_LAYERS_SIZES = flags.DEFINE_list(
'hidden_layers_sizes', ['128', '128'],
'Number of hidden units in the Q-net.')
_UPDATE_TARGET_NETWORK_EVERY = flags.DEFINE_integer(
'update_target_network_every', 200,
'Number of steps between DQN target network updates.')
# Replay buffer options.
_REPLAY_BUFFER_CAPACITY = flags.DEFINE_integer('replay_buffer_capacity', 5000,
'Size of the replay buffer.')
_MIN_BUFFER_SIZE_TO_LEARN = flags.DEFINE_integer(
'min_buffer_size_to_learn', 200,
'Number of samples in buffer before learning begins.')
# Loss and optimizer options.
_OPTIMIZER = flags.DEFINE_enum('optimizer', 'adam', ['sgd', 'adam'],
'Optimizer.')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 0.001,
'Learning rate for inner rl agent.')
_LOSS = flags.DEFINE_enum('loss', 'mse', ['mse', 'huber'], 'Loss function.')
_HUBER_LOSS_PARAMETER = flags.DEFINE_float('huber_loss_parameter', 1.0,
'Parameter for Huber loss.')
_GRADIENT_CLIPPING = flags.DEFINE_float('gradient_clipping', 40,
'Value to clip the gradient to.')
# Flags for average policy RL agent.
# Training options.
_AVG_POL_BATCH_SIZE = flags.DEFINE_integer(
'avg_pol_batch_size', 128,
'Number of transitions to sample at each learning step.')
_AVG_POL_NUM_TRAINING_STEPS_PER_ITERATION = flags.DEFINE_integer(
'avg_pol_num_training_steps_per_iteration', 2000,
'Number of steps for average policy at each FP iteration.')
_AVG_POL_NUM_EPISODES_PER_ITERATION = flags.DEFINE_integer(
'avg_pol_num_episodes_per_iteration', 100,
'Number of samples to store at each FP iteration.')
# Network options.
_AVG_POL_HIDDEN_LAYERS_SIZES = flags.DEFINE_list(
'avg_pol_hidden_layers_sizes', ['128', '128'],
'Number of hidden units in the avg-net and Q-net.')
# Reservoir buffer options.
_AVG_POL_RESERVOIR_BUFFER_CAPACITY = flags.DEFINE_integer(
'avg_pol_reservoir_buffer_capacity', 100000000,
'Size of the reservoir buffer.')
_AVG_POL_MIN_BUFFER_SIZE_TO_LEARN = flags.DEFINE_integer(
'avg_pol_min_buffer_size_to_learn', 100,
'Number of samples in buffer before learning begins.')
# Loss and optimizer options.
_AVG_POL_OPTIMIZER = flags.DEFINE_enum('avg_pol_optimizer', 'sgd',
['sgd', 'adam'], 'Optimizer.')
_AVG_POL_LEARNING_RATE = flags.DEFINE_float(
'avg_pol_learning_rate', 0.01, 'Learning rate for inner rl agent.')
_AVG_GRADIENT_CLIPPING = flags.DEFINE_float('avg_gradient_clipping', 100,
'Value to clip the gradient to.')
_AVG_POL_TAU = flags.DEFINE_float('avg_pol_tau', 10.0,
'Temperature for softmax in policy.')
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
game = factory.create_game_with_setting(_GAME_NAME.value, _ENV_SETTING.value)
num_players = game.num_players()
# Create the environments with uniform initial policy.
uniform_policy = policy_std.UniformRandomPolicy(game)
uniform_dist = distribution.DistributionPolicy(game, uniform_policy)
envs = [
rl_environment.Environment(
game, mfg_distribution=uniform_dist, mfg_population=p)
for p in range(num_players)
]
env = envs[0]
info_state_size = env.observation_spec()['info_state'][0]
num_actions = env.action_spec()['num_actions']
# Best response policy agents.
kwargs_dqn = {
'batch_size': _BATCH_SIZE.value,
'discount_factor': _DISCOUNT_FACTOR.value,
'epsilon_decay_duration': _EPSILON_DECAY_DURATION.value,
'epsilon_end': _EPSILON_END.value,
'epsilon_start': _EPSILON_START.value,
'gradient_clipping': _GRADIENT_CLIPPING.value,
'hidden_layers_sizes': [int(l) for l in _HIDDEN_LAYERS_SIZES.value],
'huber_loss_parameter': _HUBER_LOSS_PARAMETER.value,
'learn_every': _LEARN_EVERY.value,
'learning_rate': _LEARNING_RATE.value,
'loss_str': _LOSS.value,
'min_buffer_size_to_learn': _MIN_BUFFER_SIZE_TO_LEARN.value,
'optimizer_str': _OPTIMIZER.value,
'replay_buffer_capacity': _REPLAY_BUFFER_CAPACITY.value,
'seed': _SEED.value,
'update_target_network_every': _UPDATE_TARGET_NETWORK_EVERY.value,
}
br_rl_agents = [
dqn.DQN(p, info_state_size, num_actions, **kwargs_dqn)
for p in range(num_players)
]
num_training_steps_per_iteration = (
_AVG_POL_NUM_TRAINING_STEPS_PER_ITERATION.value)
# Metrics writer will also log the metrics to stderr.
just_logging = _LOGDIR.value is None or jax.host_id() > 0
writer = metrics.create_default_writer(
_LOGDIR.value, just_logging=just_logging)
def logging_fn(it, step, vals):
writer.write_scalars(it * num_training_steps_per_iteration + step, vals)
# Average policy agents.
kwargs_avg = {
'batch_size': _AVG_POL_BATCH_SIZE.value,
'hidden_layers_sizes': [
int(l) for l in _AVG_POL_HIDDEN_LAYERS_SIZES.value
],
'reservoir_buffer_capacity': _AVG_POL_RESERVOIR_BUFFER_CAPACITY.value,
'learning_rate': _AVG_POL_LEARNING_RATE.value,
'min_buffer_size_to_learn': _AVG_POL_MIN_BUFFER_SIZE_TO_LEARN.value,
'optimizer_str': _AVG_POL_OPTIMIZER.value,
'gradient_clipping': _AVG_GRADIENT_CLIPPING.value,
'seed': _SEED.value,
'tau': _AVG_POL_TAU.value
}
fp = average_network_fictitious_play.AverageNetworkFictitiousPlay(
game,
envs,
br_rl_agents,
_AVG_POL_NUM_EPISODES_PER_ITERATION.value,
num_training_steps_per_iteration,
eval_every=_EVAL_EVERY.value,
logging_fn=logging_fn,
**kwargs_avg)
def log_metrics(it):
"""Logs the training metrics for each iteration."""
initial_states = game.new_initial_states()
distrib = distribution.DistributionPolicy(game, fp.policy)
pi_value = policy_value.PolicyValue(game, distrib, fp.policy)
m = {
f'best_response/{state}': pi_value.eval_state(state)
for state in initial_states
}
m.update({
f'br_agent{i}/loss': agent.loss for i, agent in enumerate(br_rl_agents)
})
nash_conv_fp = nash_conv.NashConv(game, fp.policy)
m['nash_conv_fp'] = nash_conv_fp.nash_conv()
logging_fn(it, 0, m)
# Also save the distribution.
if _LOG_DISTRIBUTION.value and not just_logging:
filename = os.path.join(_LOGDIR.value, f'distribution_{it}.pkl')
utils.save_parametric_distribution(nash_conv_fp.distribution, filename)
for it in range(_NUM_ITERATIONS.value):
# Train the RL agent to learn a best response.
training.run_episodes(
envs,
br_rl_agents,
num_episodes=_NUM_DQN_EPISODES_PER_ITERATION.value,
is_evaluation=False)
# Run an iteration of average-network fictitious play and log the metrics.
fp.iteration()
log_metrics(it + 1)
# Make sure all values were written.
writer.flush()
if __name__ == '__main__':
jax.config.parse_flags_with_absl()
app.run(main)
| open_spiel-master | open_spiel/python/mfg/examples/mfg_average_network_fp_jax.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agents trained on an MFG against a crowd following a uniform policy."""
from absl import flags
import jax
from open_spiel.python import policy
from open_spiel.python import rl_agent_policy
from open_spiel.python import rl_environment
from open_spiel.python.jax import dqn
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import factory
from open_spiel.python.utils import app
from open_spiel.python.utils import metrics
FLAGS = flags.FLAGS
flags.DEFINE_string("game_name", "python_mfg_predator_prey",
"Name of the game.")
flags.DEFINE_string(
"env_setting", None,
"Name of the game settings. If None, the game name will be used.")
flags.DEFINE_integer("num_train_episodes", int(20e6),
"Number of training episodes.")
flags.DEFINE_integer("eval_every", 10000,
"Episode frequency at which the agents are evaluated.")
flags.DEFINE_list("hidden_layers_sizes", [
128,
128,
], "Number of hidden units in the avg-net and Q-net.")
flags.DEFINE_integer("replay_buffer_capacity", int(2e5),
"Size of the replay buffer.")
flags.DEFINE_integer("min_buffer_size_to_learn", 1000,
"Number of samples in buffer before learning begins.")
flags.DEFINE_integer("batch_size", 128,
"Number of transitions to sample at each learning step.")
flags.DEFINE_integer("learn_every", 64,
"Number of steps between learning updates.")
flags.DEFINE_float("rl_learning_rate", 0.01,
"Learning rate for inner rl agent.")
flags.DEFINE_string("optimizer_str", "sgd",
"Optimizer, choose from 'adam', 'sgd'.")
flags.DEFINE_string("loss_str", "mse",
"Loss function, choose from 'mse', 'huber'.")
flags.DEFINE_integer("update_target_network_every", 19200,
"Number of steps between DQN target network updates.")
flags.DEFINE_float("discount_factor", 1.0,
"Discount factor for future rewards.")
flags.DEFINE_integer("epsilon_decay_duration", int(20e6),
"Number of game steps over which epsilon is decayed.")
flags.DEFINE_float("epsilon_start", 0.1, "Starting exploration parameter.")
flags.DEFINE_float("epsilon_end", 0.1, "Final exploration parameter.")
flags.DEFINE_bool("use_checkpoints", False, "Save/load neural network weights.")
flags.DEFINE_string("checkpoint_dir", "/tmp/dqn_test",
"Directory to save/load the agent.")
flags.DEFINE_string(
"logdir", None,
"Logging dir to use for TF summary files. If None, the metrics will only "
"be logged to stderr.")
def main(unused_argv):
game = factory.create_game_with_setting(FLAGS.game_name, FLAGS.env_setting)
uniform_policy = policy.UniformRandomPolicy(game)
mfg_dist = distribution.DistributionPolicy(game, uniform_policy)
envs = [
rl_environment.Environment(
game, mfg_distribution=mfg_dist, mfg_population=p)
for p in range(game.num_players())
]
info_state_size = envs[0].observation_spec()["info_state"][0]
num_actions = envs[0].action_spec()["num_actions"]
hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
kwargs = {
"replay_buffer_capacity": FLAGS.replay_buffer_capacity,
"min_buffer_size_to_learn": FLAGS.min_buffer_size_to_learn,
"batch_size": FLAGS.batch_size,
"learn_every": FLAGS.learn_every,
"learning_rate": FLAGS.rl_learning_rate,
"optimizer_str": FLAGS.optimizer_str,
"loss_str": FLAGS.loss_str,
"update_target_network_every": FLAGS.update_target_network_every,
"discount_factor": FLAGS.discount_factor,
"epsilon_decay_duration": FLAGS.epsilon_decay_duration,
"epsilon_start": FLAGS.epsilon_start,
"epsilon_end": FLAGS.epsilon_end,
}
# pylint: disable=g-complex-comprehension
agents = [
dqn.DQN(idx, info_state_size, num_actions, hidden_layers_sizes, **kwargs)
for idx in range(game.num_players())
]
joint_avg_policy = rl_agent_policy.JointRLAgentPolicy(
game, {idx: agent for idx, agent in enumerate(agents)},
envs[0].use_observation)
if FLAGS.use_checkpoints:
for agent in agents:
if agent.has_checkpoint(FLAGS.checkpoint_dir):
agent.restore(FLAGS.checkpoint_dir)
# Metrics writer will also log the metrics to stderr.
just_logging = FLAGS.logdir is None or jax.host_id() > 0
writer = metrics.create_default_writer(
logdir=FLAGS.logdir, just_logging=just_logging)
# Save the parameters.
writer.write_hparams(kwargs)
for ep in range(1, FLAGS.num_train_episodes + 1):
if ep % FLAGS.eval_every == 0:
writer.write_scalars(ep, {
f"agent{i}/loss": float(agent.loss) for i, agent in enumerate(agents)
})
initial_states = game.new_initial_states()
# Exact best response to uniform.
nash_conv_obj = nash_conv.NashConv(game, uniform_policy)
writer.write_scalars(
ep, {
f"exact_br/{state}": value
for state, value in zip(initial_states, nash_conv_obj.br_values())
})
# DQN best response to uniform.
pi_value = policy_value.PolicyValue(game, mfg_dist, joint_avg_policy)
writer.write_scalars(ep, {
f"dqn_br/{state}": pi_value.eval_state(state)
for state in initial_states
})
if FLAGS.use_checkpoints:
for agent in agents:
agent.save(FLAGS.checkpoint_dir)
for p in range(game.num_players()):
time_step = envs[p].reset()
while not time_step.last():
agent_output = agents[p].step(time_step)
action_list = [agent_output.action]
time_step = envs[p].step(action_list)
# Episode is over, step all agents with final info state.
agents[p].step(time_step)
# Make sure all values were written.
writer.flush()
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/mfg/examples/mfg_dqn_jax.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fictitious play on an MFG game."""
import os
from typing import Sequence
from absl import flags
from open_spiel.python.mfg import utils
from open_spiel.python.mfg.algorithms import fictitious_play
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.games import factory
from open_spiel.python.utils import app
from open_spiel.python.utils import metrics
FLAGS = flags.FLAGS
flags.DEFINE_string('game_name', 'mfg_crowd_modelling_2d', 'Name of the game.')
flags.DEFINE_string(
'setting', None,
'Name of the game settings. If None, the game name will be used.')
flags.DEFINE_integer('num_iterations', 100,
'Number of fictitious play iterations.')
flags.DEFINE_float('learning_rate', None,
'Learning rate. If not, it will be set to 1/iteration.')
_LOGDIR = flags.DEFINE_string(
'logdir', None,
'Logging dir to use for TF summary files. If None, the metrics will only '
'be logged to stderr.')
_LOG_DISTRIBUTION = flags.DEFINE_bool('log_distribution', False,
'Enables logging of the distribution.')
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
game = factory.create_game_with_setting(FLAGS.game_name, FLAGS.setting)
# Metrics writer will also log the metrics to stderr.
just_logging = _LOGDIR.value is None
writer = metrics.create_default_writer(
logdir=_LOGDIR.value, just_logging=just_logging)
# Save the parameters.
learning_rate = FLAGS.learning_rate
writer.write_hparams({'learning_rate': learning_rate})
fp = fictitious_play.FictitiousPlay(game)
for it in range(FLAGS.num_iterations):
fp.iteration(learning_rate=learning_rate)
fp_policy = fp.get_policy()
nash_conv_fp = nash_conv.NashConv(game, fp_policy)
exploitability = nash_conv_fp.nash_conv()
writer.write_scalars(it, {'exploitability': exploitability})
if _LOG_DISTRIBUTION.value and not just_logging:
filename = os.path.join(_LOGDIR.value, f'distribution_{it}.pkl')
utils.save_parametric_distribution(nash_conv_fp.distribution, filename)
writer.flush()
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/mfg/examples/mfg_fictitious_play.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mirror descent on an MFG game."""
import os
from typing import Sequence
from absl import flags
from open_spiel.python.mfg import utils
from open_spiel.python.mfg.algorithms import mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.games import factory
from open_spiel.python.utils import app
from open_spiel.python.utils import metrics
FLAGS = flags.FLAGS
_GAME_NAME = flags.DEFINE_string('game_name', 'mfg_crowd_modelling_2d',
'Name of the game.')
_SETTING = flags.DEFINE_string(
'setting', None,
'Name of the game settings. If None, the game name will be used.')
_NUM_ITERATIONS = flags.DEFINE_integer('num_iterations', 100,
'Number of mirror descent iterations.')
_LEARNING_RATE = flags.DEFINE_float('learning_rate', 0.01, 'Learning rate.')
_LOGDIR = flags.DEFINE_string(
'logdir', None,
'Logging dir to use for TF summary files. If None, the metrics will only '
'be logged to stderr.')
_LOG_DISTRIBUTION = flags.DEFINE_bool('log_distribution', False,
'Enables logging of the distribution.')
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
game = factory.create_game_with_setting(_GAME_NAME.value, _SETTING.value)
# Metrics writer will also log the metrics to stderr.
just_logging = _LOGDIR.value is None
writer = metrics.create_default_writer(
logdir=_LOGDIR.value, just_logging=just_logging)
# Save the parameters.
learning_rate = _LEARNING_RATE.value
writer.write_hparams({'learning_rate': learning_rate})
md = mirror_descent.MirrorDescent(game, lr=learning_rate)
for it in range(_NUM_ITERATIONS.value):
md.iteration()
md_policy = md.get_policy()
exploitability = nash_conv.NashConv(game, md_policy).nash_conv()
writer.write_scalars(it, {'exploitability': exploitability})
if _LOG_DISTRIBUTION.value and not just_logging:
filename = os.path.join(_LOGDIR.value, f'distribution_{it}.pkl')
utils.save_parametric_distribution(md.distribution, filename)
writer.flush()
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/mfg/examples/mfg_mirror_descent.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deep Fictitious Play using DQN agents trained on an MFG."""
from absl import flags
import jax
from open_spiel.python import policy
from open_spiel.python import rl_agent_policy
from open_spiel.python import rl_environment
from open_spiel.python.jax import dqn
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import fictitious_play
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import factory
from open_spiel.python.utils import app
from open_spiel.python.utils import metrics
FLAGS = flags.FLAGS
flags.DEFINE_string("game_name", "python_mfg_predator_prey",
"Name of the game.")
flags.DEFINE_string(
"env_setting", None,
"Name of the game settings. If None, the game name will be used.")
flags.DEFINE_integer("num_iterations", 100,
"Number of fictitious play iterations.")
flags.DEFINE_integer("num_episodes_per_iteration", 1000,
"Number of training eepisodes for each iteration.")
flags.DEFINE_list("hidden_layers_sizes", [
128,
128,
], "Number of hidden units in the avg-net and Q-net.")
flags.DEFINE_integer("replay_buffer_capacity", int(2e5),
"Size of the replay buffer.")
flags.DEFINE_integer("min_buffer_size_to_learn", 1000,
"Number of samples in buffer before learning begins.")
flags.DEFINE_integer("batch_size", 128,
"Number of transitions to sample at each learning step.")
flags.DEFINE_integer("learn_every", 64,
"Number of steps between learning updates.")
flags.DEFINE_float("rl_learning_rate", 0.01,
"Learning rate for inner rl agent.")
flags.DEFINE_string("optimizer_str", "sgd",
"Optimizer, choose from 'adam', 'sgd'.")
flags.DEFINE_string("loss_str", "mse",
"Loss function, choose from 'mse', 'huber'.")
flags.DEFINE_integer("update_target_network_every", 400,
"Number of steps between DQN target network updates.")
flags.DEFINE_float("discount_factor", 1.0,
"Discount factor for future rewards.")
flags.DEFINE_integer("epsilon_decay_duration", int(20e6),
"Number of game steps over which epsilon is decayed.")
flags.DEFINE_float("epsilon_start", 0.1, "Starting exploration parameter.")
flags.DEFINE_float("epsilon_end", 0.1, "Final exploration parameter.")
flags.DEFINE_bool("use_checkpoints", False, "Save/load neural network weights.")
flags.DEFINE_string("checkpoint_dir", "/tmp/dqn_test",
"Directory to save/load the agent.")
flags.DEFINE_string(
"logdir", None,
"Logging dir to use for TF summary files. If None, the metrics will only "
"be logged to stderr.")
def main(unused_argv):
game = factory.create_game_with_setting(FLAGS.game_name, FLAGS.env_setting)
uniform_policy = policy.UniformRandomPolicy(game)
mfg_dist = distribution.DistributionPolicy(game, uniform_policy)
envs = [
rl_environment.Environment(
game, mfg_distribution=mfg_dist, mfg_population=p)
for p in range(game.num_players())
]
info_state_size = envs[0].observation_spec()["info_state"][0]
num_actions = envs[0].action_spec()["num_actions"]
hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
kwargs = {
"replay_buffer_capacity": FLAGS.replay_buffer_capacity,
"min_buffer_size_to_learn": FLAGS.min_buffer_size_to_learn,
"batch_size": FLAGS.batch_size,
"learn_every": FLAGS.learn_every,
"learning_rate": FLAGS.rl_learning_rate,
"optimizer_str": FLAGS.optimizer_str,
"loss_str": FLAGS.loss_str,
"update_target_network_every": FLAGS.update_target_network_every,
"discount_factor": FLAGS.discount_factor,
"epsilon_decay_duration": FLAGS.epsilon_decay_duration,
"epsilon_start": FLAGS.epsilon_start,
"epsilon_end": FLAGS.epsilon_end,
}
# pylint: disable=g-complex-comprehension
agents = [
dqn.DQN(idx, info_state_size, num_actions, hidden_layers_sizes, **kwargs)
for idx in range(game.num_players())
]
joint_avg_policy = rl_agent_policy.JointRLAgentPolicy(
game, {idx: agent for idx, agent in enumerate(agents)},
envs[0].use_observation)
if FLAGS.use_checkpoints:
for agent in agents:
if agent.has_checkpoint(FLAGS.checkpoint_dir):
agent.restore(FLAGS.checkpoint_dir)
# Metrics writer will also log the metrics to stderr.
just_logging = FLAGS.logdir is None or jax.host_id() > 0
writer = metrics.create_default_writer(
logdir=FLAGS.logdir, just_logging=just_logging)
# Save the parameters.
writer.write_hparams(kwargs)
fp = fictitious_play.FictitiousPlay(game)
num_episodes_per_iteration = FLAGS.num_episodes_per_iteration
def log_metrics(it, episode=0):
initial_states = game.new_initial_states()
fp_policy = fp.get_policy()
distrib = distribution.DistributionPolicy(game, fp_policy)
pi_value = policy_value.PolicyValue(game, distrib, fp_policy)
m = {
f"dqn_br/{state}": pi_value.eval_state(state)
for state in initial_states
}
# Loss will be None at the beginning.
if agents[0].loss is not None:
m.update({
f"agent{i}/loss": float(agent.loss) for i, agent in enumerate(agents)
})
nash_conv_fp = nash_conv.NashConv(game, fp_policy).nash_conv()
m["nash_conv_fp"] = nash_conv_fp
# We log using the total number of episode steps so that runs with different
# training regimes are comparable.
writer.write_scalars(it * num_episodes_per_iteration + episode, m)
log_metrics(0)
for it in range(FLAGS.num_iterations):
# Update the Fictitious Play policy.
fp.iteration(br_policy=joint_avg_policy)
# Update the distribution of the environments.
distrib = distribution.DistributionPolicy(game, fp.get_policy())
for env in envs:
env.update_mfg_distribution(distrib)
# Train the RL agent to learn a best response.
for _ in range(num_episodes_per_iteration):
for p in range(game.num_players()):
time_step = envs[p].reset()
while not time_step.last():
agent_output = agents[p].step(time_step)
action_list = [agent_output.action]
time_step = envs[p].step(action_list)
# Episode is over, step all agents with final info state.
agents[p].step(time_step)
# Check point the agents.
if FLAGS.use_checkpoints:
for agent in agents:
agent.save(FLAGS.checkpoint_dir)
# Log the final metrics.
log_metrics(it + 1)
# Make sure all values were written.
writer.flush()
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/mfg/examples/mfg_dqn_fp_jax.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for crowd_modelling_2d."""
from absl.testing import absltest
from open_spiel.python.mfg.games import crowd_modelling_2d
class CrowdModelling2DTest(absltest.TestCase):
def test_grid_to_forbidden_states(self):
forbidden_states = crowd_modelling_2d.grid_to_forbidden_states([
"#####",
"# # #",
"# #",
"#####",
])
self.assertEqual(
forbidden_states,
"[0|0;1|0;2|0;3|0;4|0;0|1;2|1;4|1;0|2;4|2;0|3;1|3;2|3;3|3;4|3]")
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/mfg/games/crowd_modelling_2d_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Mean Field predator-prey game, implemented in Python.
This corresponds to the predator-prey game described in section 5.4 of
"Scaling up Mean Field Games with Online Mirror Descent"
(https://arxiv.org/abs/2103.00623)
The environment is configurable in the following high-level ways:
- Number of populations.
- Reward matrix.
- Initial distribution.
- Geometry (torus, basic square).
"""
import enum
import functools
from typing import Any, List, Mapping, Optional, Tuple
import numpy as np
from open_spiel.python import observation
import pyspiel
from open_spiel.python.utils import shared_value
class Geometry(enum.IntEnum):
SQUARE = 0
TORUS = 1
_DEFAULT_SIZE = 5
_NUM_ACTIONS = 5
_NUM_CHANCE = 5
DEFAULT_REWARD_MATRIX_THREE_POPULATIONS = np.array(
# The first population is attracted to the second and tries to avoid the
# third one.
[[0, -1, 1], [1, 0, -1], [-1, 1, 0]]
)
DEFAULT_REWARD_MATRIX_FOUR_POPULATIONS = np.array(
# The first population is attracted to the second and tries to avoid the
# third one, and so on.
[[0, 1, 0, -1], [-1, 0, 1, 0], [0, -1, 0, 1], [1, 0, -1, 0]]
)
# Each population starts in a corner.
DEFAULT_INIT_DISTRIB_THREE_POPULATIONS = np.array([
# First population
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
# Second population
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
# Third population
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
])
DEFAULT_INIT_DISTRIB_FOUR_POPULATIONS = np.array([
# First population
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
# Second population
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
# Third population
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
# Fourth population
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
])
_DEFAULT_GEOMETRY = Geometry.SQUARE
_DEFAULT_NOISE_PROBABILITY = 0.8
_DEFAULT_CONGESTION_COEFF = 1.0
THREE_POPULATIONS = {
"size": _DEFAULT_SIZE,
"horizon": 10,
"players": 3,
# The reward matrix is represented as a string containing a
# space-separated list of values.
# Its size defines the number of populations in the mean field game.
"reward_matrix": " ".join(
str(v) for v in DEFAULT_REWARD_MATRIX_THREE_POPULATIONS.flatten()
),
"geometry": _DEFAULT_GEOMETRY,
"init_distrib": " ".join(
str(v) for v in DEFAULT_INIT_DISTRIB_THREE_POPULATIONS.flatten()
),
# Probability that the transition is affected by noise
"noise_probability": _DEFAULT_NOISE_PROBABILITY,
# Weight of congestion term in the reward
"congestion_coeff": _DEFAULT_CONGESTION_COEFF,
}
FOUR_POPULATIONS = {
"size": _DEFAULT_SIZE,
"horizon": 20,
"players": 4,
# The reward matrix is represented as a string containing a
# space-separated list of values.
# Its size defines the number of populations in the mean field game.
"reward_matrix": " ".join(
str(v) for v in DEFAULT_REWARD_MATRIX_FOUR_POPULATIONS.flatten()
),
"geometry": _DEFAULT_GEOMETRY,
"init_distrib": " ".join(
str(v) for v in DEFAULT_INIT_DISTRIB_FOUR_POPULATIONS.flatten()
),
# Probability that the transition is affected by noise
"noise_probability": _DEFAULT_NOISE_PROBABILITY,
# Weight of congestion term in the reward
"congestion_coeff": _DEFAULT_CONGESTION_COEFF,
}
_DEFAULT_PARAMS = THREE_POPULATIONS
_GAME_TYPE = pyspiel.GameType(
short_name="python_mfg_predator_prey",
long_name="Python Mean Field Predator Prey",
dynamics=pyspiel.GameType.Dynamics.MEAN_FIELD,
chance_mode=pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC,
information=pyspiel.GameType.Information.PERFECT_INFORMATION,
utility=pyspiel.GameType.Utility.GENERAL_SUM,
reward_model=pyspiel.GameType.RewardModel.REWARDS,
# We cannot pass math.inf here, so we pass a very high integer value.
max_num_players=1000000000,
min_num_players=1,
provides_information_state_string=True,
provides_information_state_tensor=False,
provides_observation_string=True,
provides_observation_tensor=True,
parameter_specification=_DEFAULT_PARAMS,
)
def get_param(param_name, params):
return params.get(param_name, _DEFAULT_PARAMS[param_name])
@functools.lru_cache(maxsize=None)
def _state_to_str(x, y, t, population, player_id):
"""A string that uniquely identify (pos, t, population, player_id)."""
if int(player_id) >= 0:
return f"(pop={population}, t={t}, pos=[{x} {y}])"
if player_id == pyspiel.PlayerId.MEAN_FIELD:
return f"(pop={population}, t={t}_a, pos=[{x} {y}])"
if player_id == pyspiel.PlayerId.CHANCE:
return f"(pop={population}, t={t}_a_mu, pos=[{x} {y}])"
class MFGPredatorPreyGame(pyspiel.Game):
"""Predator-prey multi-population MFG."""
# pylint:disable=dangerous-default-value
def __init__(self, params: Mapping[str, Any] = _DEFAULT_PARAMS):
self.size = get_param("size", params)
self.horizon = get_param("horizon", params)
flat_reward_matrix = np.fromstring(
get_param("reward_matrix", params), dtype=np.float64, sep=" "
)
num_players = get_param("players", params)
if len(flat_reward_matrix) != num_players**2:
raise ValueError(
"Reward matrix passed in flat representation does not represent a "
f"square matrix: {flat_reward_matrix}"
f" with number of players: {num_players}"
)
self.reward_matrix = flat_reward_matrix.reshape([num_players, num_players])
self.geometry = get_param("geometry", params)
num_states = self.size**2
game_info = pyspiel.GameInfo(
num_distinct_actions=_NUM_ACTIONS,
max_chance_outcomes=max(num_states, _NUM_CHANCE),
num_players=num_players,
min_utility=-np.inf,
max_utility=+np.inf,
utility_sum=None,
max_game_length=self.horizon,
)
self.noise_probability = get_param("noise_probability", params)
self.congestion_coeff = get_param("congestion_coeff", params)
# Represents the current probability distribution over game states
# (when grouped for each population).
str_init_distrib = get_param("init_distrib", params)
if str_init_distrib:
flat_init_distrib = np.fromstring(
str_init_distrib, dtype=np.float64, sep=" "
)
if len(flat_init_distrib) != num_players * self.size**2:
raise ValueError(
"Initial distribution matrix passed in flat representation does"
f" not represent a sequence of square matrices: {flat_init_distrib}"
f" with number of players: {num_players}"
f" and size: {self.size}"
)
self.initial_distribution = flat_init_distrib
else:
# Initialized with a uniform distribution.
self.initial_distribution = [1.0 / num_states] * (
num_states * num_players
)
super().__init__(_GAME_TYPE, game_info, params)
def new_initial_state(self):
"""Returns a new population-less blank state.
This state is provided for some internal operations that use blank
states (e.g. cloning), but cannot be used to play the game, i.e.
ApplyAction() will fail. Proper playable states should be
instantiated with new_initial_state_for_population().
"""
return MFGPredatorPreyState(self)
def max_chance_nodes_in_history(self):
"""Maximun chance nodes in game history."""
return self.horizon + 1
def new_initial_state_for_population(self, population):
"""State corresponding to the start of a game for a given population."""
return MFGPredatorPreyState(self, population)
def make_py_observer(self, iig_obs_type=None, params=None):
"""Returns an object used for observing game state."""
if (iig_obs_type is None) or (
iig_obs_type.public_info and not iig_obs_type.perfect_recall
):
return Observer(params, self)
return observation.IIGObserverForPublicInfoGame(iig_obs_type, params)
def pos_to_merged(pos: np.ndarray, size: int) -> int:
"""Converts a [x, y] position into a single integer."""
assert (pos >= 0).all(), pos
assert (pos < size).all(), pos
return pos[0] + pos[1] * size
def merged_to_pos(merged_pos: int, size: int) -> np.ndarray:
"""Inverse of pos_to_merged()."""
assert 0 <= merged_pos < size * size
return np.array([merged_pos % size, merged_pos // size])
class MFGPredatorPreyState(pyspiel.State):
"""State for the predator-prey MFG."""
# Maps legal actions to the corresponding move on the grid of the game.
_ACTION_TO_MOVE = {
0: np.array([0, 0]),
1: np.array([1, 0]),
2: np.array([0, 1]),
3: np.array([0, -1]),
4: np.array([-1, 0]),
}
# Action that corresponds to no displacement.
_NEUTRAL_ACTION = 0
def __init__(self, game, population=None):
"""Constructor; should only be called by Game.new_initial_state.*.
Args:
game: MFGPredatorPreyGame for which a state should be created.
population: ID of the population to create this state for. Must be in [0,
num_players()) or None. States with population=None cannot be used to
perform game actions.
"""
super().__init__(game)
# Initial state where the initial position is chosen according to
# an initial distribution.
self._is_position_init = True
self._player_id = pyspiel.PlayerId.CHANCE
# Population this state corresponds to. Can be None, in which
# case, ApplyAction() is forbidden.
self._population = population
if self._population is not None:
assert 0 <= self._population < self.num_players()
# When set, <int>[2] numpy array representing the x, y position on the grid.
self._pos = None # type: Optional[np.ndarray]
self._t = 0
self.size = game.size
# Number of states in the grid.
self.num_states = self.size**2
self.horizon = game.horizon
self.reward_matrix = game.reward_matrix
self.geometry = game.geometry
self._returns = np.zeros([self.num_players()], dtype=np.float64)
self._distribution = shared_value.SharedValue(game.initial_distribution)
self.noise_probability = game.noise_probability
self.congestion_coeff = game.congestion_coeff
@property
def population(self):
return self._population
@property
def pos(self):
return self._pos
@property
def t(self):
return self._t
def state_to_str(self, pos, t, population, player_id=0):
"""A string that uniquely identify (pos, t, population, player_id)."""
if self._is_position_init:
return f"position_init_{population}"
assert isinstance(pos, np.ndarray), f"Got type {type(pos)}"
assert len(pos.shape) == 1, f"Got {len(pos.shape)}, expected 1 (pos={pos})."
assert pos.shape[0] == 2, f"Got {pos.shape[0]}, expected 2 (pos={pos})."
return _state_to_str(pos[0], pos[1], t, population, player_id)
# OpenSpiel (PySpiel) API functions are below. This is the standard set that
# should be implemented by every perfect-information sequential-move game.
def mean_field_population(self):
return self._population
def _legal_actions(self, player):
"""Returns a list of legal actions for player and MFG nodes."""
if player == pyspiel.PlayerId.MEAN_FIELD:
return []
if player >= 0 and player == self.current_player():
return list(self._ACTION_TO_MOVE)
raise ValueError(
f"Unexpected player {player}."
"Expected a mean field or current player >=0."
)
def chance_outcomes(self) -> List[Tuple[int, float]]:
"""Returns the possible chance outcomes and their probabilities."""
if self._is_position_init:
if (
self._population is None
or not 0 <= self._population < self.num_players()
):
raise ValueError(f"Invalid population {self._population}")
return [
(i, self._distribution.value[self._population * self.num_states + i])
for i in range(self.num_states)
if self._distribution.value[self._population * self.num_states + i]
!= 0.0
]
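    # After a player move, the chance node either applies no extra
    # displacement (with probability 1 - noise_probability) or one of the four
    # unit moves, each with probability noise_probability / 4.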
return [
(0, 1.0 - self.noise_probability),
(1, self.noise_probability / 4.0),
(2, self.noise_probability / 4.0),
(3, self.noise_probability / 4.0),
(4, self.noise_probability / 4.0),
]
def update_pos(self, action):
"""Updates the position of the player given a move action."""
if action < 0 or action >= len(self._ACTION_TO_MOVE):
raise ValueError(
f"The action must be between 0 and {len(self._ACTION_TO_MOVE)}, "
f"got {action}"
)
candidate_pos = self._pos + self._ACTION_TO_MOVE[action]
if self.geometry == Geometry.TORUS:
candidate_pos += self.size
candidate_pos %= self.size
else:
assert (
self.geometry == Geometry.SQUARE
), f"Invalid geometry {self.geometry}"
# Keep the position within the bounds of the square.
candidate_pos = np.minimum(candidate_pos, self.size - 1)
candidate_pos = np.maximum(candidate_pos, 0)
self._pos = candidate_pos
def _apply_action(self, action):
"""Applies the specified action to the state."""
if self._population is None:
raise ValueError(
"Attempting to perform an action with a population-less state."
)
if self._player_id == pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"_apply_action should not be called at a MEAN_FIELD state."
)
self._returns += np.array(self.rewards())
if self._is_position_init:
self._pos = merged_to_pos(action, self.size)
self._is_position_init = False
self._player_id = self._population
elif self._player_id == pyspiel.PlayerId.CHANCE:
self.update_pos(action)
self._t += 1
self._player_id = pyspiel.PlayerId.MEAN_FIELD
elif int(self._player_id) >= 0:
assert self._player_id == self._population, (
f"Invalid decision player id {self._player_id} "
f"expected {self._population}"
)
self.update_pos(action)
self._player_id = pyspiel.PlayerId.CHANCE
else:
raise ValueError(f"Unexpected state. Player id: {self._player_id}")
def _action_to_string(self, player, action):
"""Action -> string."""
del player
if self.is_chance_node() and self._is_position_init:
return f"init_position={action}"
return str(self._ACTION_TO_MOVE[action])
def distribution_support(self):
"""Returns a list of state string."""
support = []
for x in range(self.size):
for y in range(self.size):
for population in range(self.num_players()):
support.append(
self.state_to_str(
np.array([x, y]),
self._t,
population,
player_id=pyspiel.PlayerId.MEAN_FIELD,
)
)
return support
def get_pos_proba(self, pos: np.ndarray, population: int) -> float:
"""Gets the probability of a pos and population in the current distrib.
Args:
pos: 2D position.
population: Population requested.
Returns:
The probability for the provided position and population.
"""
assert (pos >= 0).all(), pos
assert (pos < self.size).all(), pos
assert 0 <= population < self.num_players(), population
# This logic needs to match the ordering defined in distribution_support().
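    # For example, with size=5 and 3 populations, (pos=[1, 2], population=0)
    # maps to index 0 + 3 * (2 + 5 * 1) = 21.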
index = population + self.num_players() * (pos[1] + self.size * pos[0])
assert 0 <= index < len(self._distribution.value), (
f"Invalid index {index} vs dist length: {len(self._distribution.value)}"
f", population={population}, pos={pos}, state={self}"
)
return self._distribution.value[index]
def update_distribution(self, distribution):
"""This function is central and specific to the logic of the MFG.
It should only be called when the node is in MEAN_FIELD state.
Args:
distribution: List of floats that should contain the probability of each
state returned by distribution_support().
"""
expected_dist_size = self.num_states * self.num_players()
assert len(distribution) == expected_dist_size, (
"Unexpected distribution length "
f"{len(distribution)} != {expected_dist_size}"
)
if self._player_id != pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"update_distribution should only be called at a MEAN_FIELD state."
)
self._distribution = shared_value.SharedValue(distribution)
self._player_id = self._population
def is_terminal(self):
"""Returns True if the game is over."""
return self.t >= self.horizon
def current_player(self):
"""Returns id of the next player to move, or TERMINAL if game is over."""
if self.is_terminal():
return pyspiel.PlayerId.TERMINAL
return self._player_id
def rewards(self) -> List[float]:
"""Predator-prey rewards for all populations.
See section 5.4, paragraph Environment in https://arxiv.org/abs/2103.00623.
Returns:
One float per population.
"""
if int(self._player_id) < 0:
return [0.0] * self.num_players()
# TODO(author15): Remove this eps once b/191064186 is fixed.
eps = 1e-25
densities = np.array(
[
self.get_pos_proba(self._pos, population)
for population in range(self.num_players())
],
dtype=np.float64,
)
rew = -self.congestion_coeff * np.log(densities + eps) + np.dot(
self.reward_matrix, densities
)
return list(rew)
def returns(self) -> List[float]:
"""Returns is the sum of all payoffs collected so far."""
return list(self._returns + np.array(self.rewards()))
def __str__(self):
"""A string that uniquely identify the current state."""
return self.state_to_str(
self._pos, self._t, self._population, player_id=self._player_id
)
class Observer:
"""Observer, conforming to the PyObserver interface (see observation.py)."""
def __init__(self, params, game):
"""Initializes an empty observation tensor."""
del params
self.size = game.size
self.horizon = game.horizon
# +1 to allow t == horizon.
self.tensor = np.zeros(2 * self.size + self.horizon + 1, np.float32)
self.dict = {
"x": self.tensor[: self.size],
"y": self.tensor[self.size : self.size * 2],
"t": self.tensor[self.size * 2 :],
}
def set_from(self, state: MFGPredatorPreyState, player: int):
"""Updates `tensor` and `dict` to reflect `state` from PoV of `player`."""
del player
# We update the observation via the shaped tensor since indexing is more
# convenient than with the 1-D tensor. Both are views onto the same memory.
self.tensor.fill(0)
# state.pos is None for the initial (blank) state, don't set any
# position bit in that case.
if state.pos is not None:
if not (state.pos >= 0).all() or not (state.pos < self.size).all():
raise ValueError(
f"Expected {state} positions to be in [0, {self.size})"
)
self.dict["x"][state.pos[0]] = 1
self.dict["y"][state.pos[1]] = 1
if not 0 <= state.t <= self.horizon:
raise ValueError(f"Expected {state} time to be in [0, {self.horizon}]")
self.dict["t"][state.t] = 1
def string_from(self, state, player):
"""Observation of `state` from the PoV of `player`, as a string."""
del player
return str(state)
pyspiel.register_game(_GAME_TYPE, MFGPredatorPreyGame)
| open_spiel-master | open_spiel/python/mfg/games/predator_prey.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for Python mean field routing game."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import numpy.testing as npt
from open_spiel.python import games # pylint:disable=unused-import
from open_spiel.python import policy
from open_spiel.python.games import dynamic_routing_utils
from open_spiel.python.mfg import games as mfg_games # pylint:disable=unused-import
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import dynamic_routing
from open_spiel.python.mfg.games import factory
from open_spiel.python.observation import make_observation
import pyspiel
_NUMBER_OF_ITERATIONS_TESTS = 1
class SocialOptimumBraess(policy.Policy):
def action_probabilities(self, state, player_id=None):
legal_actions = state.legal_actions()
if not legal_actions:
return {dynamic_routing_utils.NO_POSSIBLE_ACTION: 1.0}
elif len(legal_actions) == 1:
return {legal_actions[0]: 1.0}
else:
if legal_actions[0] == 1:
return {1: 0.5, 2: 0.5}
elif legal_actions[0] == 3:
return {4: 1.0}
raise ValueError(f"{legal_actions} is not correct.")
class NashEquilibriumBraess(policy.Policy):
def action_probabilities(self, state, player_id=None):
legal_actions = state.legal_actions()
if not legal_actions:
return {dynamic_routing_utils.NO_POSSIBLE_ACTION: 1.0}
elif len(legal_actions) == 1:
return {legal_actions[0]: 1.0}
else:
if legal_actions[0] == 1:
return {1: 0.75, 2: 0.25}
elif legal_actions[0] == 3:
return {3: 2 / 3, 4: 1 / 3}
raise ValueError(f"{legal_actions} is not correct. {state}.")
class MeanFieldRoutingGameTest(absltest.TestCase):
"""Checks we can create the game and clone states."""
def test_load(self):
"""Test load and game creation."""
game = pyspiel.load_game("python_mfg_dynamic_routing")
game.new_initial_state()
def test_create(self):
"""Checks we can create the game and clone states."""
game = pyspiel.load_game("python_mfg_dynamic_routing")
self.assertEqual(game.get_type().dynamics,
pyspiel.GameType.Dynamics.MEAN_FIELD)
state = game.new_initial_state()
state.clone()
def test_random_game(self):
"""Test random simulation."""
game = pyspiel.load_game("python_mfg_dynamic_routing")
pyspiel.random_sim_test(game, num_sims=10, serialize=False, verbose=True)
def test_evolving_trajectory_with_uniform_policy(self):
"""Test evolving distribution."""
game = pyspiel.load_game("python_mfg_dynamic_routing")
distribution.DistributionPolicy(game, policy.UniformRandomPolicy(game))
def test_non_default_param_from_string(self):
"""Check params can be given through string loading."""
game = pyspiel.load_game("python_mfg_dynamic_routing(max_num_time_step=5)")
self.assertEqual(game.max_game_length(), 5)
def test_non_default_param_from_dict(self):
"""Check params can be given through a dictionary."""
game = pyspiel.load_game("python_mfg_dynamic_routing",
{"max_num_time_step": 5})
self.assertEqual(game.max_game_length(), 5)
  # Enable fictitious_play with games where the dynamics depend on the
  # distribution.
  # def test_fictitious_play(self):
  #   """Test that fictitious play can be used on this game."""
# mfg_game = pyspiel.load_game("python_mfg_dynamic_routing")
# fp = fictitious_play.FictitiousPlay(mfg_game)
# for _ in range(_NUMBER_OF_ITERATIONS_TESTS):
# fp.iteration()
# nash_conv.NashConv(mfg_game, fp.get_policy())
def test_online_mirror_descent(self):
"""Test that online mirror descent can be used on this game."""
mfg_game = pyspiel.load_game("python_mfg_dynamic_routing")
omd = mirror_descent.MirrorDescent(mfg_game)
for _ in range(_NUMBER_OF_ITERATIONS_TESTS):
omd.iteration()
nash_conv.NashConv(mfg_game, omd.get_policy())
def test_online_mirror_descent_convergence(self):
"""Test that online mirror descent converges to equilibrium in default game."""
mfg_game = pyspiel.load_game("python_mfg_dynamic_routing", {
"time_step_length": 0.05,
"max_num_time_step": 100
})
omd = mirror_descent.MirrorDescent(mfg_game, lr=1)
for _ in range(50):
omd.iteration()
self.assertAlmostEqual(
nash_conv.NashConv(mfg_game, omd.get_policy()).nash_conv(), 0)
def test_vehicle_origin_outside_network(self):
"""Check raise assertion if vehicle's origin is outside the Network."""
od_demand = [
dynamic_routing_utils.OriginDestinationDemand("I->O", "D->E", 0, 5)
]
with self.assertRaises(ValueError):
dynamic_routing.MeanFieldRoutingGame(
{
"max_num_time_step": 10,
"time_step_length": 0.5,
"players": -1
},
od_demand=od_demand)
def test_vehicle_destination_outside_network(self):
"""Check raise assertion if vehicle's destination is outside the Network."""
od_demand = [
dynamic_routing_utils.OriginDestinationDemand("O->A", "E->F", 0, 5)
]
with self.assertRaises(ValueError):
dynamic_routing.MeanFieldRoutingGame(
{
"max_num_time_step": 10,
"time_step_length": 0.5,
"players": -1
},
od_demand=od_demand)
def test_multiple_departure_time_vehicle(self):
"""Check that departure time can be define."""
od_demand = [
dynamic_routing_utils.OriginDestinationDemand("O->A", "D->E", 0, 5),
dynamic_routing_utils.OriginDestinationDemand("O->A", "D->E", 0.5, 5),
dynamic_routing_utils.OriginDestinationDemand("O->A", "D->E", 1.0, 5)
]
game = dynamic_routing.MeanFieldRoutingGame(
{
"max_num_time_step": 10,
"time_step_length": 0.5,
"players": -1
},
od_demand=od_demand)
pyspiel.random_sim_test(game, num_sims=10, serialize=False, verbose=True)
def test_game_evolution_uniform_policy(self):
"""Check game evolution under uniform policy."""
# Test evolution of the game as expected (test value of the state).
# Test legal_actions().
def test_observer_correct(self):
"""Checks that the observer is correctly updated."""
game = pyspiel.load_game("python_mfg_dynamic_routing")
num_locations, steps = 8, 10
self.assertEqual(game.num_distinct_actions(), num_locations)
self.assertEqual(game.max_game_length(), steps)
py_obs = make_observation(game)
state = game.new_initial_state()
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(0)
self.assertEqual(state.current_player(), 0)
location, destination = 7, 6
self.assertEqual(state.get_location_as_int(), location)
self.assertEqual(state.get_destination_as_int(), destination)
py_obs.set_from(state, state.current_player())
obs_size = num_locations * 2 + steps + 2
expected_tensor = np.zeros(obs_size)
# location = 7
# destination + num_locations = 14
# time + 2 * num_locations = 16
# waiting bit at last index.
expected_tensor[[7, 14, 16]] = 1
npt.assert_array_equal(py_obs.tensor, expected_tensor)
def test_apply_actions_error_no_movement_with_negative_waiting_time(self):
"""Check that a vehicle cannot choose to not move if it has to move."""
# Test apply_actions().
def test_apply_actions_error_wrong_movement_with_negative_waiting_time(self):
"""Check that a vehicle cannot choose to move to a not successor link."""
# Test apply_actions().
def test_apply_actions_error_movement_with_positive_waiting_time(self):
"""Check that a vehicle cannot choose to move if it cannot move yet."""
# Test apply_actions().
@absltest.skip(
"Test of OMD on Sioux Falls is disabled as it takes a long time to run.")
def test_online_mirror_descent_sioux_falls_dummy(self):
"""Test that online mirror descent can be used on the Sioux Falls game."""
mfg_game = factory.create_game_with_setting(
"python_mfg_dynamic_routing",
"dynamic_routing_sioux_falls_dummy_demand")
omd = mirror_descent.MirrorDescent(mfg_game)
for _ in range(_NUMBER_OF_ITERATIONS_TESTS):
omd.iteration()
nash_conv.NashConv(mfg_game, omd.get_policy())
class CppVsPythonMeanFieldRoutingGameTest(parameterized.TestCase):
@parameterized.named_parameters(
("python", ("python_mfg_dynamic_routing(max_num_time_step=100,"
"time_step_length=0.05)")),
("cpp", ("mfg_dynamic_routing(max_num_time_step=100,"
"time_step_length=0.05,network=braess)")))
def test_braess_paradox_game(self, game_name):
"""Test that Braess paradox can be reproduced with the mean field game."""
mfg_game = pyspiel.load_game(game_name)
ne_policy = NashEquilibriumBraess(mfg_game, 1)
self.assertEqual(
-policy_value.PolicyValue(
mfg_game, distribution.DistributionPolicy(mfg_game, ne_policy),
ne_policy).value(mfg_game.new_initial_state()), 3.75)
self.assertEqual(nash_conv.NashConv(mfg_game, ne_policy).nash_conv(), 0.0)
so_policy = SocialOptimumBraess(mfg_game, 1)
self.assertEqual(
-policy_value.PolicyValue(
mfg_game, distribution.DistributionPolicy(mfg_game, so_policy),
so_policy).value(mfg_game.new_initial_state()), 3.5)
self.assertEqual(nash_conv.NashConv(mfg_game, so_policy).nash_conv(), 0.75)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/mfg/games/dynamic_routing_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mean Field Game on periodic domain with aversion cost.
This is a demonstration of implementing a mean field game in Python. The model
is an approximation of a continuous space, continuous time model introduced
to study ergodic MFG with explicit solution in:
Almulla, N.; Ferreira, R.; and Gomes, D. 2017.
Two numerical approaches to stationary mean-field games. Dyn. Games Appl.
7(4):657-682.
See also:
Elie, R., Perolat, J., Laurière, M., Geist, M., & Pietquin, O. (2020, April).
On the convergence of model free learning in mean field games.
In Proceedings of the AAAI Conference on Artificial Intelligence
(Vol. 34, No. 05, pp. 7143-7150).
"""
import functools
import math
from typing import Any, List, Mapping
import numpy as np
import scipy.stats
from open_spiel.python import observation
import pyspiel
_NUM_PLAYERS = 1
_SIZE = 21
_HORIZON = 20
_VOLATILITY = 1.0
_COEF_AVERSION = 1.0
_DELTA_T = 0.01
_X_MIN = 0.0
_X_MAX = 1.0
_N_ACTIONS_PER_SIDE = 10
_DEFAULT_PARAMS = {
"size": _SIZE,
"horizon": _HORIZON,
"dt": _DELTA_T,
"xmin": _X_MIN,
"xmax": _X_MAX,
"n_actions_per_side": _N_ACTIONS_PER_SIDE,
"volatility": _VOLATILITY,
"coef_aversion": _COEF_AVERSION,
}
_GAME_TYPE = pyspiel.GameType(
short_name="python_mfg_periodic_aversion",
long_name="Mean-Field Periodic Aversion Game",
dynamics=pyspiel.GameType.Dynamics.MEAN_FIELD,
chance_mode=pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC,
information=pyspiel.GameType.Information.PERFECT_INFORMATION,
utility=pyspiel.GameType.Utility.GENERAL_SUM,
reward_model=pyspiel.GameType.RewardModel.REWARDS,
max_num_players=_NUM_PLAYERS,
min_num_players=_NUM_PLAYERS,
provides_information_state_string=False,
provides_information_state_tensor=False,
provides_observation_string=True,
provides_observation_tensor=True,
parameter_specification=_DEFAULT_PARAMS,
)
@functools.lru_cache(maxsize=None)
def _state_to_str(x, t, player_id):
"""A string that uniquely identifies (x, t, player_id)."""
if int(player_id) == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
return f"(t={t}, pos={x})"
if player_id == pyspiel.PlayerId.MEAN_FIELD:
return f"(t={t}_a, pos={x})"
if player_id == pyspiel.PlayerId.CHANCE:
return f"(t={t}_a_mu, pos={x})"
class MFGPeriodicAversionGame(pyspiel.Game):
"""A Mean-Field Game on periodic domain with crowd aversion cost.
  A game starts with an initial chance node that selects the initial state
  of the player in the MFG.
Then the game sequentially alternates between:
- An action selection node (where the player id is >= 0)
- A chance node (the player id is pyspiel.PlayerId.CHANCE)
- A Mean Field node (the player id is pyspiel.PlayerId.MEAN_FIELD)
"""
# pylint:disable=dangerous-default-value
def __init__(self, params: Mapping[str, Any] = _DEFAULT_PARAMS):
self.size = params.get("size", _SIZE) # number of states
self.horizon = params.get("horizon", _HORIZON) # number of time steps
self.dt = params.get("dt", _DELTA_T) # size of one step in time
self.xmin = params.get("xmin", _X_MIN) # smallest position
self.xmax = params.get("xmax", _X_MAX) # largest position
self.dx = (self.xmax - self.xmin) / (
self.size - 1
) # size of one step in space
self.n_actions_per_side = params.get(
"n_actions_per_side", _N_ACTIONS_PER_SIDE
) # number of actions on each side, for both players and noise
self.volatility = params.get("volatility", _VOLATILITY)
self.coef_aversion = params.get("coef_aversion", _COEF_AVERSION)
game_info = pyspiel.GameInfo(
num_distinct_actions=2 * self.n_actions_per_side + 1,
max_chance_outcomes=2 * self.n_actions_per_side + 1,
num_players=_NUM_PLAYERS,
min_utility=-np.inf,
max_utility=+np.inf,
utility_sum=0.0,
max_game_length=self.horizon,
)
super().__init__(_GAME_TYPE, game_info, params)
def new_initial_state(self):
"""Returns a state corresponding to the start of a game."""
return MFGPeriodicAversionState(self)
def make_py_observer(self, iig_obs_type=None, params=None):
"""Returns an object used for observing game state."""
if (iig_obs_type is None) or (
iig_obs_type.public_info and not iig_obs_type.perfect_recall
):
return Observer(params, self)
return observation.IIGObserverForPublicInfoGame(iig_obs_type, params)
def max_chance_nodes_in_history(self):
"""Maximun chance nodes in game history."""
return self.horizon + 1
class MFGPeriodicAversionState(pyspiel.State):
"""A Mean Field Normal-Form state.
In this class, x and action are integers. They are converted, when needed, to
spatial variables by using a scaling factor representing the size of a step in
space and by shifting them depending on the minimal allowed value.
"""
def __init__(self, game):
"""Constructor; should only be called by Game.new_initial_state."""
super().__init__(game)
# Initial state where the initial position is chosen according to
# an initial distribution.
self._player_id = pyspiel.PlayerId.CHANCE
self._last_action = game.n_actions_per_side # neutral action
self.tick = 0
self.x = None
self.return_value = 0.0
self.game = game
self.size = game.size
self.horizon = game.horizon
self.dt = game.dt
self.xmin = game.xmin
self.xmax = game.xmax
self.dx = game.dx
self.da = game.dx
self.n_actions_per_side = game.n_actions_per_side
self.volatility = game.volatility
self.coef_aversion = game.coef_aversion
# Represents the current probability distribution over game states.
# Initialized with a uniform distribution.
self._distribution = [1.0 / self.size for _ in range(self.size)]
def to_string(self):
return self.state_to_str(self.x, self.tick)
def state_to_str(self, x, tick, player_id=pyspiel.PlayerId.DEFAULT_PLAYER_ID):
"""A string that uniquely identify a triplet x, t, player_id."""
if self.x is None:
return "initial"
if self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
return "({}, {})".format(x, tick)
elif self._player_id == pyspiel.PlayerId.MEAN_FIELD:
return "({}, {})_a".format(x, tick)
elif self._player_id == pyspiel.PlayerId.CHANCE:
return "({}, {})_a_mu".format(x, tick)
raise ValueError(
"player_id is not mean field, chance or default player id."
)
# OpenSpiel (PySpiel) API functions are below. This is the standard set that
# should be implemented by every perfect-information sequential-move game.
@property
def n_actions(self):
return 2 * self.n_actions_per_side + 1
def _legal_actions(self, player):
"""Returns a list of legal actions for player and MFG nodes."""
if player == pyspiel.PlayerId.MEAN_FIELD:
return []
if (
player == pyspiel.PlayerId.DEFAULT_PLAYER_ID
and player == self.current_player()
):
return list(range(self.n_actions))
raise ValueError(
f"Unexpected player {player}. "
"Expected a mean field or current player 0."
)
def _apply_action(self, action):
"""Applies the specified action to the state."""
if self._player_id == pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"_apply_action should not be called at a MEAN_FIELD state."
)
self.return_value = self._rewards()
assert (
self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID
or self._player_id == pyspiel.PlayerId.CHANCE
)
if self.x is None:
self.x = action
self._player_id = pyspiel.PlayerId.DEFAULT_PLAYER_ID
return
if action < 0 or action >= self.n_actions:
raise ValueError(
"The action is between 0 and {} at any node".format(self.n_actions)
)
self.x = (self.x + action - self.n_actions_per_side) % self.size
if self._player_id == pyspiel.PlayerId.CHANCE:
self._player_id = pyspiel.PlayerId.MEAN_FIELD
self.tick += 1
elif self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
self._last_action = action
self._player_id = pyspiel.PlayerId.CHANCE
def _action_to_string(self, player, action):
"""Action -> string."""
del player
return str(action - self.n_actions_per_side)
def action_to_move(self, action):
return (action - self.n_actions_per_side) * self.da
def state_to_position(self, state):
return state * self.dx + self.xmin
def position_to_state(self, position):
return round((position - self.xmin) / self.dx)
def chance_outcomes(self):
"""Returns the possible chance outcomes and their probabilities."""
if self.x is None:
# Initial distribution
return list(enumerate(self._distribution))
actions = np.array(
[(a - self.n_actions_per_side) * self.da for a in range(self.n_actions)]
)
stddev = self.volatility * math.sqrt(self.dt)
probas = scipy.stats.norm.pdf(actions, scale=stddev)
probas /= np.sum(probas)
return [(act, p) for act, p in zip(list(range(self.n_actions)), probas)]
def distribution_support(self):
"""return a list of state string."""
return [
self.state_to_str(i, self.tick, player_id=pyspiel.PlayerId.MEAN_FIELD)
for i in range(self.size)
]
def get_state_proba(self, state: int) -> float:
"""Gets the probability of a position in the current distrib.
Args:
state: state requested.
Returns:
The probability for the provided position.
"""
assert state >= 0, state
assert state < self.size, state
# This logic needs to match the ordering defined in distribution_support().
index = state
assert 0 <= index < len(self._distribution), (
f"Invalid index {index} vs dist length:"
f" {len(self._distribution)}, state={state},"
f" state={self}"
)
return self._distribution[index]
def update_distribution(self, distribution):
"""This function is central and specific to the logic of the MFG.
Args:
      distribution: the distribution to register. This function should only be
        called when the node is in MEAN_FIELD state. The entries are
        probabilities that correspond to each game state given by
        distribution_support().
"""
if self._player_id != pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"update_distribution should only be called at a MEAN_FIELD state."
)
self._distribution = distribution.copy()
self._player_id = pyspiel.PlayerId.DEFAULT_PLAYER_ID
@property
def t(self):
return self.tick
def is_terminal(self):
"""Returns True if the game is over."""
return self.t >= self.horizon
def current_player(self):
"""Returns id of the next player to move, or TERMINAL if game is over."""
if self.is_terminal():
return pyspiel.PlayerId.TERMINAL
return self._player_id
def _rewards(self):
"""Reward for the player for this state."""
if self._player_id != pyspiel.PlayerId.DEFAULT_PLAYER_ID:
return 0.0
assert self.x is not None
velocity = self.action_to_move(self._last_action) / self.dt
action_r = -0.5 * velocity**2
eps = 1e-15
mu_x = self.get_state_proba(self.x) / self.dx # represents the density
# The density should have an integral equal to 1; here sum_x mu_x * dx = 1
aversion_r = -np.log(mu_x + eps)
pos = self.state_to_position(self.x)
pix2 = 2 * np.pi * pos
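    # Space-dependent reward term following the model of Almulla et al. (2017)
    # referenced in the module docstring, designed so that the ergodic MFG has
    # an explicit solution.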
geom_r = (
self.volatility * 2 * np.pi**2 * np.sin(pix2)
- 2 * np.pi**2 * np.cos(pix2) ** 2
+ (2 / self.volatility**2) * np.sin(pix2)
)
return (action_r + self.coef_aversion * aversion_r + geom_r) * self.dt
def rewards(self) -> List[float]:
"""Rewards for all players."""
# For now, only single-population (single-player) mean field games
# are supported.
return [self._rewards()]
def _returns(self):
"""Returns is the sum of all payoffs collected so far."""
return self.return_value + self._rewards()
def returns(self) -> List[float]:
"""Returns for all players."""
# For now, only single-population (single-player) mean field games
# are supported.
return [self._returns()]
def __str__(self):
"""A string that uniquely identify the current state."""
return self.state_to_str(
x=self.x, tick=self.tick, player_id=self._player_id
)
class Observer:
"""Observer, conforming to the PyObserver interface (see observation.py)."""
def __init__(self, params, game):
"""Initializes an empty observation tensor."""
del params
self.size = game.size
self.horizon = game.horizon
# +1 to allow t == horizon.
self.tensor = np.zeros(self.size + self.horizon + 1, np.float32)
self.dict = {"x": self.tensor[: self.size], "t": self.tensor[self.size :]}
def set_from(self, state, player: int):
"""Updates `tensor` and `dict` to reflect `state` from PoV of `player`."""
del player
# We update the observation via the shaped tensor since indexing is more
# convenient than with the 1-D tensor. Both are views onto the same memory.
self.tensor.fill(0)
# state.x is None for the initial (blank) state, don't set any
# position bit in that case.
if state.x is not None:
      if state.x < 0 or state.x >= self.size:
        raise ValueError(
            f"Expected {state} position to be in [0, {self.size})"
        )
self.dict["x"][state.x] = 1
if not 0 <= state.tick <= self.horizon:
raise ValueError(f"Expected {state} time to be in [0, {self.horizon}]")
self.dict["t"][state.tick] = 1
def string_from(self, state, player):
"""Observation of `state` from the PoV of `player`, as a string."""
del player
return state.to_string()
pyspiel.register_game(_GAME_TYPE, MFGPeriodicAversionGame)
| open_spiel-master | open_spiel/python/mfg/games/periodic_aversion.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mean field games implemented in Python.
These games are registered as they are imported. It's perfectly possible to
import just a single game if you prefer. There is no need to add new games here,
so long as they register themselves and you import them when wanting to use
them. However, adding them here will make them available for playthroughs and
for automated API testing.
Registration looks like this:
```
pyspiel.register_game(_GAME_TYPE, KuhnPokerGame)
```
"""
from open_spiel.python.mfg.games import crowd_avoidance
from open_spiel.python.mfg.games import crowd_modelling
from open_spiel.python.mfg.games import dynamic_routing
from open_spiel.python.mfg.games import linear_quadratic
from open_spiel.python.mfg.games import periodic_aversion
from open_spiel.python.mfg.games import predator_prey
| open_spiel-master | open_spiel/python/mfg/games/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for Python Periodic Aversion game."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.mfg.games import periodic_aversion
import pyspiel
MFG_STR_CONST = "_a"
class MFGPeriodicAversionTest(absltest.TestCase):
def test_load(self):
game = pyspiel.load_game("python_mfg_periodic_aversion")
game.new_initial_state()
def test_create(self):
"""Checks we can create the game and clone states."""
game = periodic_aversion.MFGPeriodicAversionGame()
self.assertEqual(game.size, periodic_aversion._SIZE)
self.assertEqual(game.horizon, periodic_aversion._HORIZON)
self.assertEqual(game.get_type().dynamics,
pyspiel.GameType.Dynamics.MEAN_FIELD)
print("Num distinct actions:", game.num_distinct_actions())
state = game.new_initial_state()
clone = state.clone()
print("Initial state:", state)
print("Cloned initial state:", clone)
def test_create_with_params(self):
game = pyspiel.load_game("python_mfg_periodic_aversion(horizon=30,size=41)")
self.assertEqual(game.size, 41)
self.assertEqual(game.horizon, 30)
def check_cloning(self, state):
cloned = state.clone()
self.assertEqual(str(cloned), str(state))
self.assertEqual(cloned._distribution, state._distribution)
self.assertEqual(cloned._returns(), state._returns())
self.assertEqual(cloned.current_player(), state.current_player())
self.assertEqual(cloned.size, state.size)
self.assertEqual(cloned.horizon, state.horizon)
self.assertEqual(cloned._last_action, state._last_action)
def test_random_game(self):
"""Tests basic API functions."""
np.random.seed(7)
horizon = 30
size = 41
game = periodic_aversion.MFGPeriodicAversionGame(params={
"horizon": horizon,
"size": size
})
state = game.new_initial_state()
t = 0
while not state.is_terminal():
if state.current_player() == pyspiel.PlayerId.CHANCE:
actions, probs = zip(*state.chance_outcomes())
action = np.random.choice(actions, p=probs)
self.check_cloning(state)
self.assertEqual(len(state.legal_actions()),
len(state.chance_outcomes()))
state.apply_action(action)
elif state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
self.assertEqual(state.legal_actions(), [])
self.check_cloning(state)
num_states = len(state.distribution_support())
state.update_distribution([1 / num_states] * num_states)
else:
self.assertEqual(state.current_player(), 0)
self.check_cloning(state)
state.observation_string()
state.information_state_string()
legal_actions = state.legal_actions()
action = np.random.choice(legal_actions)
state.apply_action(action)
t += 1
self.assertEqual(t, horizon)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/mfg/games/periodic_aversion_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Mean Field Crowd Modelling, implemented in Python.
This is a demonstration of implementing a mean field game in Python.
Fictitious play for mean field games: Continuous time analysis and applications,
Perrin & al. 2019 (https://arxiv.org/abs/2007.03458). This game corresponds
to the game in section 4.2.
"""
from typing import Any, List, Mapping
import numpy as np
from open_spiel.python import observation
import pyspiel
_NUM_PLAYERS = 1
_SIZE = 10
_HORIZON = 10
_NUM_ACTIONS = 3
_NUM_CHANCE = 3
_EPSILON = 10**(-25)
_DEFAULT_PARAMS = {"size": _SIZE, "horizon": _HORIZON}
_GAME_TYPE = pyspiel.GameType(
short_name="python_mfg_crowd_modelling",
long_name="Python Mean Field Crowd Modelling",
dynamics=pyspiel.GameType.Dynamics.MEAN_FIELD,
chance_mode=pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC,
information=pyspiel.GameType.Information.PERFECT_INFORMATION,
utility=pyspiel.GameType.Utility.GENERAL_SUM,
reward_model=pyspiel.GameType.RewardModel.REWARDS,
max_num_players=_NUM_PLAYERS,
min_num_players=_NUM_PLAYERS,
provides_information_state_string=True,
provides_information_state_tensor=False,
provides_observation_string=True,
provides_observation_tensor=True,
parameter_specification=_DEFAULT_PARAMS)
class MFGCrowdModellingGame(pyspiel.Game):
"""A Mean Field Crowd Modelling game.
  A game starts with an initial chance node that selects the initial state
of the MFG.
Then the game sequentially alternates between:
    - An action selection node (where the player id is >= 0)
- A chance node (the player id is pyspiel.PlayerId.CHANCE)
- A Mean Field node (the player id is pyspiel.PlayerId.MEAN_FIELD)
"""
# pylint:disable=dangerous-default-value
def __init__(self, params: Mapping[str, Any] = _DEFAULT_PARAMS):
game_info = pyspiel.GameInfo(
num_distinct_actions=_NUM_ACTIONS,
max_chance_outcomes=max(params["size"], _NUM_CHANCE),
num_players=_NUM_PLAYERS,
min_utility=-np.inf,
max_utility=+np.inf,
utility_sum=None,
max_game_length=params["horizon"])
super().__init__(_GAME_TYPE, game_info, params)
self.size = params["size"]
self.horizon = params["horizon"]
def new_initial_state(self):
"""Returns a state corresponding to the start of a game."""
return MFGCrowdModellingState(self)
def make_py_observer(self, iig_obs_type=None, params=None):
"""Returns an object used for observing game state."""
if ((iig_obs_type is None) or
(iig_obs_type.public_info and not iig_obs_type.perfect_recall)):
return Observer(params, self)
return observation.IIGObserverForPublicInfoGame(iig_obs_type, params)
def max_chance_nodes_in_history(self):
"""Maximun chance nodes in game history."""
return self.horizon + 1
class MFGCrowdModellingState(pyspiel.State):
"""A Mean Field Crowd Modelling state."""
# Maps legal actions to the corresponding move along the 1-D axis of the game.
_ACTION_TO_MOVE = {0: -1, 1: 0, 2: 1}
# Action that corresponds to no displacement.
_NEUTRAL_ACTION = 1
def __init__(self, game):
"""Constructor; should only be called by Game.new_initial_state."""
super().__init__(game)
self._is_chance_init = True # is true for the first state of the game.
self._player_id = pyspiel.PlayerId.CHANCE
self._x = None
self._t = 0
# We initialize last_action to the neutral action. This makes sure
# that the first reward does not include any displacement penalty.
self._last_action = self._NEUTRAL_ACTION
self.size = game.size
self.horizon = game.horizon
self.return_value = 0.0
# Represents the current probability distribution over game states.
# Initialized with a uniform distribution.
self._distribution = [1. / self.size for _ in range(self.size)]
@property
def x(self):
return self._x
@property
def t(self):
return self._t
def state_to_str(self, x, t, player_id=pyspiel.PlayerId.DEFAULT_PLAYER_ID):
"""A string that uniquely identify a triplet x, t, player_id."""
if self._is_chance_init:
return "initial"
if player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
return str((x, t))
if player_id == pyspiel.PlayerId.MEAN_FIELD:
return str((x, t)) + "_a"
if player_id == pyspiel.PlayerId.CHANCE:
return str((x, t)) + "_a_mu"
raise ValueError(
"player_id is not mean field, chance or default player id.")
# OpenSpiel (PySpiel) API functions are below. This is the standard set that
# should be implemented by every perfect-information sequential-move game.
def _legal_actions(self, player):
"""Returns a list of legal actions for player and MFG nodes."""
if player == pyspiel.PlayerId.MEAN_FIELD:
return []
if (player == pyspiel.PlayerId.DEFAULT_PLAYER_ID
and player == self.current_player()):
return [0, 1, 2]
raise ValueError(f"Unexpected player {player}. "
"Expected a mean field or current player 0.")
def chance_outcomes(self):
"""Returns the possible chance outcomes and their probabilities."""
if self._is_chance_init:
return list(enumerate(self._distribution))
return [(0, 1. / 3.), (1, 1. / 3.), (2, 1. / 3.)]
def _apply_action(self, action):
"""Applies the specified action to the state."""
if self._player_id == pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"_apply_action should not be called at a MEAN_FIELD state.")
self.return_value += self._rewards()
if self._is_chance_init:
# Here the action is between 0 and self.size - 1
if action < 0 or action >= self.size:
raise ValueError(
"The action is between 0 and self.size - 1 at an init chance node")
self._x = action
self._is_chance_init = False
self._player_id = pyspiel.PlayerId.DEFAULT_PLAYER_ID
elif self._player_id == pyspiel.PlayerId.CHANCE:
# Here the action is between 0 and 2
if action < 0 or action > 2:
raise ValueError(
"The action is between 0 and 2 at any chance node")
self._x = (self.x + self._ACTION_TO_MOVE[action]) % self.size
self._t += 1
self._player_id = pyspiel.PlayerId.MEAN_FIELD
elif self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
# Here the action is between 0 and 2
if action < 0 or action > 2:
raise ValueError(
"The action is between 0 and 2 at any chance node")
self._x = (self.x + self._ACTION_TO_MOVE[action]) % self.size
self._last_action = action
self._player_id = pyspiel.PlayerId.CHANCE
def _action_to_string(self, player, action):
"""Action -> string."""
del player
if self.is_chance_node() and self._is_chance_init:
return f"init_state={action}"
return str(self._ACTION_TO_MOVE[action])
def distribution_support(self):
"""return a list of state string."""
return [
self.state_to_str(
i, self.t, player_id=pyspiel.PlayerId.MEAN_FIELD)
for i in range(self.size)
]
def update_distribution(self, distribution):
"""This function is central and specific to the logic of the MFG.
Args:
distribution: a distribution to register.
- function should be called when the node is in MEAN_FIELD state.
        - the distribution entries are probabilities that correspond to each
          game state given by distribution_support().
"""
if self._player_id != pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"update_distribution should only be called at a MEAN_FIELD state.")
self._distribution = distribution.copy()
self._player_id = pyspiel.PlayerId.DEFAULT_PLAYER_ID
def is_terminal(self):
"""Returns True if the game is over."""
return self.t >= self.horizon
def current_player(self):
"""Returns id of the next player to move, or TERMINAL if game is over."""
if self.is_terminal():
return pyspiel.PlayerId.TERMINAL
return self._player_id
def _rewards(self):
"""Reward for the player for this state."""
if self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
r_x = 1 - (1.0 * np.abs(self.x - self.size // 2)) / (self.size // 2)
r_a = -(1.0 * np.abs(self._ACTION_TO_MOVE[self._last_action])) / self.size
r_mu = - np.log(self._distribution[self.x] + _EPSILON)
return r_x + r_a + r_mu
return 0.0
def rewards(self) -> List[float]:
"""Rewards for all players."""
# For now, only single-population (single-player) mean field games
# are supported.
return [self._rewards()]
def _returns(self):
"""Returns is the sum of all payoffs collected so far."""
return self.return_value + self._rewards()
def returns(self) -> List[float]:
"""Returns for all players."""
# For now, only single-population (single-player) mean field games
# are supported.
return [self._returns()]
def __str__(self):
"""A string that uniquely identify the current state."""
return self.state_to_str(self.x, self.t, player_id=self._player_id)
class Observer:
"""Observer, conforming to the PyObserver interface (see observation.py)."""
def __init__(self, params, game):
"""Initializes an empty observation tensor."""
del params
self.size = game.size
self.horizon = game.horizon
# +1 to allow t == horizon.
self.tensor = np.zeros(self.size + self.horizon + 1, np.float32)
self.dict = {"x": self.tensor[:self.size], "t": self.tensor[self.size:]}
def set_from(self, state: MFGCrowdModellingState, player: int):
"""Updates `tensor` and `dict` to reflect `state` from PoV of `player`."""
del player
# We update the observation via the shaped tensor since indexing is more
# convenient than with the 1-D tensor. Both are views onto the same memory.
self.tensor.fill(0)
# state.x is None for the initial (blank) state, don't set any
# position bit in that case.
if state.x is not None:
if not 0 <= state.x < self.size:
raise ValueError(
f"Expected {state} x position to be in [0, {self.size})")
self.dict["x"][state.x] = 1
if not 0 <= state.t <= self.horizon:
raise ValueError(f"Expected {state} time to be in [0, {self.horizon}]")
self.dict["t"][state.t] = 1
def string_from(self, state, player):
"""Observation of `state` from the PoV of `player`, as a string."""
del player
return str(state)
# Register the game with the OpenSpiel library
pyspiel.register_game(_GAME_TYPE, MFGCrowdModellingGame)
| open_spiel-master | open_spiel/python/mfg/games/crowd_modelling.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory to create (benchmark) MFG games with different settings."""
from typing import Optional
from absl import logging
from open_spiel.python.games import dynamic_routing_data
from open_spiel.python.mfg import games # pylint: disable=unused-import
from open_spiel.python.mfg.games import crowd_modelling_2d
from open_spiel.python.mfg.games import dynamic_routing
from open_spiel.python.mfg.games import predator_prey
import pyspiel
# For each game, the setting with the game name, e.g. python_mfg_dynamic_routing
# for dynamic routing, denotes the default parameters. Variations are not
# prefixed by the exact game name so that they can be used with different
# implementations, e.g. Python or C++, of the same game. Empty parameters use
# the default values as specified in the game.
GAME_SETTINGS = {
# Crowd avoidance game.
"crowd_avoidance": {},
# 2D crowd modelling game.
"crowd_modelling_2d_10x10": {},
"crowd_modelling_2d_four_rooms": {
**crowd_modelling_2d.FOUR_ROOMS,
"only_distribution_reward": True,
},
"crowd_modelling_2d_maze": {
**crowd_modelling_2d.MAZE,
"only_distribution_reward": True,
},
# Dynamic routing game.
"dynamic_routing_braess": {
"max_num_time_step": 100,
"network": "braess",
"time_step_length": 0.05,
},
"dynamic_routing_line": {
"max_num_time_step": 5,
"network": "line",
"time_step_length": 1.0,
},
"dynamic_routing_sioux_falls_dummy_demand": {
"max_num_time_step": 81,
"network": "sioux_falls_dummy_demand",
"time_step_length": 0.5,
},
"dynamic_routing_sioux_falls": {
"max_num_time_step": 81,
"network": "sioux_falls",
"time_step_length": 0.5,
},
# Predator and prey game.
"predator_prey_5x5x3": {
**predator_prey.THREE_POPULATIONS,
},
"predator_prey_5x5x4": {
**predator_prey.FOUR_POPULATIONS,
},
# Linear-quadratic game.
"linear_quadratic": {},
# Periodic aversion game.
"periodic_aversion": {},
}
# Default settings for the games.
GAME_SETTINGS.update({
"python_mfg_crowd_avoidance": GAME_SETTINGS["crowd_avoidance"],
"mean_field_lin_quad": GAME_SETTINGS["linear_quadratic"],
"mfg_crowd_modelling_2d": GAME_SETTINGS["crowd_modelling_2d_10x10"],
"mfg_dynamic_routing": GAME_SETTINGS["dynamic_routing_line"],
"python_mfg_dynamic_routing": GAME_SETTINGS["dynamic_routing_line"],
"python_mfg_periodic_aversion": GAME_SETTINGS["periodic_aversion"],
"python_mfg_predator_prey": GAME_SETTINGS["predator_prey_5x5x3"],
})
DYNAMIC_ROUTING_NETWORK = {
"line": (dynamic_routing_data.LINE_NETWORK,
dynamic_routing_data.LINE_NETWORK_OD_DEMAND),
"braess": (dynamic_routing_data.BRAESS_NETWORK,
dynamic_routing_data.BRAESS_NETWORK_OD_DEMAND),
"sioux_falls_dummy_demand":
(dynamic_routing_data.SIOUX_FALLS_NETWORK,
dynamic_routing_data.SIOUX_FALLS_DUMMY_OD_DEMAND),
"sioux_falls": (dynamic_routing_data.SIOUX_FALLS_NETWORK,
dynamic_routing_data.SIOUX_FALLS_OD_DEMAND)
}
def create_game_with_setting(game_name: str,
setting: Optional[str] = None) -> pyspiel.Game:
"""Creates an OpenSpiel game with the specified setting.
Args:
game_name: Name of a registered game, e.g. mfg_crowd_modelling_2d.
setting: Name of the pre-defined setting. If None, game_name will be used
instead. The setting should be present in the GAME_SETTINGS map above.
Returns:
a Game.
"""
setting = setting or game_name
params = GAME_SETTINGS.get(setting)
if params is None:
raise ValueError(f"{setting} setting does not exist for {game_name}.")
logging.info("Creating %s game with parameters: %r", game_name, params)
# Dynamic routing game requires setting the network and demand explicitly.
if game_name == "python_mfg_dynamic_routing":
# Create a copy since we modify it below removing the network key.
params = params.copy()
network = params.pop("network")
network, od_demand = DYNAMIC_ROUTING_NETWORK[network]
return dynamic_routing.MeanFieldRoutingGame(
params, network=network, od_demand=od_demand)
return pyspiel.load_game(game_name, params)
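# Example: create a game from one of the pre-defined settings above, e.g.
#   game = create_game_with_setting("python_mfg_dynamic_routing",
#                                   "dynamic_routing_braess")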
| open_spiel-master | open_spiel/python/mfg/games/factory.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for Python Predator-Prey game."""
import math
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import numpy.testing as npt
from open_spiel.python.mfg.games import predator_prey
import pyspiel
class MFGPredatorPreyGameTest(parameterized.TestCase):
def test_load(self):
game = pyspiel.load_game('python_mfg_predator_prey')
game.new_initial_state_for_population(0)
game.new_initial_state_for_population(1)
@parameterized.parameters(
{
'geometry': predator_prey.Geometry.SQUARE,
'expected_pos': np.array([0, 4]),
},
{
'geometry': predator_prey.Geometry.TORUS,
'expected_pos': np.array([0, 0]),
},
)
def test_dynamics(self, geometry, expected_pos):
num_players = 3
reward_matrix = np.array([[0, -1, 1], [1, 0, -1], [-1, 1, 0]])
init_distrib = np.array([
# First population
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
# Second population
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
# Third population
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
])
game = pyspiel.load_game(
'python_mfg_predator_prey',
{
'geometry': geometry,
'reward_matrix': ' '.join(str(v) for v in reward_matrix.flatten()),
'init_distrib': ' '.join(str(v) for v in init_distrib.flatten()),
'players': num_players,
'horizon': 10,
},
)
state = game.new_initial_state_for_population(2)
# Initial chance node.
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
self.assertLen(state.chance_outcomes(), 1)
self.assertEqual(
state.chance_outcomes()[0][0],
predator_prey.pos_to_merged(np.array([0, 4]), state.size),
)
state.apply_action(state.chance_outcomes()[0][0])
self.assertEqual(state.current_player(), 2)
npt.assert_array_equal(state.pos, [0, 4])
self.assertEqual(state._action_to_string(player=2, action=2), '[0 1]')
state.apply_action(2)
npt.assert_array_equal(state.pos, expected_pos)
def test_create_with_params(self):
horizon = 100
size = 20
num_players = 3
zero_mat = np.zeros((size, size))
reward_matrix = np.array([[0, -1, 1], [1, 0, -1], [-1, 1, 0]])
reward_matrix_flat = ' '.join(str(v) for v in reward_matrix.flatten())
pop_1 = zero_mat.copy()
pop_1[0, 0] = 1.0
pop_1 = pop_1.tolist()
pop_2 = zero_mat.copy()
pop_2[0, -1] = 1.0
pop_2 = pop_2.tolist()
pop_3 = zero_mat.copy()
pop_3[-1, 0] = 1.0
pop_3 = pop_3.tolist()
init_distrib = np.array(pop_1 + pop_2 + pop_3)
init_distrib_flat = ' '.join(str(v) for v in init_distrib.flatten())
setting = 'python_mfg_predator_prey(horizon={}'.format(horizon)
setting += ',size={}'.format(size)
setting += ',players={}'.format(num_players)
setting += ',reward_matrix={}'.format(reward_matrix_flat)
setting += ',init_distrib={}'.format(init_distrib_flat)
setting += ')'
game = pyspiel.load_game(setting)
self.assertEqual(game.size, 20)
self.assertEqual(game.horizon, 100)
@parameterized.parameters(
{'population': 0},
{'population': 1},
{'population': 2},
)
def test_random_game(self, population):
"""Tests basic API functions."""
horizon = 10
size = 20
num_players = 3
reward_matrix = np.array([[0, -1, 1], [1, 0, -1], [-1, 1, 0]])
zero_mat = np.zeros((size, size))
pop_1 = zero_mat.copy()
pop_1[0, 0] = 1.0
pop_1 = pop_1.tolist()
pop_2 = zero_mat.copy()
pop_2[0, -1] = 1.0
pop_2 = pop_2.tolist()
pop_3 = zero_mat.copy()
pop_3[-1, 0] = 1.0
pop_3 = pop_3.tolist()
pop_4 = zero_mat.copy()
pop_4[-1, -1] = 1.0
pop_4 = pop_4.tolist()
pops = [pop_1, pop_2, pop_3, pop_4]
init_distrib = []
for p in range(3):
init_distrib += pops[p]
init_distrib = np.array(init_distrib)
game = predator_prey.MFGPredatorPreyGame(
params={
'horizon': horizon,
'size': size,
'players': num_players,
'reward_matrix': ' '.join(str(v) for v in reward_matrix.flatten()),
'init_distrib': ' '.join(str(v) for v in init_distrib.flatten()),
}
)
pyspiel.random_sim_test(
game,
num_sims=10,
serialize=False,
verbose=True,
mean_field_population=population,
)
@parameterized.parameters(
{
'reward_matrix': np.array([[0, 1], [-1, 0]]), #
'population': 0,
'players': 2,
'initial_pos': np.array([0, 0]),
'distributions': [
# First pop.
np.array([[1, 0], [0, 0]]), #
# Second pop.
np.array([[0.5, 0.1], [0, 0.9]]), #
],
'expected_rewards': np.array([
-math.log(1 + 1e-25) + 0.5, #
-math.log(0.5 + 1e-25) - 1,
]),
'init_distrib': np.array([
# First population
[1.0, 0.0],
[0.0, 0.0],
# Second population
[0.0, 1.0],
[0.0, 0.0],
]),
},
{
'reward_matrix': np.array([
[0, -1, 0.5], #
[0.5, 0, -1], #
[-0.5, 1, 0],
]),
'population': 2,
'players': 3,
'initial_pos': np.array([1, 1]),
'distributions': [
# First pop.
np.array([[0.1, 0.2], [0.3, 0.4]]), #
# Second pop.
np.array([[0.2, 0.1], [0.1, 0.6]]), #
# Third pop.
np.array([[0, 0.1], [0.1, 0.8]]), #
],
'expected_rewards': np.array([
-math.log(0.4 + 1e-25) - 0.6 + 0.5 * 0.8,
-math.log(0.6 + 1e-25) + 0.5 * 0.4 - 0.8,
-math.log(0.8 + 1e-25) - 0.5 * 0.4 + 0.6,
]),
'init_distrib': np.array([
# First population
[1.0, 0.0],
[0.0, 0.0],
# Second population
[0.0, 1.0],
[0.0, 0.0],
# Third population
[0.0, 0.0],
[1.0, 0.0],
]),
},
)
def test_rewards(
self,
reward_matrix,
players,
population,
initial_pos,
distributions,
expected_rewards,
init_distrib,
):
game = pyspiel.load_game(
'python_mfg_predator_prey',
{
'size': 2,
'reward_matrix': ' '.join(str(v) for v in reward_matrix.flatten()),
'players': players,
'init_distrib': ' '.join(str(v) for v in init_distrib.flatten()),
},
)
state = game.new_initial_state_for_population(population)
# Initial chance node.
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(predator_prey.pos_to_merged(initial_pos, state.size))
self.assertEqual(state.current_player(), population)
npt.assert_array_equal(state.pos, initial_pos)
state.apply_action(state._NEUTRAL_ACTION)
npt.assert_array_equal(state.pos, initial_pos)
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(state._NEUTRAL_ACTION)
self.assertEqual(state.current_player(), pyspiel.PlayerId.MEAN_FIELD)
# Maps states (in string representation) to their proba.
dist = {}
for x in range(state.size):
for y in range(state.size):
for pop in range(len(reward_matrix)):
state_str = state.state_to_str(
np.array([x, y]),
state.t,
pop,
player_id=pyspiel.PlayerId.MEAN_FIELD,
)
dist[state_str] = distributions[pop][y][x]
support = state.distribution_support()
state.update_distribution([dist[s] for s in support])
# Decision node where we get a reward.
self.assertEqual(state.current_player(), population)
npt.assert_array_equal(state.rewards(), expected_rewards)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/games/predator_prey_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for Python Linear Quadratic game."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.mfg.games import linear_quadratic
import pyspiel
MFG_STR_CONST = "_a"
class MFGLinearQuadraticGameTest(absltest.TestCase):
def test_load(self):
game = pyspiel.load_game("mean_field_lin_quad")
game.new_initial_state()
def test_create(self):
"""Checks we can create the game and clone states."""
game = linear_quadratic.MFGLinearQuadraticGame()
self.assertEqual(game.size, linear_quadratic._SIZE)
self.assertEqual(game.horizon, linear_quadratic._HORIZON)
self.assertEqual(game.get_type().dynamics,
pyspiel.GameType.Dynamics.MEAN_FIELD)
print("Num distinct actions:", game.num_distinct_actions())
state = game.new_initial_state()
clone = state.clone()
print("Initial state:", state)
print("Cloned initial state:", clone)
def test_create_with_params(self):
game = pyspiel.load_game("mean_field_lin_quad(horizon=30,size=100)")
self.assertEqual(game.size, 100)
self.assertEqual(game.horizon, 30)
def check_cloning(self, state):
cloned = state.clone()
self.assertEqual(str(cloned), str(state))
self.assertEqual(cloned._distribution, state._distribution)
self.assertEqual(cloned._returns(), state._returns())
self.assertEqual(cloned.current_player(), state.current_player())
self.assertEqual(cloned.size, state.size)
self.assertEqual(cloned.horizon, state.horizon)
self.assertEqual(cloned._last_action, state._last_action)
def test_random_game(self):
"""Tests basic API functions."""
np.random.seed(7)
horizon = 30
size = 100
game = linear_quadratic.MFGLinearQuadraticGame(params={
"horizon": horizon,
"size": size
})
state = game.new_initial_state()
t = 0
while not state.is_terminal():
if state.current_player() == pyspiel.PlayerId.CHANCE:
actions, probs = zip(*state.chance_outcomes())
action = np.random.choice(actions, p=probs)
self.check_cloning(state)
self.assertEqual(len(state.legal_actions()),
len(state.chance_outcomes()))
state.apply_action(action)
elif state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
self.assertEqual(state.legal_actions(), [])
self.check_cloning(state)
num_states = len(state.distribution_support())
state.update_distribution([1 / num_states] * num_states)
else:
self.assertEqual(state.current_player(), 0)
self.check_cloning(state)
state.observation_string()
state.information_state_string()
legal_actions = state.legal_actions()
action = np.random.choice(legal_actions)
state.apply_action(action)
t += 1
self.assertEqual(t, horizon)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/mfg/games/linear_quadratic_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mean Field Crowd Modelling Game in 2d.
Please see the C++ implementation under games/mfg/crowd_modelling_2d.h for
more information.
"""
from typing import Sequence
def grid_to_forbidden_states(grid: Sequence[str]) -> str:
"""Converts a grid into string representation of forbidden states.
Args:
grid: Rows of the grid. '#' character denotes a forbidden state. All rows
should have the same number of columns, i.e. cells.
Returns:
String representation of forbidden states in the form of x (column) and y
(row) pairs, e.g. [1|1;0|2].
"""
forbidden_states = []
num_cols = len(grid[0])
for y, row in enumerate(grid):
assert len(row) == num_cols, f'Number of columns should be {num_cols}.'
for x, cell in enumerate(row):
if cell == '#':
forbidden_states.append(f'{x}|{y}')
return '[' + ';'.join(forbidden_states) + ']'
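# For example, a 3x3 grid with a single forbidden cell in the middle:
#   grid_to_forbidden_states(['...', '.#.', '...']) == '[1|1]'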
FOUR_ROOMS_FORBIDDEN_STATES = grid_to_forbidden_states([
'#############',
'# # #',
'# # #',
'# #',
'# # #',
'# # #',
'### ##### ###',
'# # #',
'# # #',
'# #',
'# # #',
'# # #',
'#############',
])
# Four rooms with an initial state at the top-left corner.

FOUR_ROOMS = {
'forbidden_states': FOUR_ROOMS_FORBIDDEN_STATES,
'horizon': 40,
'initial_distribution': '[1|1]',
'initial_distribution_value': '[1.0]',
'size': 13,
}
MAZE_FORBIDDEN_STATES = grid_to_forbidden_states([
'######################',
'# # # # #',
'# # # # #',
'###### # # ## # #',
'# # # # # #',
'# # # ### # #',
'# ######## # # #',
'# # # # ## # #',
'# # # # # # ###',
'# # # # # # # #',
'###### # ####### # # #',
'# # # # # #',
'# # ## ### # # # #',
'## # # # ##### # #',
'## # # # # # # #',
'# # #### # #',
'# #### # ######## #',
'# # # # ### #',
'# # # # # # # # #',
'# ##### # # # #',
'# # #',
'######################',
])
# 22x22 maze with an initial state at the top-left corner.
MAZE = {
'forbidden_states': MAZE_FORBIDDEN_STATES,
'horizon': 100,
'initial_distribution': '[1|1]',
'initial_distribution_value': '[1.0]',
'size': 22,
}
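# Illustrative sketch (an assumption, not shown in this module): these settings
# are meant to be forwarded as game parameters, e.g. as done by
# open_spiel.python.mfg.games.factory, assuming the C++ `mfg_crowd_modelling_2d`
# game accepts these keys:
#
#   import pyspiel
#   from open_spiel.python.mfg.games import crowd_modelling_2d
#
#   game = pyspiel.load_game('mfg_crowd_modelling_2d',
#                            crowd_modelling_2d.FOUR_ROOMS)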
| open_spiel-master | open_spiel/python/mfg/games/crowd_modelling_2d.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Implementation of a mean field routing game.
The game is derived from https://arxiv.org/abs/2110.11943.
It is the extension of the dynamic routing game python_dynamic_routing.
The list of vehicles describing the N players of the dynamic routing game is
replaced by a list of OriginDestinationDemand. One OriginDestinationDemand
corresponds to one population of vehicles (with the same origin, destination and
departure time).
This game is a variant of the mean field route choice game, as the vehicle
movement depends on the current network congestion. In the mean field route
choice game, the number of time steps to reach the destination is constant and
depends neither on the network congestion nor on the vehicle cost function.
In the dynamic driving and routing games, the vehicle chooses its speed on each
link in order to minimize its cost function; the congestion is therefore
encoded in the cost function.
More context can be found in the docstring of the python_dynamic_routing class.
"""
import functools
from typing import Any, Iterable, List, Mapping, Optional, Tuple
import numpy as np
from open_spiel.python.games import dynamic_routing_data
from open_spiel.python.games import dynamic_routing_utils
from open_spiel.python.observation import IIGObserverForPublicInfoGame
import pyspiel
_DEFAULT_PARAMS = {
"max_num_time_step": 10,
"time_step_length": 0.5,
"players": -1
}
_GAME_TYPE = pyspiel.GameType(
short_name="python_mfg_dynamic_routing",
long_name="Python Mean Field Routing Game",
dynamics=pyspiel.GameType.Dynamics.MEAN_FIELD,
chance_mode=pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC,
information=pyspiel.GameType.Information.PERFECT_INFORMATION,
utility=pyspiel.GameType.Utility.GENERAL_SUM,
reward_model=pyspiel.GameType.RewardModel.REWARDS,
max_num_players=1,
min_num_players=1,
provides_information_state_string=True,
provides_information_state_tensor=True,
provides_observation_string=True,
provides_observation_tensor=True,
default_loadable=True,
provides_factored_observation_string=True,
parameter_specification=_DEFAULT_PARAMS)
WAITING_TIME_NOT_ASSIGNED = -1
@functools.lru_cache(maxsize=None)
def _state_to_str(
is_chance_init: bool,
location: str,
time_step: int,
player_id: int,
waiting_time: int,
destination: str,
final_arrival_time: float,
) -> str:
"""Convert the state to a string representation.
  The string representation is used as a dictionary key by various algorithms
  that compute the state value, expected return, best response or the mean
  field Nash equilibrium.
  The state is uniquely defined by the current time, the type of node
  (decision, mean field or chance), the vehicle location, its destination and
  its waiting time.
Args:
is_chance_init: True if at chance initialization.
location: the location of the representative player.
time_step: the current time step.
player_id: the current node type as a player id.
waiting_time: the representative player waiting time.
destination: the destination of the representative player.
final_arrival_time: time of arrival.
Returns:
    state_string: string uniquely representing the state of the mean field
      game.
"""
if is_chance_init:
return "initial chance node"
if player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
time = str(time_step)
elif player_id == pyspiel.PlayerId.MEAN_FIELD:
time = f"{time_step}_mean_field"
elif player_id == pyspiel.PlayerId.CHANCE:
time = f"{time_step}_chance"
else:
raise ValueError(
"Player id should be DEFAULT_PLAYER_ID, MEAN_FIELD or CHANCE")
if final_arrival_time:
return (f"Arrived at {location}, with arrival time "
f"{final_arrival_time}, t={time}")
return (f"Location={location}, waiting_time={waiting_time},"
f" t={time}, destination='{destination}'")
class MeanFieldRoutingGame(pyspiel.Game):
"""Implementation of mean field routing game.
  The representative vehicle/player is described by a tuple (current location,
  current waiting time, destination). When the waiting time is negative, the
  vehicle chooses the successor link it would like to go to. When arriving on a
  link, a waiting time is assigned to the player based on the distribution of
  players on that link. The vehicle arrival time is equal to the time step when
  it first reaches its destination. See the module docstring for more
  information.
Attributes inherited from GameInfo:
max_chance_outcomes: maximum number of chance actions. Set to the length of
od_demand, i.e. the number of `OriginDestinationDemand`s.
    max_game_length: maximum number of time steps played. Passed during
construction.
max_utility: maximum utility is the opposite of the minimum arrival
time. Set to 0.
min_utility: minimum utility is the opposite of the maximum arrival
time. Set to - max_game_length - 1.
num_distinct_actions: maximum number of possible actions. This is
equal to the number of links + 1 (corresponding to having no
possible action _NO_POSSIBLE_ACTION).
num_players: the number of vehicles. Should be 1 as this mean field
game is a one population game.
Attributes:
network: the network of the game.
    od_demand: a list of OriginDestinationDemand describing the vehicles. Their
      origins and destinations should be road sections of the game.
time_step_length: size of the time step, used to convert travel times into
number of game time steps.
perform_sanity_checks: if true, sanity checks are done during the game,
should be set to false to speed up the game.
total_num_vehicle: total number of vehicles as the sum of the od_demand.
chance_outcomes: chance outcomes based on the initial probability
distribution and their probabilities.
"""
network: dynamic_routing_utils.Network
od_demand: List[dynamic_routing_utils.OriginDestinationDemand]
perform_sanity_checks: bool
time_step_length: float
def __init__(self,
params: Mapping[str, Any],
network: Optional[dynamic_routing_utils.Network] = None,
od_demand: Optional[List[
dynamic_routing_utils.OriginDestinationDemand]] = None,
perform_sanity_checks: bool = True):
"""Initiliaze the game.
Args:
params: game parameters. It should define max_num_time_step and
time_step_length.
network: the network of the game.
      od_demand: a list of OriginDestinationDemand describing the vehicles.
        Their origins and destinations should be road sections of the game.
perform_sanity_checks: set the perform_sanity_checks attribute.
"""
max_num_time_step = params["max_num_time_step"]
time_step_length = params["time_step_length"]
self.network = network if network else dynamic_routing_data.BRAESS_NETWORK
self.od_demand = (
od_demand
if od_demand else dynamic_routing_data.BRAESS_NETWORK_OD_DEMAND)
self.network.check_list_of_od_demand_is_correct(self.od_demand)
self.perform_sanity_checks = perform_sanity_checks
self.time_step_length = time_step_length
self.total_num_vehicle = sum(
[od_demand_item.counts for od_demand_item in self.od_demand])
self.chance_outcomes = [(i, od_demand_item.counts / self.total_num_vehicle)
for i, od_demand_item in enumerate(self.od_demand)]
game_info = pyspiel.GameInfo(
num_distinct_actions=self.network.num_actions(),
max_chance_outcomes=len(self.od_demand),
num_players=1,
min_utility=-max_num_time_step - 1,
max_utility=0,
max_game_length=max_num_time_step)
super().__init__(_GAME_TYPE, game_info, params if params else {})
def new_initial_state(self) -> "MeanFieldRoutingGameState":
"""Returns the state corresponding to the start of a game."""
return MeanFieldRoutingGameState(self, self.time_step_length)
def make_py_observer(self, iig_obs_type=None, params=None):
"""Returns a NetworkObserver object used for observing game state."""
if ((iig_obs_type is None) or
(iig_obs_type.public_info and not iig_obs_type.perfect_recall)):
return NetworkObserver(self.network.num_actions(), self.max_game_length())
return IIGObserverForPublicInfoGame(iig_obs_type, params)
def max_chance_nodes_in_history(self):
"""Maximun chance nodes in game history."""
return self.max_game_length() + 1
def get_road_section_as_int(self, section: Optional[str]) -> int:
"""Returns the integer representation of the road section."""
if section is None:
return 0
start_node, end_node = (
dynamic_routing_utils._nodes_from_road_section(section)) # pylint:disable=protected-access
return self.network.get_action_id_from_movement(start_node, end_node)
class MeanFieldRoutingGameState(pyspiel.State):
"""State of the DynamicRoutingGame.
One player is equal to one vehicle.
See docstring of the game class and of the file for more information.
Attributes:
_current_time_step: current time step of the game.
    _is_chance_init: boolean that encodes whether the current node is the
      initial chance node.
    _is_terminal: boolean that encodes whether the game is over.
_max_arrival_time: int that encodes maximum arrival time on any link in
      number of time steps. Needed to enumerate all the possible states of a
      vehicle being on a link to compute the volume of cars on that link.
    _max_waiting_time: maximum time a vehicle can wait on a link. This is done
      in order to limit the number of possible states with a vehicle on a
      specific link.
_normed_density_on_vehicle_link: density of vehicles on the link that is
used by the representative vehicle. This is given by the mean field
distribution.
_time_step_length: size of the time step, used to convert travel times into
number of game time steps.
_vehicle_at_destination: boolean that encodes if the representative vehicle
has reached its destination.
_vehicle_destination: the destination of the representative vehicle
corresponding to this state. It is associated to the representative
vehicle after the initial chance node according to the od_demand
distribution.
    _vehicle_final_arrival_time: the arrival time of the representative
      vehicle; it is 0 if the vehicle is still in the network, or its arrival
      time once the vehicle has reached its destination.
_vehicle_location: current location of the vehicle as a network road
section.
    _vehicle_without_legal_action: boolean that encodes whether the
      representative vehicle has reached a sink node, meaning that it will not
      be able to move anymore.
_waiting_time: time that the vehicle has to wait before moving to the next
link (equal to the link travel time when the vehicle just reached the
link).
"""
_current_time_step: int
_is_chance_init: bool
_is_terminal: bool
_max_arrival_time: int
_max_waiting_time: int
_normed_density_on_vehicle_link: float
_time_step_length: float
_vehicle_at_destination: bool
_vehicle_destination: Optional[str]
_vehicle_final_arrival_time: float
_vehicle_location: Optional[str]
_vehicle_without_legal_action: bool
_waiting_time: int
def __init__(self, game: MeanFieldRoutingGame, time_step_length: float):
"""Constructor; should only be called by Game.new_initial_state."""
super().__init__(game)
self._current_time_step = 0
self._is_chance_init = True # is true for the first state of the game.
self._is_terminal = False
if self.get_game().perform_sanity_checks:
assert game.num_players() == 1, (
"This mean field routing game should have a unique player.")
self._player_id = pyspiel.PlayerId.CHANCE
self._time_step_length = time_step_length
self._vehicle_at_destination = False
self._vehicle_final_arrival_time = 0.0
self._vehicle_without_legal_action = False
self._vehicle_location = None
self._vehicle_destination = None
self._max_arrival_time = self.get_game().max_game_length()
    # Cap the maximum link waiting time to speed up simulations.
self._max_waiting_time = self._max_arrival_time
self._waiting_time = WAITING_TIME_NOT_ASSIGNED
@property
def current_time_step(self) -> int:
"""Return current time step."""
return self._current_time_step
def current_player(self) -> pyspiel.PlayerId:
"""Returns the current player."""
if self._is_terminal:
return pyspiel.PlayerId.TERMINAL
return self._player_id
def state_to_str(self,
location: str,
time_step: int,
player_id: int = pyspiel.PlayerId.DEFAULT_PLAYER_ID,
waiting_time: int = 0,
destination: str = ""):
"""Convert the state to a string representation."""
return _state_to_str(
self._is_chance_init,
location,
time_step,
player_id,
waiting_time,
destination or self._vehicle_destination,
self._vehicle_final_arrival_time,
)
def distribution_support(self) -> List[str]:
"""Returns the state that should be used for update_distribution.
The distribution of the vehicle is used to determined the number of
cars on the same link of the representative vehicle in order to define
the waiting time of the representative vehicle when joining a link.
Therefore, only the states corresponding to be on the link of the
representative vehicle at this current time are useful.
Returns:
list of the two state: being on the link of the representative vehicle at
the current time and being stuck in traffic or not.
"""
if self._vehicle_without_legal_action:
return []
od_demand = self.get_game().od_demand
dist = [
self.state_to_str( # pylint:disable=g-complex-comprehension
self._vehicle_location,
self._current_time_step,
player_id=pyspiel.PlayerId.MEAN_FIELD,
waiting_time=waiting_time,
destination=destination)
for waiting_time in range(WAITING_TIME_NOT_ASSIGNED,
self._max_arrival_time)
for destination in {od._destination for od in od_demand} # pylint:disable=protected-access
]
assert len(set(dist)) == len(dist), (
f"Distribution should not have duplicated states: {dist}.")
return dist
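  # Note on the size of the support (derived from the enumeration above): it
  # contains one state per (waiting_time, destination) pair on the current
  # link, i.e. (self._max_arrival_time + 1) * number-of-distinct-destinations
  # states, since waiting times range from WAITING_TIME_NOT_ASSIGNED = -1 to
  # self._max_arrival_time - 1.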
def update_distribution(self, distribution: List[float]):
"""Get the number of cars on the same link as the representative player.
_normed_density_on_vehicle_link stores the number of cars on the link
where the representative player is.
Args:
distribution: the probability for a vehicle to be in the states in
distribution_support. The distribution is a list of probabilities.
"""
game = self.get_game()
if game.perform_sanity_checks:
if self._player_id != pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(("update_distribution should only be called at"
" a MEAN_FIELD state."))
self._player_id = pyspiel.PlayerId.DEFAULT_PLAYER_ID
if not self._vehicle_without_legal_action:
self._normed_density_on_vehicle_link = sum(distribution)
if game.perform_sanity_checks:
assert 0 <= self._normed_density_on_vehicle_link <= 1 + 1e-4, (
f"{self._normed_density_on_vehicle_link} is not in [0, 1].")
if self._waiting_time == WAITING_TIME_NOT_ASSIGNED:
volume = (game.total_num_vehicle * self._normed_density_on_vehicle_link)
self._waiting_time = int(
game.network.get_travel_time(self._vehicle_location, volume) /
self._time_step_length) - 1
self._waiting_time = max(0, self._waiting_time)
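  # Worked example of the waiting-time update above (hypothetical numbers):
  # with total_num_vehicle = 5 and a normed density of 0.6 on the current
  # link, the volume is 3 vehicles; if network.get_travel_time returns 2.0
  # for that volume and time_step_length = 0.5, then
  # waiting_time = int(2.0 / 0.5) - 1 = 3 time steps.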
def chance_outcomes(self) -> List[Tuple[int, float]]:
"""Returns the initial probability distribution is returned.
One chance outcome correspond to each possible OD pair with a departure
time, the probability of each chance outcome is the proportion of vehicle in
each OD pair with a departure time.
Returns:
list_tuple_outcome_probabilities: chance outcomes and their probability.
"""
game = self.get_game()
if game.perform_sanity_checks:
assert self._player_id == pyspiel.PlayerId.CHANCE
assert self._is_chance_init
return game.chance_outcomes
def _legal_actions(self, player: pyspiel.PlayerId) -> List[int]:
"""Return the legal actions of the vehicle.
    Legal actions are the successor road sections of the vehicle's current
    road section.
Args:
player: the vehicle id.
Returns:
      list_legal_actions: a list of legal actions. If the game is finished then
        the list is empty. If the vehicle is at its destination, has a positive
        waiting time, or is on a node without successors, then
        [NO_POSSIBLE_ACTION] is returned. Otherwise the sorted list of actions
        moving to the successor links of the current vehicle location is
        returned.
"""
if self._is_terminal:
return []
if self.get_game().perform_sanity_checks:
assert player == pyspiel.PlayerId.DEFAULT_PLAYER_ID, str(player)
if self._vehicle_without_legal_action:
# If the vehicle is at destination it cannot do anything.
return [dynamic_routing_utils.NO_POSSIBLE_ACTION]
if self._waiting_time > 0:
return [dynamic_routing_utils.NO_POSSIBLE_ACTION]
_, end_section_node = dynamic_routing_utils._nodes_from_road_section( # pylint:disable=protected-access
self._vehicle_location)
successors = self.get_game().network.get_successors(end_section_node)
if self.get_game().perform_sanity_checks:
if not successors:
raise ValueError(("If a vehicle is not without legal action, it"
" should have an action."))
assert isinstance(successors, Iterable)
actions = [
self.get_game().network.get_action_id_from_movement(
end_section_node, d) for d in successors
]
    # Note: a bare `map()` call would be lazy and never run the checks, so
    # iterate explicitly.
    for a in actions:
      self.get_game().network.assert_valid_action(a)
return sorted(actions)
def _apply_action(self, action: int):
"""Apply the action to the state.
    This function can be called either on a chance node or on a decision
    node. If called on the initial chance node, the action gives the OD demand
    the representative vehicle belongs to (it puts the vehicle at this location
    and defines its destination).
    If called on a decision node, the action defines on which link the vehicle
    will move (if it is not stuck in traffic) and a waiting time is assigned to
    the vehicle.
Args:
action: the action to apply.
"""
if self._player_id == pyspiel.PlayerId.CHANCE:
self._player_id = pyspiel.PlayerId.DEFAULT_PLAYER_ID
assert self._is_chance_init
      # Apply action is called on the initial chance node to initialize
# the vehicle position based on the initial location
# distribution.
od_demand = self.get_game().od_demand
self._vehicle_destination = od_demand[action].destination
self._vehicle_location = od_demand[action].origin
self._waiting_time = int(od_demand[action].departure_time /
self._time_step_length)
self._is_chance_init = False
self._normed_density_on_vehicle_link = 0
elif self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
self._player_id = pyspiel.PlayerId.MEAN_FIELD
      # Apply action is called on a decision node. If the vehicle can
# move, then it will move to the next road section.
# Has the vehicle already reached a sink node?
if not self._vehicle_without_legal_action:
# If the vehicle is stuck in traffic it cannot move.
if self._waiting_time > 0:
self._waiting_time -= 1
else:
if self.get_game().perform_sanity_checks:
self.get_game().network.assert_valid_action(action,
self._vehicle_location)
self._vehicle_location = (
self.get_game().network.get_road_section_from_action_id(action))
# Has the vehicle just reached its destination?
if self._vehicle_location == self._vehicle_destination:
self._vehicle_final_arrival_time = self._current_time_step
self._vehicle_at_destination = True
self._vehicle_without_legal_action = True
# Will the vehicle have a legal action for next time step?
elif self.get_game().network.is_location_at_sink_node(
self._vehicle_location):
self._vehicle_without_legal_action = True
self._vehicle_final_arrival_time = -self.get_game().min_utility()
else:
self._waiting_time = WAITING_TIME_NOT_ASSIGNED
self._current_time_step += 1
elif self.get_game().perform_sanity_checks:
if self._is_terminal:
raise ValueError(
"_apply_action should not be called at a end of the game.")
if self._player_id == pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"_apply_action should not be called at a MEAN_FIELD state.")
# Is the game finished?
if self._current_time_step >= self.get_game().max_game_length():
self._is_terminal = True
if not self._vehicle_at_destination:
self._vehicle_final_arrival_time = -self.get_game().min_utility()
def _action_to_string(self, player, action) -> str:
"""Action -> string."""
if player == pyspiel.PlayerId.CHANCE:
if self._is_chance_init:
return f"Vehicle is assigned to population {action}."
return f"Change node; the vehicle movement is {bool(action)}."
if self.get_game().perform_sanity_checks:
assert player == pyspiel.PlayerId.DEFAULT_PLAYER_ID
if action == dynamic_routing_utils.NO_POSSIBLE_ACTION:
return f"Vehicle {player} reach a sink node or its destination."
if self.get_game().perform_sanity_checks:
self.get_game().network.assert_valid_action(action)
return (f"Vehicle {player} would like to move to " + str(
self.get_game().network.get_road_section_from_action_id(action)) + ".")
def is_terminal(self) -> bool:
"""Returns True if the game is over."""
return self._is_terminal
def is_waiting(self) -> bool:
"""Returns True if the wait time is non-zero."""
return self._waiting_time > 0
def returns(self) -> List[float]:
"""Total reward for each player over the course of the game so far."""
if not self._is_terminal:
return [0]
return [-self._vehicle_final_arrival_time * self._time_step_length]
def get_location_as_int(self) -> int:
"""Returns the vehicle location.
    This will be the 1-based action index of the location, or 0 when the
    location is None before the initial chance node.
"""
return self.get_game().get_road_section_as_int(self._vehicle_location)
def get_destination_as_int(self) -> int:
"""Returns the vehicle destination.
    This will be the 1-based action index of the destination, or 0 when the
destination is None before the initial chance node.
"""
return self.get_game().get_road_section_as_int(self._vehicle_destination)
def __str__(self) -> str:
"""String for debug purposes. No particular semantics are required."""
if self._vehicle_location is not None:
return self.state_to_str(
self._vehicle_location,
self._current_time_step,
player_id=self._player_id,
waiting_time=self._waiting_time)
assert self._current_time_step == 0
return "Before initial chance node"
class NetworkObserver:
"""Network observer used by the learning algorithm.
The state string is the state history string. The state tensor is an array
of size number of locations * 2 + maximum number of time steps + 2, which is
the concatenation of one-hot encodings of the location, destination (1-based;
if location or destination is None, then the 0th element will be set to 1) and
the current time (0-based). The last element of the array will be set to 1 if
waiting time is positive, or 0 otherwise.
Attributes:
dict: Dictionary of tensors for the components of the observation
corresponding to the location, destination and time.
tensor: The concatenated form of the observation.
"""
def __init__(self, num_locations: int, max_num_time_step: int):
"""Initializes an empty observation tensor."""
self.tensor = np.zeros(num_locations * 2 + max_num_time_step + 1 + 1,
np.float32)
self.dict = {
"location": self.tensor[:num_locations],
"destination": self.tensor[num_locations:num_locations * 2],
"time": self.tensor[num_locations * 2:-1],
"waiting": self.tensor[-1:]
}
def set_from(self, state, player):
"""Sets the state tensor based on the specified state.
Note that the function may be called with arbitrary states of the game, e.g.
from different runs, and therefore the tensor should be cleared and updated
instead of preserving any earlier values.
Args:
state: state of the game.
player: player id that should play.
"""
assert player == pyspiel.PlayerId.DEFAULT_PLAYER_ID
self.tensor.fill(0)
self.dict["location"][state.get_location_as_int()] = 1
self.dict["destination"][state.get_destination_as_int()] = 1
self.dict["time"][state.current_time_step] = 1
self.dict["waiting"][0] = state.is_waiting()
def string_from(self, state, player):
"""Return the state history string."""
assert player == pyspiel.PlayerId.DEFAULT_PLAYER_ID
return str(state)
# Register the game with the OpenSpiel library
pyspiel.register_game(_GAME_TYPE, MeanFieldRoutingGame)
| open_spiel-master | open_spiel/python/mfg/games/dynamic_routing.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for factory."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.mfg.games import factory
import pyspiel
class FactoryTest(parameterized.TestCase):
@parameterized.parameters(
("mfg_crowd_modelling_2d", None),
("mfg_crowd_modelling_2d", "crowd_modelling_2d_10x10"),
("mfg_crowd_modelling_2d", "crowd_modelling_2d_four_rooms"),
("mfg_dynamic_routing", None),
("mfg_dynamic_routing", "dynamic_routing_line"),
("mfg_dynamic_routing", "dynamic_routing_braess"),
("python_mfg_dynamic_routing", None),
("python_mfg_dynamic_routing", "dynamic_routing_line"),
("python_mfg_dynamic_routing", "dynamic_routing_braess"),
("python_mfg_dynamic_routing",
"dynamic_routing_sioux_falls_dummy_demand"),
("python_mfg_dynamic_routing", "dynamic_routing_sioux_falls"),
("python_mfg_periodic_aversion", None),
("python_mfg_predator_prey", None),
("python_mfg_predator_prey", "predator_prey_5x5x3"))
def test_smoke(self, game_name, setting):
game = factory.create_game_with_setting(game_name, setting)
self.assertIsInstance(game, pyspiel.Game)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/mfg/games/factory_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Mean Field Linear Quadratic, implemented in Python.
This is a demonstration of implementing a mean field game in Python.
The model follows "Fictitious Play for Mean Field Games: Continuous Time
Analysis and Applications", Perrin et al., 2020
(https://arxiv.org/abs/2007.03458). This game corresponds to the game in
Section 4.1.
"""
import math
from typing import Any, List, Mapping
import numpy as np
import scipy.stats
from open_spiel.python import observation
import pyspiel
_NUM_PLAYERS = 1
_SIZE = 10
_HORIZON = 10
_MEAN_REVERT = 0.0
_VOLATILITY = 1.0
_CROSS_Q = 0.01
_KAPPA = 0.5
_TERMINAL_COST = 1.0
_DELTA_T = 1.0
_N_ACTIONS_PER_SIDE = 3
_SPATIAL_BIAS = 0
_DEFAULT_PARAMS = {
"size": _SIZE,
"horizon": _HORIZON,
"dt": _DELTA_T,
"n_actions_per_side": _N_ACTIONS_PER_SIDE,
"volatility": _VOLATILITY,
"mean_revert": _MEAN_REVERT,
"cross_q": _CROSS_Q,
"kappa": _KAPPA,
"terminal_cost": _TERMINAL_COST,
"spatial_bias": _SPATIAL_BIAS,
}
_GAME_TYPE = pyspiel.GameType(
short_name="mean_field_lin_quad",
long_name="Mean-Field Linear Quadratic Game",
dynamics=pyspiel.GameType.Dynamics.MEAN_FIELD,
chance_mode=pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC,
information=pyspiel.GameType.Information.PERFECT_INFORMATION,
utility=pyspiel.GameType.Utility.GENERAL_SUM,
reward_model=pyspiel.GameType.RewardModel.REWARDS,
max_num_players=_NUM_PLAYERS,
min_num_players=_NUM_PLAYERS,
provides_information_state_string=True,
provides_information_state_tensor=False,
provides_observation_string=True,
provides_observation_tensor=True,
parameter_specification=_DEFAULT_PARAMS,
)
class MFGLinearQuadraticGame(pyspiel.Game):
"""A Mean-Field Linear Quadratic game.
  For now, only the single-population setting is covered. A game starts with an
  initial chance node that selects the initial state of the player in the MFG.
Then the game sequentially alternates between:
- An action selection node (where the player id is >= 0)
- A chance node (the player id is pyspiel.PlayerId.CHANCE)
- A Mean Field node (the player id is pyspiel.PlayerId.MEAN_FIELD)
"""
# pylint:disable=dangerous-default-value
def __init__(self, params: Mapping[str, Any] = _DEFAULT_PARAMS):
self.size = params.get("size", _SIZE)
self.horizon = params.get("horizon", _HORIZON)
self.dt = params.get("dt", _DELTA_T)
self.n_actions_per_side = params.get(
"n_actions_per_side", _N_ACTIONS_PER_SIDE
)
self.volatility = params.get("volatility", _VOLATILITY)
self.mean_revert = params.get("mean_revert", _MEAN_REVERT)
self.cross_q = params.get("cross_q", _CROSS_Q)
self.kappa = params.get("kappa", _KAPPA)
self.terminal_cost = params.get("terminal_cost", _TERMINAL_COST)
self.spatial_bias = params.get("spatial_bias", _SPATIAL_BIAS)
game_info = pyspiel.GameInfo(
num_distinct_actions=2 * self.n_actions_per_side + 1,
max_chance_outcomes=2 * self.n_actions_per_side + 1,
num_players=_NUM_PLAYERS,
min_utility=-np.inf,
max_utility=+np.inf,
utility_sum=0.0,
max_game_length=self.horizon,
)
super().__init__(_GAME_TYPE, game_info, params)
def new_initial_state(self):
"""Returns a state corresponding to the start of a game."""
return MFGLinearQuadraticState(self)
def make_py_observer(self, iig_obs_type=None, params=None):
"""Returns an object used for observing game state."""
if (iig_obs_type is None) or (
iig_obs_type.public_info and not iig_obs_type.perfect_recall
):
return Observer(params, self)
return observation.IIGObserverForPublicInfoGame(iig_obs_type, params)
def max_chance_nodes_in_history(self):
"""Maximun chance nodes in game history."""
return self.horizon + 1
class MFGLinearQuadraticState(pyspiel.State):
"""A Mean Field Normal-Form state."""
def __init__(self, game):
"""Constructor; should only be called by Game.new_initial_state."""
super().__init__(game)
self._player_id = pyspiel.PlayerId.CHANCE
self._last_action = game.n_actions_per_side
self.tick = 0
self.x = None
self.return_value = 0.0
self.game = game
self.size = game.size
self.horizon = game.horizon
self.dt = game.dt
self.n_actions_per_side = game.n_actions_per_side
self.volatility = game.volatility
self.mean_revert = game.mean_revert
self.cross_q = game.cross_q
self.kappa = game.kappa
self.terminal_cost = game.terminal_cost
# Represents the current probability distribution over game states.
# Initialized with a uniform distribution.
self._distribution = [1.0 / self.size for _ in range(self.size)]
def to_string(self):
return self.state_to_str(self.x, self.tick)
def state_to_str(self, x, tick, player_id=pyspiel.PlayerId.DEFAULT_PLAYER_ID):
"""A string that uniquely identifies a triplet x, t, player_id."""
if self.x is None:
return "initial"
if self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
return "({}, {})".format(x, tick)
elif self._player_id == pyspiel.PlayerId.MEAN_FIELD:
return "({}, {})_a".format(x, tick)
elif self._player_id == pyspiel.PlayerId.CHANCE:
return "({}, {})_a_mu".format(x, tick)
raise ValueError(
"player_id is not mean field, chance or default player id."
)
# OpenSpiel (PySpiel) API functions are below. This is the standard set that
# should be implemented by every perfect-information sequential-move game.
@property
def n_actions(self):
return 2 * self.n_actions_per_side + 1
def _legal_actions(self, player):
"""Returns a list of legal actions for player and MFG nodes."""
if player == pyspiel.PlayerId.MEAN_FIELD:
return []
if (
player == pyspiel.PlayerId.DEFAULT_PLAYER_ID
and player == self.current_player()
):
return list(range(self.n_actions))
raise ValueError(
f"Unexpected player {player}. "
"Expected a mean field or current player 0."
)
def _apply_action(self, action):
"""Applies the specified action to the state."""
if self._player_id == pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"_apply_action should not be called at a MEAN_FIELD state."
)
self.return_value = self._rewards()
assert (
self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID
or self._player_id == pyspiel.PlayerId.CHANCE
)
if self.x is None:
self.x = action
self._player_id = pyspiel.PlayerId.DEFAULT_PLAYER_ID
return
if action < 0 or action >= self.n_actions:
raise ValueError(
"The action is between 0 and {} at any node".format(self.n_actions)
)
move = self.action_to_move(action)
if self._player_id == pyspiel.PlayerId.CHANCE:
self.x += move * math.sqrt(self.dt) * self.volatility
self.x = round(self.x) % self.size
self._player_id = pyspiel.PlayerId.MEAN_FIELD
self.tick += 1
elif self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
dist_mean = self.distribution_average() - self.x
full_move = move
full_move += self.mean_revert * dist_mean
full_move *= self.dt
self.x += round(full_move)
self.x = round(self.x) % self.size
self._last_action = action
self._player_id = pyspiel.PlayerId.CHANCE
def _action_to_string(self, player, action):
"""Action -> string."""
del player
return str(action)
def action_to_move(self, action):
return action - self.n_actions_per_side
def actions_to_position(self):
return [a - self.n_actions_per_side for a in range(self.n_actions)]
def chance_outcomes(self):
"""Returns the possible chance outcomes and their probabilities."""
if self.x is None:
return list(enumerate(self._distribution))
a = np.array(self.actions_to_position())
gaussian_vals = scipy.stats.norm.cdf(
a + 0.5, scale=self.volatility
) - scipy.stats.norm.cdf(a - 0.5, scale=self.volatility)
gaussian_vals[0] += (
scipy.stats.norm.cdf(a[0] - 0.5, scale=self.volatility) - 0.0
)
gaussian_vals[-1] += 1.0 - scipy.stats.norm.cdf(
a[-1] + 0.5, scale=self.volatility
)
return [
(act, p) for act, p in zip(list(range(self.n_actions)), gaussian_vals)
]
def distribution_support(self):
"""return a list of state string."""
return [
self.state_to_str(i, self.tick, player_id=pyspiel.PlayerId.MEAN_FIELD)
for i in range(self.size)
]
def distribution_average(self):
"""return the average of the distribution over the states: 0, ..., Size."""
states = np.arange(self.size)
pos = states * (self._distribution)
return np.sum(pos)
def update_distribution(self, distribution):
"""This function is central and specific to the logic of the MFG.
Args:
      distribution: a distribution to register. This function should be called
        when the node is in the MEAN_FIELD state. The distribution is a list of
        probabilities that correspond to each game state given by
        distribution_support.
"""
if self._player_id != pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"update_distribution should only be called at a MEAN_FIELD state."
)
self._distribution = distribution.copy()
self._player_id = pyspiel.PlayerId.DEFAULT_PLAYER_ID
@property
def t(self):
return self.tick * self.dt
def is_terminal(self):
"""Returns True if the game is over."""
return self.t >= self.horizon
def current_player(self):
"""Returns id of the next player to move, or TERMINAL if game is over."""
if self.is_terminal():
return int(pyspiel.PlayerId.TERMINAL)
return int(self._player_id)
def eta_t(self):
"""Computes the theoretical policy's `eta_t` term."""
# pylint: disable=invalid-name
kappa = self.kappa
K = self.mean_revert
q = self.cross_q
c = self.terminal_cost
T = self.horizon
t = self.t
R = (K + q) ** 2 + (kappa - q**2)
deltap = -(K + q) + math.sqrt(R)
deltam = -(K + q) - math.sqrt(R)
numerator = -(kappa - q**2) * (
math.exp((deltap - deltam) * (T - t)) - 1
) - c * (deltap * math.exp((deltap - deltam) * (T - t)) - deltam)
denominator = (
deltam * math.exp((deltap - deltam) * (T - t)) - deltap
) - c * (math.exp((deltap - deltam) * (T - t)) - 1)
return numerator / denominator
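  # In math form, the code above computes, with R = (K + q)^2 + (kappa - q^2)
  # and d+/- = -(K + q) +/- sqrt(R):
  #
  #   eta_t = [-(kappa - q^2) (e^{(d+ - d-)(T - t)} - 1)
  #            - c (d+ e^{(d+ - d-)(T - t)} - d-)]
  #           / [(d- e^{(d+ - d-)(T - t)} - d+) - c (e^{(d+ - d-)(T - t)} - 1)]
  #
  # which is the term used by the theoretical (closed-form) control of this
  # linear-quadratic MFG.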
def _rewards(self):
"""Reward for the player for this state."""
if self._player_id == pyspiel.PlayerId.DEFAULT_PLAYER_ID:
dist_mean = self.distribution_average() - self.x
move = self.action_to_move(self._last_action)
action_reward = (
self.dt
/ 2
* (
-(move**2)
+ 2 * self.cross_q * move * dist_mean
- self.kappa * dist_mean**2
)
)
if self.is_terminal():
terminal_reward = -self.terminal_cost * dist_mean**2 / 2.0
return action_reward + terminal_reward
return action_reward
return 0.0
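  # In math form, with a the move, m_bar the distribution average and x the
  # state (so Delta = m_bar - x), the running reward computed above is
  #   r_t = (dt / 2) * (-a^2 + 2 q a Delta - kappa Delta^2)
  # and the terminal reward adds -c Delta^2 / 2.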
def rewards(self) -> List[float]:
"""Rewards for all players."""
# For now, only single-population mean field games are supported.
return [self._rewards()]
def _returns(self):
"""Returns is the sum of all payoffs collected so far."""
return self._rewards()
def returns(self) -> List[float]:
"""Returns for all players."""
# For now, only single-population mean field games are supported.
return [self._returns()]
def __str__(self):
"""A string that uniquely identify the current state."""
return self.state_to_str(
x=self.x, tick=self.tick, player_id=self._player_id
)
class Observer:
"""Observer, conforming to the PyObserver interface (see observation.py)."""
def __init__(self, params, game):
"""Initializes an empty observation tensor."""
del params
self.size = game.size
self.horizon = game.horizon
self.tensor = np.zeros(2, np.float32)
self.dict = {
"x": self.tensor[0],
"t": self.tensor[1],
"observation": self.tensor,
}
def set_from(self, state, player: int):
"""Updates `tensor` and `dict` to reflect `state` from PoV of `player`."""
del player
# We update the observation via the shaped tensor since indexing is more
# convenient than with the 1-D tensor. Both are views onto the same memory.
    self.tensor[1] = state.t
    # state.x is None for the initial (blank) state; don't set the position
    # entry in that case (assigning None to the tensor would fail).
    if state.x is not None:
      self.tensor[0] = state.x
if not 0 <= state.x < self.size:
raise ValueError(
f"Expected {state} x position to be in [0, {self.size})"
)
self.dict["x"] = np.array([state.x])
if not 0 <= state.t <= self.horizon:
raise ValueError(f"Expected {state} time to be in [0, {self.horizon}]")
self.dict["t"] = np.array([state.t])
def string_from(self, state, player):
"""Observation of `state` from the PoV of `player`, as a string."""
del player
return state.to_string()
def plot_mean_field_flow(self, policy):
a = policy
return a
pyspiel.register_game(_GAME_TYPE, MFGLinearQuadraticGame)
| open_spiel-master | open_spiel/python/mfg/games/linear_quadratic.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for Python Crowd avoidance game."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import numpy.testing as npt
from open_spiel.python.mfg.games import crowd_avoidance
import pyspiel
class MFGCrowdAvoidanceGameTest(parameterized.TestCase):
def test_load(self):
game = pyspiel.load_game('python_mfg_crowd_avoidance')
game.new_initial_state_for_population(0)
game.new_initial_state_for_population(1)
@parameterized.parameters(
{
'geometry': crowd_avoidance.Geometry.SQUARE,
'expected_pos': np.array([5, 3]),
},
{
'geometry': crowd_avoidance.Geometry.TORUS,
'expected_pos': np.array([5, 3]),
},
)
def test_dynamics(self, geometry, expected_pos):
game = pyspiel.load_game(
'python_mfg_crowd_avoidance',
{
'geometry': geometry,
},
)
state = game.new_initial_state_for_population(1)
# Initial chance node.
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
self.assertLen(state.chance_outcomes(), 3)
self.assertEqual(
state.chance_outcomes()[0][0],
crowd_avoidance.pos_to_merged(np.array([5, 2]), state.size),
)
state.apply_action(state.chance_outcomes()[0][0])
self.assertEqual(state.current_player(), 1)
npt.assert_array_equal(state.pos, [5, 2])
self.assertEqual(state._action_to_string(player=1, action=2), '[0 1]')
state.apply_action(2)
npt.assert_array_equal(state.pos, expected_pos)
def test_create_with_params(self):
setting = 'python_mfg_crowd_avoidance()'
game = pyspiel.load_game(setting)
self.assertEqual(game.size, 7)
self.assertEqual(game.horizon, 10)
@parameterized.parameters(
{'population': 0},
{'population': 1},
)
def test_random_game(self, population):
"""Tests basic API functions."""
congestion_matrix = np.array([[0, 1], [1, 0]])
init_distrib = np.array([
# First population
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.4, 0.4, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
# Second population
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.4, 0.4, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
])
forbidden_states_grid = [
'#######',
'# # #',
'# #',
'# # #',
'# #',
'# # #',
'#######',
]
forbidden_states = crowd_avoidance.grid_to_forbidden_states(
forbidden_states_grid
)
game = crowd_avoidance.MFGCrowdAvoidanceGame(
params={
'horizon': 10,
'size': 7,
'players': 2,
'congestion_matrix': ' '.join(
str(v) for v in congestion_matrix.flatten()
),
'init_distrib': ' '.join(str(v) for v in init_distrib.flatten()),
'forbidden_states': forbidden_states,
}
)
pyspiel.random_sim_test(
game,
num_sims=10,
serialize=False,
verbose=True,
mean_field_population=population,
)
@parameterized.parameters(
{
'coef_congestion': 1.5,
'coef_target': 0.6,
'congestion_matrix': np.array([[0, 1], [1, 0]]),
'population': 0,
'players': 2,
'initial_pos': np.array([0, 0]),
'distributions': [
# First population
np.array([[0.8, 0.2], [0.0, 0.0]]),
# Second population
np.array([[0.3, 0.7], [0.0, 0.0]]),
],
'expected_rewards': np.array([
-1.5 * 0.3 + 0.0,
-1.5 * 0.8 + 0.0,
]),
'init_distrib': np.array([
# First population
[0.8, 0.2],
[0.0, 0.0],
# Second population
[0.3, 0.7],
[0.0, 0.0],
]),
},
)
def test_rewards(
self,
coef_congestion,
coef_target,
congestion_matrix,
players,
population,
initial_pos,
distributions,
expected_rewards,
init_distrib,
):
game = pyspiel.load_game(
'python_mfg_crowd_avoidance',
{
'size': 2,
'coef_congestion': coef_congestion,
'coef_target': coef_target,
'congestion_matrix': ' '.join(
str(v) for v in congestion_matrix.flatten()
),
'players': players,
'init_distrib': ' '.join(str(v) for v in init_distrib.flatten()),
'forbidden_states': '[]',
},
)
state = game.new_initial_state_for_population(population)
# Initial chance node.
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(crowd_avoidance.pos_to_merged(initial_pos, state.size))
self.assertEqual(state.current_player(), population)
npt.assert_array_equal(state.pos, initial_pos)
state.apply_action(state._NEUTRAL_ACTION)
npt.assert_array_equal(state.pos, initial_pos)
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(state._NEUTRAL_ACTION)
self.assertEqual(state.current_player(), pyspiel.PlayerId.MEAN_FIELD)
# Maps states (in string representation) to their proba.
dist = {}
for x in range(state.size):
for y in range(state.size):
for pop in range(len(congestion_matrix)):
state_str = state.state_to_str(
np.array([x, y]),
state.t,
pop,
player_id=pyspiel.PlayerId.MEAN_FIELD,
)
dist[state_str] = distributions[pop][y][x]
support = state.distribution_support()
state.update_distribution([dist[s] for s in support])
# Decision node where we get a reward.
self.assertEqual(state.current_player(), population)
npt.assert_array_equal(state.rewards(), expected_rewards)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/mfg/games/crowd_avoidance_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a dummy trajectory and compute the distribution of a policy."""
# pylint: disable=unused-import
from typing import Sequence
from absl import app
from absl import flags
import numpy as np
from open_spiel.python import policy
from open_spiel.python.mfg import games
from open_spiel.python.mfg.algorithms import best_response_value
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import fictitious_play
from open_spiel.python.mfg.algorithms import greedy_policy
from open_spiel.python.mfg.algorithms import mirror_descent
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.algorithms import policy_value
from open_spiel.python.mfg.games import linear_quadratic
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string('game', 'mean_field_lin_quad', 'Game to use.')
flags.DEFINE_integer('size', 10, 'Number of states.')
flags.DEFINE_integer('horizon', 5, 'Horizon size.')
flags.DEFINE_float('dt', 1.0, 'Delta t.')
flags.DEFINE_integer('n_actions_per_side', 3,
'Number actions per side (Total num actions = 2*x+1).')
flags.DEFINE_float('volatility', 1.0, 'Action noise.')
flags.DEFINE_float('learning_rate', 0.01, 'OMD learning rate.')
def get_l1_distribution_dist(mu1, mu2):
mu1d = mu1.distribution
mu2d = mu2.distribution
states = set(list(mu1d.keys()) + list(mu2d.keys()))
return sum([abs(mu1d.get(a, 0.0) - mu2d.get(a, 0.0)) for a in states
]) * FLAGS.dt / FLAGS.horizon
class LinearPolicy(policy.Policy):
"""Project values on the policy simplex."""
def __init__(self, game, player_ids): # pylint:disable=useless-super-delegation
"""Initializes the projected policy.
Args:
game: The game to analyze.
player_ids: list of player ids for which this policy applies; each should
be in the range 0..game.num_players()-1.
"""
super(LinearPolicy, self).__init__(game, player_ids)
def action_probabilities(self, state, player_id=None):
mu_bar_t = state.distribution_average()
x_t = state.x
q = state.cross_q
n_actions_per_side = state.n_actions_per_side
lin_action = (q + state.eta_t()) * (mu_bar_t - x_t)
action = n_actions_per_side + min(
n_actions_per_side, max(round(lin_action), -n_actions_per_side))
action_prob = [(a, 0.0) for a in state.legal_actions()]
action_prob[action] = (action, 1.0)
return dict(action_prob)
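  # Note on the method above: the policy is deterministic. It puts probability
  # 1 on the action closest to the linear control (q + eta_t) * (mu_bar_t -
  # x_t), clipped to the available action range, and 0 on every other legal
  # action.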
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
mfg_game = pyspiel.load_game(
FLAGS.game, {
'dt': FLAGS.dt,
'size': FLAGS.size,
'horizon': FLAGS.horizon,
'n_actions_per_side': FLAGS.n_actions_per_side,
'volatility': FLAGS.volatility
})
uniform_policy = policy.UniformRandomPolicy(mfg_game)
nash_conv_fp = nash_conv.NashConv(mfg_game, uniform_policy)
print('Uniform Policy Nashconv:', nash_conv_fp.nash_conv())
# Optimal control in the continuous setting.
theoretical_control = LinearPolicy(mfg_game,
list(range(mfg_game.num_players())))
theoretical_distribution = distribution.DistributionPolicy(
mfg_game, theoretical_control)
discretized_optimal_value = policy_value.PolicyValue(
mfg_game, theoretical_distribution,
theoretical_control).eval_state(mfg_game.new_initial_state())
th_expl = nash_conv.NashConv(mfg_game, theoretical_control).nash_conv()
print('Theoretical policy NashConv : {}'.format(th_expl))
print('Theoretical policy Value : {}'.format(discretized_optimal_value))
fp = fictitious_play.FictitiousPlay(mfg_game)
md = mirror_descent.MirrorDescent(mfg_game)
for j in range(1000):
print('\n\nIteration', j, '\n')
fp.iteration()
fp_policy = fp.get_policy()
nash_conv_fp = nash_conv.NashConv(mfg_game, fp_policy)
print('Nashconv of the current FP policy', nash_conv_fp.nash_conv())
fp_current_distribution = distribution.DistributionPolicy(
mfg_game, fp.get_policy())
fp_l1_dist = get_l1_distribution_dist(fp_current_distribution,
theoretical_distribution)
print(
'L1 distance between FP and theoretical policy : {}'.format(fp_l1_dist))
md.iteration()
md_policy = md.get_policy()
nash_conv_md = nash_conv.NashConv(mfg_game, md_policy)
print('')
print('Nashconv of the current MD policy', nash_conv_md.nash_conv())
md_current_distribution = md._distribution # pylint:disable=protected-access
md_l1_dist = get_l1_distribution_dist(md_current_distribution,
theoretical_distribution)
print('L1 distance between OMD and theoretical policy : {}'.format(
md_l1_dist))
if __name__ == '__main__':
app.run(main)
| open_spiel-master | open_spiel/python/mfg/games/linear_quadratic_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Mean Field Crowd Avoidance game, implemented in Python.
This corresponds to an environment in which two populations try to avoid each
other.
The environment is configurable in the following high-level ways:
- Congestion coefficients matrix.
- Initial distribution.
- Geometry (torus, basic square).
"""
import enum
import functools
import math
from typing import Any, List, Mapping, Optional, Tuple
import numpy as np
from open_spiel.python import observation
import pyspiel
from open_spiel.python.utils import shared_value
class Geometry(enum.IntEnum):
SQUARE = 0
TORUS = 1
_DEFAULT_SIZE = 7
_DEFAULT_HORIZON = 10
_NUM_ACTIONS = 5
_NUM_CHANCE = 5
_DEFAULT_CONGESTION_MATRIX = np.array(
# The first population feels congestion with respect to the second one,
# and vice-versa.
[[0, 1], [1, 0]]
)
_DEFAULT_NUM_PLAYERS = 2
# Each population starts in a corner.
_DEFAULT_INIT_DISTRIB = np.array([
# First population
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.4, 0.4, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
# Second population
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.4, 0.4, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
])
def grid_to_forbidden_states(grid):
"""Converts a grid into string representation of forbidden states.
Args:
grid: Rows of the grid. '#' character denotes a forbidden state. All rows
should have the same number of columns, i.e. cells.
Returns:
String representation of forbidden states in the form of x (column) and y
(row) pairs, e.g. [1|1;0|2].
"""
forbidden_states = []
num_cols = len(grid[0])
for y, row in enumerate(grid):
assert len(row) == num_cols, f"Number of columns should be {num_cols}."
for x, cell in enumerate(row):
if cell == "#":
forbidden_states.append(f"{x}|{y}")
return "[" + ";".join(forbidden_states) + "]"
def pairs_string_to_list(positions: str) -> List[np.ndarray]:
"""Converts a string representing positions into a list of positions."""
pos = positions[1:-1] # remove [ and ]
split = pos.split(";")
  return [np.array([int(i) for i in s.split("|")]) for s in split]
forbidden_states_grid = [
"#######",
"# # #",
"# #",
"# # #",
"# #",
"# # #",
"#######",
]
_DEFAULT_FORBIDDEN_STATES = grid_to_forbidden_states(forbidden_states_grid)
forbidden_states_indicator = np.array(
[
[math.nan if c == "#" else 0 for c in [*row]]
for row in forbidden_states_grid
]
)
_DEFAULT_PROBA_NOISE = 0.5
_DEFAULT_GEOMETRY = Geometry.SQUARE
_DEFAULT_COEF_CONGESTION = 0.0
_DEFAULT_COEF_TARGET = 1.0
_DEFAULT_PARAMS = {
"size": _DEFAULT_SIZE,
"horizon": _DEFAULT_HORIZON,
"players": _DEFAULT_NUM_PLAYERS,
# The congestion matrix is represented as a string containing a
# space-separated list of values.
# Its size defines the number of populations in the mean field game.
"congestion_matrix": " ".join(
str(v) for v in _DEFAULT_CONGESTION_MATRIX.flatten()
),
"geometry": _DEFAULT_GEOMETRY,
"init_distrib": " ".join(str(v) for v in _DEFAULT_INIT_DISTRIB.flatten()),
# Probability that the transition is affected by noise
"proba_noise": _DEFAULT_PROBA_NOISE,
# Weight of congestion term in the reward
"coef_congestion": _DEFAULT_COEF_CONGESTION,
"forbidden_states": _DEFAULT_FORBIDDEN_STATES,
"coef_target": _DEFAULT_COEF_TARGET,
}
_GAME_TYPE = pyspiel.GameType(
short_name="python_mfg_crowd_avoidance",
long_name="Python Mean Field Crowd Avoidance",
dynamics=pyspiel.GameType.Dynamics.MEAN_FIELD,
chance_mode=pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC,
information=pyspiel.GameType.Information.PERFECT_INFORMATION,
utility=pyspiel.GameType.Utility.GENERAL_SUM,
reward_model=pyspiel.GameType.RewardModel.REWARDS,
max_num_players=2,
min_num_players=2,
provides_information_state_string=True,
provides_information_state_tensor=False,
provides_observation_string=True,
provides_observation_tensor=True,
parameter_specification=_DEFAULT_PARAMS,
)
def get_param(param_name, params):
return params.get(param_name, _DEFAULT_PARAMS[param_name])
@functools.lru_cache(maxsize=None)
def _state_to_str(x, y, t, population, player_id):
"""A string that uniquely identify (pos, t, population, player_id)."""
if int(player_id) >= 0:
return f"(pop={population}, t={t}, pos=[{x} {y}])"
if player_id == pyspiel.PlayerId.MEAN_FIELD:
return f"(pop={population}, t={t}_a, pos=[{x} {y}])"
if player_id == pyspiel.PlayerId.CHANCE:
return f"(pop={population}, t={t}_a_mu, pos=[{x} {y}])"
class MFGCrowdAvoidanceGame(pyspiel.Game):
"""Multi-population MFG."""
# pylint:disable=dangerous-default-value
def __init__(self, params: Mapping[str, Any] = _DEFAULT_PARAMS):
self.size = get_param("size", params)
self.horizon = get_param("horizon", params)
flat_congestion_matrix = np.fromstring(
get_param("congestion_matrix", params), dtype=np.float64, sep=" "
)
num_players = get_param("players", params)
if len(flat_congestion_matrix) != num_players**2:
raise ValueError(
"Congestion matrix passed in flat representation does not represent "
f"a square matrix: {flat_congestion_matrix}"
)
self.congestion_matrix = flat_congestion_matrix.reshape(
[num_players, num_players]
)
self.geometry = get_param("geometry", params)
num_states = self.size**2
game_info = pyspiel.GameInfo(
num_distinct_actions=_NUM_ACTIONS,
max_chance_outcomes=max(num_states, _NUM_CHANCE),
num_players=num_players,
min_utility=-np.inf,
max_utility=+np.inf,
utility_sum=None,
max_game_length=self.horizon,
)
self.proba_noise = get_param("proba_noise", params)
self.coef_congestion = get_param("coef_congestion", params)
self.forbidden_states = pairs_string_to_list(
get_param("forbidden_states", params)
)
self.coef_target = get_param("coef_target", params)
# TODO(lauriere): should be given as a parameter of the model.
self.target_positions = np.array([[5, 3], [1, 3]])
# Represents the current probability distribution over game states
# (when grouped for each population).
str_init_distrib = get_param("init_distrib", params)
if str_init_distrib:
flat_init_distrib = np.fromstring(
str_init_distrib, dtype=np.float64, sep=" "
)
if len(flat_init_distrib) != num_players * self.size**2:
raise ValueError(
"Initial distribution matrix passed in flat representation does"
f" not represent a sequence of square matrices: {flat_init_distrib}"
)
self.initial_distribution = flat_init_distrib
else:
# Initialized with a uniform distribution.
self.initial_distribution = [1.0 / num_states] * (
num_states * num_players
)
super().__init__(_GAME_TYPE, game_info, params)
def new_initial_state(self):
"""Returns a new population-less blank state.
This state is provided for some internal operations that use blank
states (e.g. cloning), but cannot be used to play the game, i.e.
ApplyAction() will fail. Proper playable states should be
instantiated with new_initial_state_for_population().
"""
return MFGCrowdAvoidanceState(self)
def max_chance_nodes_in_history(self):
"""Maximun chance nodes in game history."""
return self.horizon + 1
def new_initial_state_for_population(self, population):
"""State corresponding to the start of a game for a given population."""
return MFGCrowdAvoidanceState(self, population)
def make_py_observer(self, iig_obs_type=None, params=None):
"""Returns an object used for observing game state."""
if (iig_obs_type is None) or (
iig_obs_type.public_info and not iig_obs_type.perfect_recall
):
return Observer(params, self)
return observation.IIGObserverForPublicInfoGame(iig_obs_type, params)
def pos_to_merged(pos: np.ndarray, size: int) -> int:
"""Converts a [x, y] position into a single integer."""
assert (pos >= 0).all(), pos
assert (pos < size).all(), pos
return pos[0] + pos[1] * size
def merged_to_pos(merged_pos: int, size: int) -> np.ndarray:
"""Inverse of pos_to_merged()."""
assert 0 <= merged_pos < size * size
return np.array([merged_pos % size, merged_pos // size])
class MFGCrowdAvoidanceState(pyspiel.State):
"""State for the avoidance MFG."""
# Maps legal actions to the corresponding move on the grid of the game.
_ACTION_TO_MOVE = {
0: np.array([0, 0]),
1: np.array([1, 0]),
2: np.array([0, 1]),
3: np.array([0, -1]),
4: np.array([-1, 0]),
}
# Action that corresponds to no displacement.
_NEUTRAL_ACTION = 0
def __init__(self, game, population=None):
"""Constructor; should only be called by Game.new_initial_state.*.
Args:
game: MFGCrowdAvoidanceGame for which a state should be created.
population: ID of the population to create this state for. Must be in [0,
num_players()) or None. States with population=None cannot be used to
perform game actions.
"""
super().__init__(game)
# Initial state where the initial position is chosen according to
# an initial distribution.
self._is_position_init = True
self._player_id = pyspiel.PlayerId.CHANCE
# Population this state corresponds to. Can be None, in which
# case, ApplyAction() is forbidden.
self._population = population
if self._population is not None:
assert 0 <= self._population < self.num_players()
# When set, <int>[2] numpy array representing the x, y position on the grid.
self._pos = None # type: Optional[np.ndarray]
self._t = 0
self.size = game.size
# Number of states in the grid.
self.num_states = self.size**2
self.horizon = game.horizon
self.congestion_matrix = game.congestion_matrix
self.geometry = game.geometry
self._returns = np.zeros([self.num_players()], dtype=np.float64)
self._distribution = shared_value.SharedValue(game.initial_distribution)
self.proba_noise = game.proba_noise
self.coef_congestion = game.coef_congestion
self.forbidden_states = game.forbidden_states
self.coef_target = game.coef_target
self.target_positions = game.target_positions
@property
def population(self):
return self._population
@property
def pos(self):
return self._pos
@property
def t(self):
return self._t
def state_to_str(self, pos, t, population, player_id=0):
"""A string that uniquely identify (pos, t, population, player_id)."""
if self._is_position_init:
return f"position_init_{population}"
assert isinstance(pos, np.ndarray), f"Got type {type(pos)}"
assert len(pos.shape) == 1, f"Got {len(pos.shape)}, expected 1 (pos={pos})."
assert pos.shape[0] == 2, f"Got {pos.shape[0]}, expected 2 (pos={pos})."
return _state_to_str(pos[0], pos[1], t, population, player_id)
# OpenSpiel (PySpiel) API functions are below. This is the standard set that
# should be implemented by every perfect-information sequential-move game.
def mean_field_population(self):
return self._population
def _legal_actions(self, player):
"""Returns a list of legal actions for player and MFG nodes."""
if player == pyspiel.PlayerId.MEAN_FIELD:
return []
if player >= 0 and player == self.current_player():
return list(self._ACTION_TO_MOVE)
raise ValueError(
f"Unexpected player {player}."
"Expected a mean field or current player >=0."
)
def chance_outcomes(self) -> List[Tuple[int, float]]:
"""Returns the possible chance outcomes and their probabilities."""
if self._is_position_init:
if (
self._population is None
or not 0 <= self._population < self.num_players()
):
raise ValueError(f"Invalid population {self._population}")
p = self._population % 2
dist = self._distribution.value
dist_p = dist[p * self.num_states : (p + 1) * self.num_states]
pos_indices_flat = np.nonzero(dist_p)[0]
pos_indices = [
np.array([i % self.size, (i - i % self.size) // self.size])
for i in pos_indices_flat
]
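      # Each entry is equivalent to merged_to_pos(i, self.size).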
# Beware: In the initial distribution representation, x and y correspond
# respectively to the row and the column, but in the state representation,
# they correspond to the column and the row.
return [
(pos_to_merged(i, self.size), dist_p[i[1] * self.size + i[0]])
for i in pos_indices
]
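    # After the player's own move, the noise either leaves the position
    # unchanged (outcome 0, probability 1 - proba_noise) or applies one extra
    # step in a uniformly random direction (probability proba_noise / 4 each).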
return [
(0, 1.0 - self.proba_noise),
(1, self.proba_noise / 4.0),
(2, self.proba_noise / 4.0),
(3, self.proba_noise / 4.0),
(4, self.proba_noise / 4.0),
]
def update_pos(self, action):
"""Updates the position of the player given a move action."""
if action < 0 or action >= len(self._ACTION_TO_MOVE):
raise ValueError(
f"The action must be between 0 and {len(self._ACTION_TO_MOVE)}, "
f"got {action}"
)
candidate_pos = self._pos + self._ACTION_TO_MOVE[action]
if any(np.array_equal(candidate_pos, x) for x in self.forbidden_states):
candidate_pos = self._pos
elif self.geometry == Geometry.TORUS:
candidate_pos += self.size
candidate_pos %= self.size
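      # Wrap around the torus: coordinates that stepped to -1 or to size are
      # mapped back into [0, size).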
else:
assert (
self.geometry == Geometry.SQUARE
), f"Invalid geometry {self.geometry}"
# Keep the position within the bounds of the square.
candidate_pos = np.minimum(candidate_pos, self.size - 1)
candidate_pos = np.maximum(candidate_pos, 0)
self._pos = candidate_pos
def _apply_action(self, action):
"""Applies the specified action to the state."""
if self._population is None:
raise ValueError(
"Attempting to perform an action with a population-less state."
)
if self._player_id == pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"_apply_action should not be called at a MEAN_FIELD state."
)
self._returns += np.array(self.rewards())
if self._is_position_init:
self._pos = merged_to_pos(action, self.size)
self._is_position_init = False
self._player_id = self._population
elif self._player_id == pyspiel.PlayerId.CHANCE:
self.update_pos(action)
self._t += 1
self._player_id = pyspiel.PlayerId.MEAN_FIELD
elif int(self._player_id) >= 0:
assert self._player_id == self._population, (
f"Invalid decision player id {self._player_id} "
f"expected {self._population}"
)
self.update_pos(action)
self._player_id = pyspiel.PlayerId.CHANCE
else:
raise ValueError(f"Unexpected state. Player id: {self._player_id}")
def _action_to_string(self, player, action):
"""Action -> string."""
del player
if self.is_chance_node() and self._is_position_init:
return f"init_position={action}"
return str(self._ACTION_TO_MOVE[action])
def distribution_support(self):
"""Returns a list of state string."""
support = []
for x in range(self.size):
for y in range(self.size):
for population in range(self.num_players()):
support.append(
self.state_to_str(
np.array([x, y]),
self._t,
population,
player_id=pyspiel.PlayerId.MEAN_FIELD,
)
)
return support
def get_pos_proba(self, pos: np.ndarray, population: int) -> float:
"""Gets the probability of a pos and population in the current distrib.
Args:
pos: 2D position.
population: Population requested.
Returns:
The probability for the provided position and population.
"""
assert (pos >= 0).all(), pos
assert (pos < self.size).all(), pos
assert 0 <= population < self.num_players(), population
# This logic needs to match the ordering defined in distribution_support().
index = population + self.num_players() * (pos[1] + self.size * pos[0])
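    # For example, with 2 populations and size 7, (pos=[2, 3], population=1)
    # maps to index 1 + 2 * (3 + 7 * 2) = 35.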
assert 0 <= index < len(self._distribution.value), (
f"Invalid index {index} vs dist length:"
f" {len(self._distribution.value)}, population={population}, pos={pos},"
f" state={self}"
)
return self._distribution.value[index]
def update_distribution(self, distribution):
"""This function is central and specific to the logic of the MFG.
It should only be called when the node is in MEAN_FIELD state.
Args:
distribution: List of floats that should contain the probability of each
state returned by distribution_support().
"""
expected_dist_size = self.num_states * self.num_players()
assert len(distribution) == expected_dist_size, (
"Unexpected distribution length "
f"{len(distribution)} != {expected_dist_size}"
)
if self._player_id != pyspiel.PlayerId.MEAN_FIELD:
raise ValueError(
"update_distribution should only be called at a MEAN_FIELD state."
)
self._distribution = shared_value.SharedValue(distribution)
self._player_id = self._population
def is_terminal(self):
"""Returns True if the game is over."""
return self.t >= self.horizon
def current_player(self):
"""Returns id of the next player to move, or TERMINAL if game is over."""
if self.is_terminal():
return pyspiel.PlayerId.TERMINAL
return self._player_id
def rewards(self) -> List[float]:
"""Crowd avoidance rewards for all populations.
Returns:
One float per population.
"""
if int(self._player_id) < 0:
return [0.0] * self.num_players()
densities = np.array(
[
self.get_pos_proba(self._pos, population)
for population in range(self.num_players())
],
dtype=np.float64,
)
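    # Congestion penalty: population i pays
    # coef_congestion * sum_j congestion_matrix[i, j] * density_j,
    # where density_j is the mass of population j on the current cell.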
rew = -self.coef_congestion * np.dot(self.congestion_matrix, densities)
# Rewards for target positions.
rew[0] += self.coef_target * np.array_equal(
self._pos, self.target_positions[0]
)
rew[1] += self.coef_target * np.array_equal(
self._pos, self.target_positions[1]
)
return list(rew)
def returns(self) -> List[float]:
"""Returns is the sum of all payoffs collected so far."""
return list(self._returns + np.array(self.rewards()))
def __str__(self):
"""A string that uniquely identify the current state."""
return self.state_to_str(
self._pos, self._t, self._population, player_id=self._player_id
)
class Observer:
"""Observer, conforming to the PyObserver interface (see observation.py)."""
def __init__(self, params, game):
"""Initializes an empty observation tensor."""
del params
self.size = game.size
self.horizon = game.horizon
# +1 to allow t == horizon.
self.tensor = np.zeros(2 * self.size + self.horizon + 1, np.float32)
self.dict = {
"x": self.tensor[: self.size],
"y": self.tensor[self.size : self.size * 2],
"t": self.tensor[self.size * 2 :],
}
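    # The flat tensor concatenates three one-hot blocks:
    # [x (size entries), y (size entries), t (horizon + 1 entries)], e.g.
    # size=7 and horizon=10 would give a tensor of length 7 + 7 + 11 = 25.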
def set_from(self, state: MFGCrowdAvoidanceState, player: int):
"""Updates `tensor` and `dict` to reflect `state` from PoV of `player`."""
del player
# We update the observation via the shaped tensor since indexing is more
# convenient than with the 1-D tensor. Both are views onto the same memory.
self.tensor.fill(0)
    # state.pos is None for the initial (blank) state, so we don't set any
    # position bit in that case.
if state.pos is not None:
if not (state.pos >= 0).all() or not (state.pos < self.size).all():
raise ValueError(
f"Expected {state} positions to be in [0, {self.size})"
)
self.dict["x"][state.pos[0]] = 1
self.dict["y"][state.pos[1]] = 1
if not 0 <= state.t <= self.horizon:
raise ValueError(f"Expected {state} time to be in [0, {self.horizon}]")
self.dict["t"][state.t] = 1
def string_from(self, state, player):
"""Observation of `state` from the PoV of `player`, as a string."""
del player
return str(state)
pyspiel.register_game(_GAME_TYPE, MFGCrowdAvoidanceGame)
| open_spiel-master | open_spiel/python/mfg/games/crowd_avoidance.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for Python Crowd Modelling game."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.mfg.games import crowd_modelling
import pyspiel
MFG_STR_CONST = "_a"
class MFGCrowdModellingGameTest(absltest.TestCase):
def test_load(self):
game = pyspiel.load_game("python_mfg_crowd_modelling")
game.new_initial_state()
def test_create(self):
"""Checks we can create the game and clone states."""
game = crowd_modelling.MFGCrowdModellingGame()
self.assertEqual(game.size, crowd_modelling._SIZE)
self.assertEqual(game.horizon, crowd_modelling._HORIZON)
self.assertEqual(game.get_type().dynamics,
pyspiel.GameType.Dynamics.MEAN_FIELD)
print("Num distinct actions:", game.num_distinct_actions())
state = game.new_initial_state()
clone = state.clone()
print("Initial state:", state)
print("Cloned initial state:", clone)
def test_create_with_params(self):
game = pyspiel.load_game("python_mfg_crowd_modelling(horizon=100,size=20)")
self.assertEqual(game.size, 20)
self.assertEqual(game.horizon, 100)
def test_random_game(self):
"""Tests basic API functions."""
horizon = 20
size = 50
game = crowd_modelling.MFGCrowdModellingGame(params={
"horizon": horizon,
"size": size
})
pyspiel.random_sim_test(
game, num_sims=10, serialize=False, verbose=True)
def test_reward(self):
game = crowd_modelling.MFGCrowdModellingGame()
state = game.new_initial_state()
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(game.size // 2)
self.assertEqual(state.current_player(), 0)
# This expected reward assumes that the game is initialized with
# uniform state distribution.
self.assertAlmostEqual(state.rewards()[0], 1. + np.log(game.size))
self.assertAlmostEqual(state.returns()[0], 1. + np.log(game.size))
state.apply_action(1)
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
self.assertAlmostEqual(state.returns()[0], 1. + np.log(game.size))
def test_distribution(self):
"""Checks that distribution-related functions work."""
game = crowd_modelling.MFGCrowdModellingGame()
state = game.new_initial_state()
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(game.size // 2)
self.assertEqual(state.current_player(), 0)
# This expected reward assumes that the game is initialized with
# uniform state distribution.
self.assertAlmostEqual(state.rewards()[0], 1. + np.log(game.size))
state.apply_action(crowd_modelling.MFGCrowdModellingState._NEUTRAL_ACTION)
# Chance node.
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
state.apply_action(crowd_modelling.MFGCrowdModellingState._NEUTRAL_ACTION)
self.assertEqual(state.distribution_support(), [
"(0, 1)_a", "(1, 1)_a", "(2, 1)_a", "(3, 1)_a", "(4, 1)_a", "(5, 1)_a",
"(6, 1)_a", "(7, 1)_a", "(8, 1)_a", "(9, 1)_a"
])
new_distrib = [0.01] * 9 + [1. - 0.01 * 9]
state.update_distribution(new_distrib)
self.assertAlmostEqual(state._distribution, new_distrib)
# Check that the distribution is taken into account for the reward
# computation.
self.assertAlmostEqual(state.rewards()[0], 1. - np.log(0.01))
def test_compare_py_cpp(self):
"""Compares py and cpp implementations of this game."""
py_game = pyspiel.load_game("python_mfg_crowd_modelling")
cpp_game = pyspiel.load_game("mfg_crowd_modelling")
np.random.seed(7)
py_state = py_game.new_initial_state()
cpp_state = cpp_game.new_initial_state()
t = 0
while not cpp_state.is_terminal():
self.assertFalse(py_state.is_terminal())
self.assertEqual(str(cpp_state), str(py_state))
self.assertAlmostEqual(cpp_state.returns()[0], py_state.returns()[0])
if cpp_state.current_player() == pyspiel.PlayerId.CHANCE:
actions, probs = zip(*cpp_state.chance_outcomes())
action = np.random.choice(actions, p=probs)
self.assertEqual(
cpp_state.action_to_string(action),
py_state.action_to_string(action))
cpp_state.apply_action(action)
py_state.apply_action(action)
elif cpp_state.current_player() == pyspiel.PlayerId.MEAN_FIELD:
num_cpp_states = len(cpp_state.distribution_support())
distribution = [1 / num_cpp_states] * num_cpp_states
cpp_state.update_distribution(distribution)
py_state.update_distribution(distribution)
else:
self.assertEqual(cpp_state.current_player(), 0)
legal_actions = cpp_state.legal_actions()
action = np.random.choice(legal_actions)
self.assertEqual(
cpp_state.action_to_string(action),
py_state.action_to_string(action))
cpp_state.apply_action(action)
py_state.apply_action(action)
t += 1
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/mfg/games/crowd_modelling_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is replaces google's gfile used for network storage.
A more complete public version of gfile:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/platform/gfile.py
"""
import os
# pylint: disable=invalid-name
Exists = os.path.exists
IsDirectory = os.path.isdir
ListDir = os.listdir
MakeDirs = os.makedirs
Open = open
| open_spiel-master | open_spiel/python/utils/gfile.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Least Recently Used cache."""
import collections
class CacheInfo(collections.namedtuple("CacheInfo", [
"hits", "misses", "size", "max_size"])):
"""Info for LRUCache."""
@property
def usage(self):
return self.size / self.max_size if self.max_size else 0
@property
def total(self):
return self.hits + self.misses
@property
def hit_rate(self):
return self.hits / self.total if self.total else 0
class LRUCache(object):
"""A Least Recently Used cache.
This is more general than functools.lru_cache since that one requires the
key to also be the input to the function to generate the value, which
isn't possible when the input is not hashable, eg a numpy.ndarray.
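  A minimal usage sketch (illustrative only):
    cache = LRUCache(max_size=2)
    cache.set("a", 1)
    assert cache.get("a") == 1
    val = cache.make("b", lambda: 2)  # computed on a miss, then cached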
"""
def __init__(self, max_size):
self._max_size = max_size
self._data = collections.OrderedDict()
self._hits = 0
self._misses = 0
def clear(self):
self._data.clear()
self._hits = 0
self._misses = 0
def make(self, key, fn):
"""Return the value, either from cache, or make it and save it."""
try:
val = self._data.pop(key) # Take it out.
self._hits += 1
except KeyError:
self._misses += 1
val = fn()
if len(self._data) >= self._max_size:
self._data.popitem(False)
self._data[key] = val # Insert/reinsert it at the back.
return val
def get(self, key):
"""Get the value and move it to the back, or return None on a miss."""
try:
val = self._data.pop(key) # Take it out.
self._data[key] = val # Reinsert it at the back.
self._hits += 1
return val
except KeyError:
self._misses += 1
return None
def set(self, key, val):
"""Set the value."""
self._data.pop(key, None) # Take it out if it existed.
self._data[key] = val # Insert/reinsert it at the back.
if len(self._data) > self._max_size:
self._data.popitem(False)
return val
def info(self):
return CacheInfo(self._hits, self._misses, len(self._data), self._max_size)
def __len__(self):
return len(self._data)
| open_spiel-master | open_spiel/python/utils/lru_cache.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reservoir buffer implemented in Numpy.
See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
"""
import random
import numpy as np
# TODO(author18): refactor the reservoir with the NFSP Pytorch implementation
class ReservoirBuffer(object):
"""Allows uniform sampling over a stream of data.
This class supports the storage of arbitrary elements, such as observation
tensors, integer actions, etc.
See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
"""
def __init__(self, reservoir_buffer_capacity):
self._reservoir_buffer_capacity = reservoir_buffer_capacity
self._data = []
self._add_calls = 0
def add(self, element):
"""Potentially adds `element` to the reservoir buffer.
Args:
element: data to be added to the reservoir buffer.
"""
if len(self._data) < self._reservoir_buffer_capacity:
self._data.append(element)
else:
idx = np.random.randint(0, self._add_calls + 1)
if idx < self._reservoir_buffer_capacity:
self._data[idx] = element
self._add_calls += 1
def sample(self, num_samples):
"""Returns `num_samples` uniformly sampled from the buffer.
Args:
num_samples: `int`, number of samples to draw.
Returns:
An iterable over `num_samples` random elements of the buffer.
Raises:
      ValueError: If there are fewer than `num_samples` elements in the buffer.
"""
if len(self._data) < num_samples:
raise ValueError("{} elements could not be sampled from size {}".format(
num_samples, len(self._data)))
return random.sample(self._data, num_samples)
def clear(self):
self._data = []
self._add_calls = 0
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
| open_spiel-master | open_spiel/python/utils/reservoir_buffer.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics and logging helpers."""
from typing import Optional
# pylint: disable=g-import-not-at-top disable=unused-import
try:
from clu import metric_writers
from clu.metric_writers import ensure_flushes
from clu.metric_writers import write_values
from clu.values import * # pylint: disable=wildcard-import
except ImportError as e:
raise ImportError(
str(e) +
"\nCLU not found. Please install CLU: python3 -m pip install clu") from e
# pylint: enable=g-import-not-at-top enable=unused-import
def create_default_writer(logdir: Optional[str] = None,
just_logging: bool = False,
**kwargs) -> metric_writers.MetricWriter:
"""Create the default metrics writer.
See metric_writers.LoggingWriter interface for the API to write the metrics
and other metadata, e.g. hyper-parameters. Sample usage is as follows:
writer = metrics.create_default_writer('/some/path')
writer.write_hparams({"learning_rate": 0.001, "batch_size": 64})
...
# e.g. in training loop.
writer.write_scalars(step, {"loss": loss})
...
writer.flush()
Args:
logdir: Path of the directory to store the metric logs as TF summary files.
If None, files will not be created.
    just_logging: If true, metrics will only be written to the INFO log.
**kwargs: kwargs passed to the CLU default writer.
Returns:
a metric_writers.MetricWriter.
"""
return metric_writers.create_default_writer(
logdir=logdir, just_logging=just_logging, **kwargs)
| open_spiel-master | open_spiel/python/utils/metrics.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.utils.replay_buffer."""
from absl.testing import absltest
from open_spiel.python.utils.replay_buffer import ReplayBuffer
class ReplayBufferTest(absltest.TestCase):
def test_replay_buffer_add(self):
# pylint: disable=g-generic-assert
replay_buffer = ReplayBuffer(replay_buffer_capacity=10)
self.assertEqual(len(replay_buffer), 0)
replay_buffer.add("entry1")
self.assertEqual(len(replay_buffer), 1)
replay_buffer.add("entry2")
self.assertEqual(len(replay_buffer), 2)
self.assertIn("entry1", replay_buffer)
self.assertIn("entry2", replay_buffer)
def test_replay_buffer_max_capacity(self):
# pylint: disable=g-generic-assert
replay_buffer = ReplayBuffer(replay_buffer_capacity=2)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.add("entry3")
self.assertEqual(len(replay_buffer), 2)
self.assertIn("entry2", replay_buffer)
self.assertIn("entry3", replay_buffer)
def test_replay_buffer_sample(self):
replay_buffer = ReplayBuffer(replay_buffer_capacity=3)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.add("entry3")
samples = replay_buffer.sample(3)
self.assertIn("entry1", samples)
self.assertIn("entry2", samples)
self.assertIn("entry3", samples)
def test_replay_buffer_reset(self):
replay_buffer = ReplayBuffer(replay_buffer_capacity=3)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.reset()
self.assertEmpty(replay_buffer)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/utils/replay_buffer_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/utils/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics."""
import glob
import os
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.utils import metrics
class MetricsTest(parameterized.TestCase):
@parameterized.parameters((True,), (False,))
def test_create(self, just_logging: bool):
logdir = self.create_tempdir()
# Create the writer.
writer = metrics.create_default_writer(
logdir.full_path, just_logging=just_logging)
self.assertIsInstance(writer, metrics.metric_writers.MultiWriter)
# Write some metrics.
writer.write_hparams({"param1": 1.0, "param2": 2.0})
for step in range(5):
writer.write_scalars(step, {"value": step * step})
metrics.write_values(writer, 5, {
"scalar": 1.23,
"text": metrics.Text(value="foo")
})
# Flush the writer.
writer.flush()
# Check that the summary file exists if not just logging.
self.assertLen(
glob.glob(os.path.join(logdir.full_path, "events.out.tfevents.*")),
0 if just_logging else 1)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/utils/metrics_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log data to a jsonl file."""
import datetime
import json
import os
import time
from typing import Any, Dict, Text
from open_spiel.python.utils import gfile
class DataLoggerJsonLines:
"""Log data to a jsonl file."""
def __init__(self, path: str, name: str, flush=True):
self._fd = gfile.Open(os.path.join(path, name + ".jsonl"), "w")
self._flush = flush
self._start_time = time.time()
def __del__(self):
self.close()
def close(self):
if hasattr(self, "_fd") and self._fd is not None:
self._fd.flush()
self._fd.close()
self._fd = None
def flush(self):
self._fd.flush()
def write(self, data: Dict[Text, Any]):
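    """Writes `data` as one JSON line, adding time_abs/time_rel/time_str."""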
now = time.time()
data["time_abs"] = now
data["time_rel"] = now - self._start_time
dt_now = datetime.datetime.utcfromtimestamp(now)
data["time_str"] = dt_now.strftime("%Y-%m-%d %H:%M:%S.%f +0000")
self._fd.write(json.dumps(data))
self._fd.write("\n")
if self._flush:
self.flush()
| open_spiel-master | open_spiel/python/utils/data_logger.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A shared value without deep copy."""
class SharedValue(object):
"""A shared value without deep copy."""
def __init__(self, value):
self.value = value
def __deepcopy__(self, memo):
return SharedValue(self.value)
| open_spiel-master | open_spiel/python/utils/shared_value.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.utils.lru_cache."""
from absl.testing import absltest
from open_spiel.python.utils import lru_cache
class LruCacheTest(absltest.TestCase):
def test_lru_cache(self):
cache = lru_cache.LRUCache(4)
self.assertEmpty(cache)
info = cache.info()
self.assertEqual(info.hits, 0)
self.assertEqual(info.misses, 0)
self.assertEqual(info.size, 0)
self.assertEqual(info.max_size, 4)
self.assertEqual(info.usage, 0)
self.assertEqual(info.hit_rate, 0)
self.assertIsNone(cache.get(1))
cache.set(13, "13")
self.assertLen(cache, 1)
self.assertIsNone(cache.get(1))
self.assertEqual(cache.get(13), "13")
cache.set(14, "14")
cache.set(15, "15")
cache.set(16, "16")
self.assertLen(cache, 4)
cache.set(17, "17")
self.assertLen(cache, 4)
self.assertIsNone(cache.get(13)) # evicted
self.assertTrue(cache.get(14))
self.assertLen(cache, 4)
cache.set(18, "18")
self.assertIsNone(cache.get(15)) # evicted
self.assertTrue(cache.get(14)) # older but more recently used
info = cache.info()
self.assertEqual(info.usage, 1)
cache.clear()
self.assertIsNone(cache.get(18)) # evicted
self.assertEqual(cache.make(19, lambda: "19"), "19")
self.assertEqual(cache.get(19), "19")
self.assertEqual(cache.make(19, lambda: "20"), "19")
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/utils/lru_cache_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some basic stats classes."""
import math
from typing import List
class BasicStats(object):
"""A set of statistics about a single value series."""
__slots__ = ("_num", "_min", "_max", "_sum", "_sum_sq")
def __init__(self):
self.reset()
def reset(self):
self._num = 0
self._min = float("inf")
self._max = float("-inf")
self._sum = 0
self._sum_sq = 0
def add(self, val: float):
self._num += 1
if self._min > val:
self._min = val
if self._max < val:
self._max = val
self._sum += val
self._sum_sq += val**2
@property
def num(self):
return self._num
@property
def min(self):
return 0 if self._num == 0 else self._min
@property
def max(self):
return 0 if self._num == 0 else self._max
@property
def avg(self):
return 0 if self._num == 0 else self._sum / self._num
@property
def std_dev(self):
"""Standard deviation."""
if self._num == 0:
return 0
return math.sqrt(
max(0, self._sum_sq / self._num - (self._sum / self._num)**2))
def merge(self, other: "BasicStats"):
# pylint: disable=protected-access
self._num += other._num
self._min = min(self._min, other._min)
self._max = max(self._max, other._max)
self._sum += other._sum
self._sum_sq += other._sum_sq
# pylint: enable=protected-access
@property
def as_dict(self):
return {
"num": self.num,
"min": self.min,
"max": self.max,
"avg": self.avg,
"std_dev": self.std_dev,
}
def __str__(self):
if self.num == 0:
return "num=0"
return "sum: %.4f, avg: %.4f, dev: %.4f, min: %.4f, max: %.4f, num: %d" % (
self.sum, self.avg, self.dev, self.min, self.max, self.num)
class HistogramNumbered:
"""Track a histogram of occurences for `count` buckets.
You need to decide how to map your data into the buckets. Mainly useful for
scalar values.
"""
def __init__(self, num_buckets: int):
self._counts = [0] * num_buckets
def reset(self):
self._counts = [0] * len(self._counts)
def add(self, bucket_id: int):
self._counts[bucket_id] += 1
@property
def data(self):
return self._counts
class HistogramNamed:
"""Track a histogram of occurences for named buckets.
Same as HistogramNumbered, but each bucket has a name associated with it.
Mainly useful for categorical values.
"""
def __init__(self, bucket_names: List[str]):
self._names = bucket_names
self.reset()
def reset(self):
self._counts = [0] * len(self._names)
def add(self, bucket_id: int):
self._counts[bucket_id] += 1
@property
def data(self):
return {
"counts": self._counts,
"names": self._names,
}
| open_spiel-master | open_spiel/python/utils/stats.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper around multiprocessing to be compatible at google."""
import contextlib
import multiprocessing
import queue
Empty = queue.Empty
# Without this line, this fails on latest MacOS with Python 3.8. See
# https://github.com/pytest-dev/pytest-flask/issues/104#issuecomment-577908228
# and for more details see
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
multiprocessing.set_start_method("fork")
# For compatibility so that it works inside Google.
@contextlib.contextmanager
def main_handler():
yield
class Process(object):
"""A wrapper around `multiprocessing` that allows it to be used at google.
It spawns a subprocess from the given target function. That function should
take an additional argument `queue` which will get a bidirectional
_ProcessQueue for communicating with the parent.
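  A minimal sketch (illustrative; the `queue` kwarg is injected automatically):
    def worker(x, queue):
      queue.put(x * 2)
    p = Process(worker, args=(21,))
    print(p.queue.get())  # 42
    p.join()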
"""
def __init__(self, target, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
elif "queue" in kwargs:
raise ValueError("`queue` is reserved for use by `Process`.")
q1 = multiprocessing.Queue()
q2 = multiprocessing.Queue()
self._queue = _ProcessQueue(q1, q2)
kwargs["queue"] = _ProcessQueue(q2, q1)
self._process = multiprocessing.Process(
target=target, args=args, kwargs=kwargs)
self._process.start()
def join(self, *args):
return self._process.join(*args)
@property
def exitcode(self):
return self._process.exitcode
@property
def queue(self):
return self._queue
class _ProcessQueue(object):
"""A bidirectional queue for talking to a subprocess.
`empty`, `get` and `get_nowait` act on the incoming queue, while
`full`, `put` and `put_nowait` act on the outgoing queue.
This class should only be created by the Process object.
"""
def __init__(self, q_in, q_out):
self._q_in = q_in
self._q_out = q_out
def empty(self):
return self._q_in.empty()
def full(self):
return self._q_out.full()
def get(self, block=True, timeout=None):
return self._q_in.get(block=block, timeout=timeout)
def get_nowait(self):
return self.get(False)
def put(self, obj, block=True, timeout=None):
return self._q_out.put(obj, block=block, timeout=timeout)
def put_nowait(self, obj):
return self.put(obj, False)
| open_spiel-master | open_spiel/python/utils/spawn.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point for applications with platform specific initializations."""
from absl.app import * # pylint: disable=wildcard-import
| open_spiel-master | open_spiel/python/utils/app.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Replay buffer of fixed size with a FIFI replacement policy."""
import random
class ReplayBuffer(object):
"""ReplayBuffer of fixed size with a FIFO replacement policy.
Stored transitions can be sampled uniformly.
  The underlying data structure is a ring buffer, allowing O(1) adding and
  sampling.
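  A minimal usage sketch (illustrative only):
    buf = ReplayBuffer(replay_buffer_capacity=3)
    buf.add("transition")
    batch = buf.sample(1)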
"""
def __init__(self, replay_buffer_capacity):
self._replay_buffer_capacity = replay_buffer_capacity
self._data = []
self._next_entry_index = 0
def add(self, element):
"""Adds `element` to the buffer.
If the buffer is full, the oldest element will be replaced.
Args:
element: data to be added to the buffer.
"""
if len(self._data) < self._replay_buffer_capacity:
self._data.append(element)
else:
self._data[self._next_entry_index] = element
self._next_entry_index += 1
self._next_entry_index %= self._replay_buffer_capacity
def sample(self, num_samples):
"""Returns `num_samples` uniformly sampled from the buffer.
Args:
num_samples: `int`, number of samples to draw.
Returns:
An iterable over `num_samples` random elements of the buffer.
Raises:
      ValueError: If there are fewer than `num_samples` elements in the buffer.
"""
if len(self._data) < num_samples:
raise ValueError("{} elements could not be sampled from size {}".format(
num_samples, len(self._data)))
return random.sample(self._data, num_samples)
def reset(self):
"""Resets the contents of the replay buffer."""
self._data = []
self._next_entry_index = 0
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
| open_spiel-master | open_spiel/python/utils/replay_buffer.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File utilities."""
import os
def find_file(filename, levels):
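  """Finds `filename` in the current directory or up to `levels` levels above.
  Returns the first existing path (the filename possibly prefixed by '../'
  segments), or None if the file is not found.
  """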
if os.path.isfile(filename):
return filename
else:
for _ in range(levels):
filename = '../' + filename
if os.path.isfile(filename):
return filename
return None
| open_spiel-master | open_spiel/python/utils/file_utils.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to log stuff to a file, mainly useful in parallel situations."""
import datetime
import os
class FileLogger(object):
"""A logger to print stuff to a file."""
def __init__(self, path, name, quiet=False, also_to_stdout=False):
self._fd = open(os.path.join(path, "log-{}.txt".format(name)), "w")
self._quiet = quiet
self.also_to_stdout = also_to_stdout
def print(self, *args):
# Date/time with millisecond precision.
date_prefix = "[{}]".format(datetime.datetime.now().isoformat(" ")[:-3])
print(date_prefix, *args, file=self._fd, flush=True)
if self.also_to_stdout:
print(date_prefix, *args, flush=True)
def opt_print(self, *args):
if not self._quiet:
self.print(*args)
def __enter__(self):
return self
def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback):
self.close()
def close(self):
if self._fd:
self._fd.close()
self._fd = None
def __del__(self):
self.close()
| open_spiel-master | open_spiel/python/utils/file_logger.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.utils.spawn."""
import random
import time
from absl.testing import absltest
from open_spiel.python.utils import spawn
class SpawnTest(absltest.TestCase):
def test_spawn_works(self):
max_sleep_time = 0.01 # 10ms
def worker_fn(worker_id, queue):
queue.put(worker_id) # Show it's up and running.
random.seed(time.time() + worker_id)
while True:
value = queue.get()
if value is None:
break
time.sleep(max_sleep_time * random.random())
queue.put((worker_id, value))
num_workers = 5
workers = [spawn.Process(worker_fn, kwargs={"worker_id": i})
for i in range(num_workers)]
# Make sure they're warmed up.
for worker_id, worker in enumerate(workers):
self.assertEqual(worker_id, worker.queue.get())
num_work_units = 40
expected_output = []
for worker_id, worker in enumerate(workers):
for i in range(num_work_units):
worker.queue.put(i)
expected_output.append((worker_id, i))
worker.queue.put(None)
start_time = time.time()
output = []
i = 0
while len(output) < len(expected_output):
for worker in workers:
try:
output.append(worker.queue.get_nowait())
except spawn.Empty:
pass
time.sleep(0.001)
i += 1
self.assertLess(time.time() - start_time,
20 * max_sleep_time * num_work_units,
msg=f"Don't wait forever. Loop {i}, found {len(output)}")
time_taken = time.time() - start_time
print("Finished in {:.3f}s, {:.2f}x the max".format(
time_taken, time_taken / (max_sleep_time * num_work_units)))
for worker in workers:
worker.join()
# All messages arrived
self.assertLen(output, len(expected_output))
self.assertCountEqual(output, expected_output)
# The messages arrived out of order, showing parallelism.
self.assertNotEqual(output, expected_output)
if __name__ == "__main__":
with spawn.main_handler():
absltest.main()
| open_spiel-master | open_spiel/python/utils/spawn_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.utils.file_logger."""
import os
import tempfile
from absl.testing import absltest
from open_spiel.python.utils import file_logger
class FileLoggerTest(absltest.TestCase):
def test_file_logger(self):
tmp_dir = tempfile.mkdtemp()
try:
log_name = "test"
log_file_name = os.path.join(tmp_dir, "log-{}.txt".format(log_name))
self.assertTrue(os.path.isdir(tmp_dir))
self.assertFalse(os.path.exists(log_file_name))
with file_logger.FileLogger(tmp_dir, log_name) as logger:
logger.print("line 1")
logger.print("line", 2)
logger.print("line", 3, "asdf")
with open(log_file_name, "r") as f:
lines = f.readlines()
self.assertLen(lines, 3)
self.assertIn("line 1", lines[0])
self.assertIn("line 2", lines[1])
self.assertIn("line 3 asdf", lines[2])
finally:
if os.path.exists(log_file_name):
os.remove(log_file_name)
os.rmdir(tmp_dir)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/utils/file_logger_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training utilities."""
from typing import Sequence
from open_spiel.python import rl_agent
from open_spiel.python import rl_environment
def run_episodes(envs: Sequence[rl_environment.Environment],
agents: Sequence[rl_agent.AbstractAgent],
num_episodes: int = 1,
is_evaluation: bool = False) -> None:
"""Runs the agents on the environments for the specified number of episodes.
Args:
envs: RL environments.
agents: RL agents.
num_episodes: Number of episodes to run.
is_evaluation: Indicates whether the agent should use the evaluation or
training behavior.
"""
assert len(envs) == len(agents), 'Environments should match the agents.'
for _ in range(num_episodes):
for env, agent in zip(envs, agents):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step, is_evaluation=is_evaluation)
if agent_output:
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
agent.step(time_step)
| open_spiel-master | open_spiel/python/utils/training.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualizing game trees with graphviz.
GameTree builds a `pygraphviz.AGraph` representation of the game tree. The
resulting tree can be directly visualized in Jupyter notebooks or Google Colab
via SVG plotting - or written to a file by calling `draw(filename, prog="dot")`.
See `examples/treeviz_example.py` for a more detailed example.
This module relies on external dependencies, which need to be installed before
use. On a debian system follow these steps:
```
sudo apt-get install graphviz libgraphviz-dev
pip install pygraphviz
```
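A minimal usage sketch (kuhn_poker is chosen purely as an illustration):
```
import pyspiel
from open_spiel.python.visualizations import treeviz
game = pyspiel.load_game("kuhn_poker")
gametree = treeviz.GameTree(game, depth_limit=3)
gametree.draw("kuhn_tree.png", prog="dot")
```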
"""
import collections
import pyspiel
# pylint: disable=g-import-not-at-top
try:
import pygraphviz
except (ImportError, Exception) as e:
raise ImportError(
str(e) + "\nPlease make sure to install the following dependencies:\n"
"sudo apt-get install graphviz libgraphviz-dev\n"
"pip install pygraphviz") from None
# pylint: enable=g-import-not-at-top
_PLAYER_SHAPES = {0: "square", 1: "ellipse"}
_PLAYER_COLORS = {-1: "black", 0: "blue", 1: "red"}
_FONTSIZE = 8
_WIDTH = _HEIGHT = 0.25
_ARROWSIZE = .5
_MARGIN = 0.01
def default_node_decorator(state):
"""Decorates a state-node of the game tree.
This method can be called by a custom decorator to prepopulate the attributes
dictionary. Then only relevant attributes need to be changed, or added.
Args:
state: The state.
Returns:
`dict` with graphviz node style attributes.
"""
player = state.current_player()
attrs = {
"label": "",
"fontsize": _FONTSIZE,
"width": _WIDTH,
"height": _HEIGHT,
"margin": _MARGIN
}
if state.is_terminal():
attrs["label"] = ", ".join(map(str, state.returns()))
attrs["shape"] = "diamond"
elif state.is_chance_node():
attrs["shape"] = "point"
attrs["width"] = _WIDTH / 2.
attrs["height"] = _HEIGHT / 2.
else:
attrs["label"] = str(state.information_state_string())
attrs["shape"] = _PLAYER_SHAPES.get(player, "ellipse")
attrs["color"] = _PLAYER_COLORS.get(player, "black")
return attrs
def default_edge_decorator(parent, unused_child, action):
"""Decorates a state-node of the game tree.
This method can be called by a custom decorator to prepopulate the attributes
dictionary. Then only relevant attributes need to be changed, or added.
Args:
parent: The parent state.
unused_child: The child state, not used in the default decorator.
action: `int` the selected action in the parent state.
Returns:
`dict` with graphviz node style attributes.
"""
player = parent.current_player()
attrs = {
"label": " " + parent.action_to_string(player, action),
"fontsize": _FONTSIZE,
"arrowsize": _ARROWSIZE
}
attrs["color"] = _PLAYER_COLORS.get(player, "black")
return attrs
class GameTree(pygraphviz.AGraph):
"""Builds `pygraphviz.AGraph` of the game tree.
Attributes:
game: A `pyspiel.Game` object.
depth_limit: Maximum depth of the tree. Optional, default=-1 (no limit).
node_decorator: Decorator function for nodes (states). Optional, default=
`treeviz.default_node_decorator`.
edge_decorator: Decorator function for edges (actions). Optional, default=
`treeviz.default_edge_decorator`.
    group_terminal: Whether to display all terminal states at the same level,
      default=False.
group_infosets: Whether to group infosets together, default=False.
group_pubsets: Whether to group public sets together, default=False.
    target_pubset: Which public set to group: "*" (all) or a specific name.
infoset_attrs: Attributes to style infoset grouping.
pubset_attrs: Attributes to style public set grouping.
kwargs: Keyword arguments passed on to `pygraphviz.AGraph.__init__`.
"""
def __init__(self,
game=None,
depth_limit=-1,
node_decorator=default_node_decorator,
edge_decorator=default_edge_decorator,
group_terminal=False,
group_infosets=False,
group_pubsets=False,
target_pubset="*",
infoset_attrs=None,
pubset_attrs=None,
**kwargs):
kwargs["directed"] = kwargs.get("directed", True)
super(GameTree, self).__init__(**kwargs)
# We use pygraphviz.AGraph.add_subgraph to cluster nodes, and it requires a
# default constructor. Thus game needs to be optional.
if game is None:
return
self.game = game
self._node_decorator = node_decorator
self._edge_decorator = edge_decorator
self._group_infosets = group_infosets
self._group_pubsets = group_pubsets
if self._group_infosets:
if not self.game.get_type().provides_information_state_string:
raise RuntimeError(
"Grouping of infosets requested, but the game does not "
"provide information state string.")
if self._group_pubsets:
if not self.game.get_type().provides_factored_observation_string:
raise RuntimeError(
"Grouping of public sets requested, but the game does not "
"provide factored observations strings.")
self._infosets = collections.defaultdict(lambda: [])
self._pubsets = collections.defaultdict(lambda: [])
self._terminal_nodes = []
root = game.new_initial_state()
self.add_node(self.state_to_str(root), **self._node_decorator(root))
self._build_tree(root, 0, depth_limit)
for (player, info_state), sibblings in self._infosets.items():
cluster_name = "cluster_{}_{}".format(player, info_state)
self.add_subgraph(sibblings, cluster_name,
**(infoset_attrs or {
"style": "dashed"
}))
for pubset, sibblings in self._pubsets.items():
if target_pubset == "*" or target_pubset == pubset:
cluster_name = "cluster_{}".format(pubset)
self.add_subgraph(sibblings, cluster_name,
**(pubset_attrs or {
"style": "dashed"
}))
if group_terminal:
self.add_subgraph(self._terminal_nodes, rank="same")
def state_to_str(self, state):
"""Unique string representation of a state.
Args:
state: The state.
Returns:
String representation of state.
"""
assert not state.is_simultaneous_node()
# AGraph nodes can't have empty string == None as a key, thus we prepend " "
return " " + state.history_str()
def _build_tree(self, state, depth, depth_limit):
"""Recursively builds the game tree."""
state_str = self.state_to_str(state)
if state.is_terminal():
self._terminal_nodes.append(state_str)
return
if depth > depth_limit >= 0:
return
for action in state.legal_actions():
child = state.child(action)
child_str = self.state_to_str(child)
self.add_node(child_str, **self._node_decorator(child))
self.add_edge(state_str, child_str,
**self._edge_decorator(state, child, action))
if (self._group_infosets and not child.is_chance_node() and
not child.is_terminal()):
player = child.current_player()
info_state = child.information_state_string()
self._infosets[(player, info_state)].append(child_str)
if self._group_pubsets:
pub_obs_history = str(pyspiel.PublicObservationHistory(child))
self._pubsets[pub_obs_history].append(child_str)
self._build_tree(child, depth + 1, depth_limit)
def _repr_svg_(self):
"""Allows to render directly in Jupyter notebooks and Google Colab."""
if not self.has_layout:
self.layout(prog="dot")
return self.draw(format="svg").decode(self.encoding)
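# A minimal usage sketch (illustrative only; assumes graphviz and pygraphviz
# are installed and that writing to /tmp is acceptable):
#   gametree = GameTree(pyspiel.load_game("kuhn_poker"), depth_limit=3)
#   gametree.layout(prog="dot")
#   gametree.draw("/tmp/kuhn_tree.png")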
| open_spiel-master | open_spiel/python/visualizations/treeviz.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/visualizations/__init__.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
r"""Two BlueChip bridge bots agains simplest open_spiel (take the first possible action).
The bot_cmd FLAG should contain a command-line to launch an external bot, e.g.
`Wbridge5 Autoconnect {port}`.
"""
# pylint: enable=line-too-long
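# Example invocation (the executable name and paths below are illustrative):
#   python bridge_wb5.py --bot_cmd="Wbridge5 Autoconnect {port}" \
#       --params_path=/tmp/bridge_params --num_deals=10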
import os
import pickle
import re
import socket
import subprocess
import time
from absl import app
from absl import flags
import haiku as hk
import jax
import numpy as np
from open_spiel.python.bots import bluechip_bridge
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_float("timeout_secs", 60, "Seconds to wait for bot to respond")
flags.DEFINE_integer("rng_seed", 1234, "Seed to use to generate hands")
flags.DEFINE_integer("num_deals", 10, "How many deals to play")
flags.DEFINE_integer("sleep", 0, "How many seconds to wait before next action")
flags.DEFINE_string("params_path", ".",
"directory path for trained model params-snapshot.pkl")
flags.DEFINE_string(
"bot_cmd", None,
"Command to launch the external bot; must include {port} which will be "
"replaced by the port number to attach to.")
# Make the network.
NUM_ACTIONS = 38  # Auction calls: 35 bids + pass + double + redouble.
MIN_ACTION = 52  # Card actions are 0-51, so auction actions start at id 52.
def net_fn(x):
"""Haiku module for our network."""
net = hk.Sequential([
hk.Linear(1024),
jax.nn.relu,
hk.Linear(1024),
jax.nn.relu,
hk.Linear(1024),
jax.nn.relu,
hk.Linear(1024),
jax.nn.relu,
hk.Linear(NUM_ACTIONS),
jax.nn.log_softmax,
])
return net(x)
def load_model():
net = hk.without_apply_rng(hk.transform(net_fn))
params = pickle.load(
open(os.path.join(FLAGS.params_path, "params-snapshot.pkl"), "rb"))
return net, params
def ai_action(state, net, params):
  """Returns the auction action preferred by the trained policy network."""
  observation = np.array(state.observation_tensor(), np.float32)
  policy = np.exp(net.apply(params, observation))  # log-probabilities -> probs
  probs_actions = [(p, a + MIN_ACTION) for a, p in enumerate(policy)]
  pred = max(probs_actions)[1]  # highest-probability action
  return pred
def _run_once(state, bots, net, params):
"""Plays bots with each other, returns terminal utility for each player."""
for bot in bots:
bot.restart()
while not state.is_terminal():
if state.is_chance_node():
outcomes, probs = zip(*state.chance_outcomes())
state.apply_action(np.random.choice(outcomes, p=probs))
else:
if FLAGS.sleep:
time.sleep(FLAGS.sleep) # wait for the human to see how it goes
if state.current_player() % 2 == 1:
# Have simplest play for now
action = state.legal_actions()[0]
if action > 51:
# TODO(ed2k) extend beyond just bidding
action = ai_action(state, net, params)
state.apply_action(action)
else:
result = bots[state.current_player() // 2].step(state)
state.apply_action(result)
return state
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
game = pyspiel.load_game("bridge(use_double_dummy_result=false)")
net, params = load_model()
bots = [
bluechip_bridge.BlueChipBridgeBot(game, 0, controller_factory),
bluechip_bridge.BlueChipBridgeBot(game, 2, controller_factory)
]
results = []
for i_deal in range(FLAGS.num_deals):
state = _run_once(game.new_initial_state(), bots, net, params)
print("Deal #{}; final state:\n{}".format(i_deal, state))
results.append(state.returns())
stats = np.array(results)
mean = np.mean(stats, axis=0)
stderr = np.std(stats, axis=0, ddof=1) / np.sqrt(FLAGS.num_deals)
print(u"Absolute score: {:+.1f}\u00b1{:.1f}".format(mean[0], stderr[0]))
print(u"Relative score: {:+.1f}\u00b1{:.1f}".format(mean[1], stderr[1]))
def controller_factory():
"""Implements bluechip_bridge.BlueChipBridgeBot."""
client = _WBridge5Client(FLAGS.bot_cmd)
client.start()
return client
class _WBridge5Client(object):
"""Manages the connection to a WBridge5 bot."""
def __init__(self, command):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind(("", 0))
self.port = self.sock.getsockname()[1]
self.sock.listen(1)
self.process = None
self.command = command.format(port=self.port)
def start(self):
if self.process is not None:
self.process.kill()
self.process = subprocess.Popen(self.command.split(" "))
self.conn, self.addr = self.sock.accept()
def read_line(self):
line = ""
while True:
self.conn.settimeout(FLAGS.timeout_secs)
data = self.conn.recv(1024)
if not data:
raise EOFError("Connection closed")
line += data.decode("ascii")
if line.endswith("\n"):
return re.sub(r"\s+", " ", line).strip()
def send_line(self, line):
self.conn.send((line + "\r\n").encode("ascii"))
def terminate(self):
self.process.kill()
self.process = None
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/bridge_wb5.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example running PSRO on OpenSpiel Sequential games.
To reproduce results from (Muller et al., "A Generalized Training Approach for
Multiagent Learning", ICLR 2020; https://arxiv.org/abs/1909.12823), run this
script with:
- `game_name` in ['kuhn_poker', 'leduc_poker']
- `n_players` in [2, 3, 4, 5]
- `meta_strategy_method` in ['alpharank', 'uniform', 'nash', 'prd']
- `rectifier` in ['', 'rectified']
The other parameters keep their default values.
"""
import time
from absl import app
from absl import flags
import numpy as np
# pylint: disable=g-bad-import-order
import pyspiel
import tensorflow.compat.v1 as tf
# pylint: enable=g-bad-import-order
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms import policy_aggregator
from open_spiel.python.algorithms.psro_v2 import best_response_oracle
from open_spiel.python.algorithms.psro_v2 import psro_v2
from open_spiel.python.algorithms.psro_v2 import rl_oracle
from open_spiel.python.algorithms.psro_v2 import rl_policy
from open_spiel.python.algorithms.psro_v2 import strategy_selectors
FLAGS = flags.FLAGS
# Game-related
flags.DEFINE_string("game_name", "kuhn_poker", "Game name.")
flags.DEFINE_integer("n_players", 2, "The number of players.")
# PSRO related
flags.DEFINE_string("meta_strategy_method", "alpharank",
"Name of meta strategy computation method.")
flags.DEFINE_integer("number_policies_selected", 1,
"Number of new strategies trained at each PSRO iteration.")
flags.DEFINE_integer("sims_per_entry", 1000,
("Number of simulations to run to estimate each element"
"of the game outcome matrix."))
flags.DEFINE_integer("gpsro_iterations", 100,
"Number of training steps for GPSRO.")
flags.DEFINE_bool("symmetric_game", False, "Whether to consider the current "
"game as a symmetric game.")
# Rectify options
flags.DEFINE_string("rectifier", "",
"Which rectifier to use. Choices are '' "
"(No filtering), 'rectified' for rectified.")
flags.DEFINE_string("training_strategy_selector", "probabilistic",
"Which strategy selector to use. Choices are "
" - 'top_k_probabilities': select top "
"`number_policies_selected` strategies. "
" - 'probabilistic': Randomly samples "
"`number_policies_selected` strategies with probability "
"equal to their selection probabilities. "
" - 'uniform': Uniformly sample `number_policies_selected` "
"strategies. "
" - 'rectified': Select every non-zero-selection-"
"probability strategy available to each player.")
# General (RL) agent parameters
flags.DEFINE_string("oracle_type", "BR", "Choices are DQN, PG (Policy "
"Gradient) or BR (exact Best Response)")
flags.DEFINE_integer("number_training_episodes", int(1e4), "Number training "
"episodes per RL policy. Used for PG and DQN")
flags.DEFINE_float("self_play_proportion", 0.0, "Self play proportion")
flags.DEFINE_integer("hidden_layer_size", 256, "Hidden layer size")
flags.DEFINE_integer("batch_size", 32, "Batch size")
flags.DEFINE_float("sigma", 0.0, "Policy copy noise (Gaussian Dropout term).")
flags.DEFINE_string("optimizer_str", "adam", "'adam' or 'sgd'")
# Policy Gradient Oracle related
flags.DEFINE_string("loss_str", "qpg", "Name of loss used for BR training.")
flags.DEFINE_integer("num_q_before_pi", 8, "# critic updates before Pi update")
flags.DEFINE_integer("n_hidden_layers", 4, "# of hidden layers")
flags.DEFINE_float("entropy_cost", 0.001, "Self play proportion")
flags.DEFINE_float("critic_learning_rate", 1e-2, "Critic learning rate")
flags.DEFINE_float("pi_learning_rate", 1e-3, "Policy learning rate.")
# DQN
flags.DEFINE_float("dqn_learning_rate", 1e-2, "DQN learning rate.")
flags.DEFINE_integer("update_target_network_every", 1000, "Update target "
"network every [X] steps")
flags.DEFINE_integer("learn_every", 10, "Learn every [X] steps.")
# General
flags.DEFINE_integer("seed", 1, "Seed.")
flags.DEFINE_bool("local_launch", False, "Launch locally or not.")
flags.DEFINE_bool("verbose", True, "Enables verbose printing and profiling.")
def init_pg_responder(sess, env):
"""Initializes the Policy Gradient-based responder and agents."""
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agent_class = rl_policy.PGPolicy
agent_kwargs = {
"session": sess,
"info_state_size": info_state_size,
"num_actions": num_actions,
"loss_str": FLAGS.loss_str,
"loss_class": False,
"hidden_layers_sizes": [FLAGS.hidden_layer_size] * FLAGS.n_hidden_layers,
"batch_size": FLAGS.batch_size,
"entropy_cost": FLAGS.entropy_cost,
"critic_learning_rate": FLAGS.critic_learning_rate,
"pi_learning_rate": FLAGS.pi_learning_rate,
"num_critic_before_pi": FLAGS.num_q_before_pi,
"optimizer_str": FLAGS.optimizer_str
}
oracle = rl_oracle.RLOracle(
env,
agent_class,
agent_kwargs,
number_training_episodes=FLAGS.number_training_episodes,
self_play_proportion=FLAGS.self_play_proportion,
sigma=FLAGS.sigma)
agents = [
agent_class( # pylint: disable=g-complex-comprehension
env,
player_id,
**agent_kwargs)
for player_id in range(FLAGS.n_players)
]
for agent in agents:
agent.freeze()
return oracle, agents
def init_br_responder(env):
"""Initializes the tabular best-response based responder and agents."""
random_policy = policy.TabularPolicy(env.game)
oracle = best_response_oracle.BestResponseOracle(
game=env.game, policy=random_policy)
agents = [random_policy.__copy__() for _ in range(FLAGS.n_players)]
return oracle, agents
def init_dqn_responder(sess, env):
"""Initializes the Policy Gradient-based responder and agents."""
state_representation_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
agent_class = rl_policy.DQNPolicy
agent_kwargs = {
"session": sess,
"state_representation_size": state_representation_size,
"num_actions": num_actions,
"hidden_layers_sizes": [FLAGS.hidden_layer_size] * FLAGS.n_hidden_layers,
"batch_size": FLAGS.batch_size,
"learning_rate": FLAGS.dqn_learning_rate,
"update_target_network_every": FLAGS.update_target_network_every,
"learn_every": FLAGS.learn_every,
"optimizer_str": FLAGS.optimizer_str
}
oracle = rl_oracle.RLOracle(
env,
agent_class,
agent_kwargs,
number_training_episodes=FLAGS.number_training_episodes,
self_play_proportion=FLAGS.self_play_proportion,
sigma=FLAGS.sigma)
agents = [
agent_class( # pylint: disable=g-complex-comprehension
env,
player_id,
**agent_kwargs)
for player_id in range(FLAGS.n_players)
]
for agent in agents:
agent.freeze()
return oracle, agents
def print_policy_analysis(policies, game, verbose=False):
"""Function printing policy diversity within game's known policies.
Warning : only works with deterministic policies.
Args:
policies: List of list of policies (One list per game player)
game: OpenSpiel game object.
verbose: Whether to print policy diversity information. (True : print)
Returns:
List of list of unique policies (One list per player)
"""
  states_dict = get_all_states.get_all_states(game, np.inf, False, False)
unique_policies = []
for player in range(len(policies)):
cur_policies = policies[player]
cur_set = set()
for pol in cur_policies:
cur_str = ""
for state_str in states_dict:
if states_dict[state_str].current_player() == player:
pol_action_dict = pol(states_dict[state_str])
max_prob = max(list(pol_action_dict.values()))
max_prob_actions = [
a for a in pol_action_dict if pol_action_dict[a] == max_prob
]
cur_str += "__" + state_str
for a in max_prob_actions:
cur_str += "-" + str(a)
cur_set.add(cur_str)
unique_policies.append(cur_set)
if verbose:
print("\n=====================================\nPolicy Diversity :")
for player, cur_set in enumerate(unique_policies):
print("Player {} : {} unique policies.".format(player, len(cur_set)))
print("")
return unique_policies
def gpsro_looper(env, oracle, agents):
"""Initializes and executes the GPSRO training loop."""
sample_from_marginals = True # TODO(somidshafiei) set False for alpharank
training_strategy_selector = FLAGS.training_strategy_selector or strategy_selectors.probabilistic
g_psro_solver = psro_v2.PSROSolver(
env.game,
oracle,
initial_policies=agents,
training_strategy_selector=training_strategy_selector,
rectifier=FLAGS.rectifier,
sims_per_entry=FLAGS.sims_per_entry,
number_policies_selected=FLAGS.number_policies_selected,
meta_strategy_method=FLAGS.meta_strategy_method,
prd_iterations=50000,
prd_gamma=1e-10,
sample_from_marginals=sample_from_marginals,
symmetric_game=FLAGS.symmetric_game)
start_time = time.time()
for gpsro_iteration in range(FLAGS.gpsro_iterations):
if FLAGS.verbose:
print("Iteration : {}".format(gpsro_iteration))
print("Time so far: {}".format(time.time() - start_time))
g_psro_solver.iteration()
meta_game = g_psro_solver.get_meta_game()
meta_probabilities = g_psro_solver.get_meta_strategies()
policies = g_psro_solver.get_policies()
if FLAGS.verbose:
print("Meta game : {}".format(meta_game))
print("Probabilities : {}".format(meta_probabilities))
# The following lines only work for sequential games for the moment.
if env.game.get_type().dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL:
aggregator = policy_aggregator.PolicyAggregator(env.game)
aggr_policies = aggregator.aggregate(
range(FLAGS.n_players), policies, meta_probabilities)
exploitabilities, expl_per_player = exploitability.nash_conv(
env.game, aggr_policies, return_only_nash_conv=False)
_ = print_policy_analysis(policies, env.game, FLAGS.verbose)
if FLAGS.verbose:
print("Exploitabilities : {}".format(exploitabilities))
print("Exploitabilities per player : {}".format(expl_per_player))
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
np.random.seed(FLAGS.seed)
game = pyspiel.load_game_as_turn_based(FLAGS.game_name,
{"players": FLAGS.n_players})
env = rl_environment.Environment(game)
# Initialize oracle and agents
with tf.Session() as sess:
if FLAGS.oracle_type == "DQN":
oracle, agents = init_dqn_responder(sess, env)
elif FLAGS.oracle_type == "PG":
oracle, agents = init_pg_responder(sess, env)
elif FLAGS.oracle_type == "BR":
oracle, agents = init_br_responder(env)
sess.run(tf.global_variables_initializer())
gpsro_looper(env, oracle, agents)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/psro_v2_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export game in gambit .efg format."""
from absl import app
from absl import flags
from absl import logging
from open_spiel.python.algorithms.gambit import export_gambit
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_string("out", "/tmp/gametree.efg", "Name of output file, e.g., "
"[*.efg].")
flags.DEFINE_boolean("print", False, "Print the tree to stdout "
"instead of saving to file.")
def main(argv):
del argv
game = pyspiel.load_game(FLAGS.game)
game_type = game.get_type()
if game_type.dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
logging.warn("%s is not turn-based. Trying to reload game as turn-based.",
FLAGS.game)
game = pyspiel.load_game_as_turn_based(FLAGS.game)
gametree = export_gambit(game) # use default decorators
if FLAGS.print:
print(gametree)
else:
with open(FLAGS.out, "w") as f:
f.write(gametree)
logging.info("Game tree for %s saved to file: %s", FLAGS.game, FLAGS.out)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/gambit_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tabular Q-Learner example on Tic Tac Toe.
Two Q-Learning agents are trained by playing against each other. Then, the game
can be played against the agents from the command line.
After about 10**5 training episodes, the agents reach a good policy: win rate
against random opponents is around 99% for player 0 and 92% for player 1.
"""
import logging
import sys
from absl import app
from absl import flags
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import random_agent
from open_spiel.python.algorithms import tabular_qlearner
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", int(5e4), "Number of train episodes.")
flags.DEFINE_boolean(
"interactive_play",
True,
"Whether to run an interactive play with the agent after training.",
)
def pretty_board(time_step):
"""Returns the board in `time_step` in a human readable format."""
info_state = time_step.observations["info_state"][0]
x_locations = np.nonzero(info_state[9:18])[0]
o_locations = np.nonzero(info_state[18:])[0]
board = np.full(3 * 3, ".")
board[x_locations] = "X"
board[o_locations] = "0"
board = np.reshape(board, (3, 3))
return board
def command_line_action(time_step):
"""Gets a valid action from the user on the command line."""
current_player = time_step.observations["current_player"]
legal_actions = time_step.observations["legal_actions"][current_player]
action = -1
while action not in legal_actions:
print("Choose an action from {}:".format(legal_actions))
sys.stdout.flush()
action_str = input()
try:
action = int(action_str)
except ValueError:
continue
return action
def eval_against_random_bots(env, trained_agents, random_agents, num_episodes):
"""Evaluates `trained_agents` against `random_agents` for `num_episodes`."""
wins = np.zeros(2)
for player_pos in range(2):
if player_pos == 0:
cur_agents = [trained_agents[0], random_agents[1]]
else:
cur_agents = [random_agents[0], trained_agents[1]]
for _ in range(num_episodes):
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = cur_agents[player_id].step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
if time_step.rewards[player_pos] > 0:
wins[player_pos] += 1
return wins / num_episodes
def main(_):
game = "tic_tac_toe"
num_players = 2
env = rl_environment.Environment(game)
num_actions = env.action_spec()["num_actions"]
agents = [
tabular_qlearner.QLearner(player_id=idx, num_actions=num_actions)
for idx in range(num_players)
]
# random agents for evaluation
random_agents = [
random_agent.RandomAgent(player_id=idx, num_actions=num_actions)
for idx in range(num_players)
]
# 1. Train the agents
training_episodes = FLAGS.num_episodes
for cur_episode in range(training_episodes):
if cur_episode % int(1e4) == 0:
win_rates = eval_against_random_bots(env, agents, random_agents, 1000)
logging.info("Starting episode %s, win_rates %s", cur_episode, win_rates)
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
time_step = env.step([agent_output.action])
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if not FLAGS.interactive_play:
return
# 2. Play from the command line against the trained agent.
human_player = 1
while True:
logging.info("You are playing as %s", "O" if human_player else "X")
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
if player_id == human_player:
agent_out = agents[human_player].step(time_step, is_evaluation=True)
logging.info("\n%s", agent_out.probs.reshape((3, 3)))
logging.info("\n%s", pretty_board(time_step))
action = command_line_action(time_step)
else:
agent_out = agents[1 - human_player].step(time_step, is_evaluation=True)
action = agent_out.action
time_step = env.step([action])
logging.info("\n%s", pretty_board(time_step))
logging.info("End of game!")
if time_step.rewards[human_player] > 0:
logging.info("You win")
elif time_step.rewards[human_player] < 0:
logging.info("You lose")
else:
logging.info("Draw")
# Switch order of players
human_player = 1 - human_player
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/tic_tac_toe_qlearner.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example use of the NeuRD algorithm on Kuhn Poker.
This NeuRD implementation does not use an entropy bonus to ensure that the
current joint policy approaches an equilibrium in zero-sum games, but it
tracks the exact tabular average so that the average policy approaches an
equilibrium (assuming the policy networks train well).
"""
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import neurd
import pyspiel
tf.enable_eager_execution()
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 1000, "Number of iterations")
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_integer("players", 2, "Number of players")
flags.DEFINE_integer("print_freq", 100, "How often to print the exploitability")
flags.DEFINE_integer("num_hidden_layers", 1,
"The number of hidden layers in the policy model.")
flags.DEFINE_integer("num_hidden_units", 13,
"The number of hidden layers in the policy model.")
flags.DEFINE_integer(
"num_hidden_factors", 8,
"The number of factors in each hidden layer in the policy model.")
flags.DEFINE_boolean(
"use_skip_connections", True,
"Whether or not to use skip connections in the policy model.")
flags.DEFINE_integer("batch_size", 100, "The policy model training batch size.")
flags.DEFINE_float(
"threshold", 2.,
"Logits of the policy model will be discouraged from growing beyond "
"`threshold`.")
flags.DEFINE_float("step_size", 1.0, "Policy model step size.")
flags.DEFINE_boolean(
"autoencode", False,
"Whether or not to augment the policy model with outputs that attempt to "
"reproduce the model inputs. The policy model is updated online so "
"training with the reproduction error as an auxiliary task helps to keep "
"the model stable in the absence of an entropy bonus.")
def main(_):
game = pyspiel.load_game(FLAGS.game, {"players": FLAGS.players})
models = []
for _ in range(game.num_players()):
models.append(
neurd.DeepNeurdModel(
game,
num_hidden_layers=FLAGS.num_hidden_layers,
num_hidden_units=FLAGS.num_hidden_units,
num_hidden_factors=FLAGS.num_hidden_factors,
use_skip_connections=FLAGS.use_skip_connections,
autoencode=FLAGS.autoencode))
solver = neurd.CounterfactualNeurdSolver(game, models)
def _train(model, data):
neurd.train(
model,
data,
batch_size=FLAGS.batch_size,
step_size=FLAGS.step_size,
threshold=FLAGS.threshold,
autoencoder_loss=(tf.compat.v1.losses.huber_loss
if FLAGS.autoencode else None))
for i in range(FLAGS.iterations):
solver.evaluate_and_update_policy(_train)
if i % FLAGS.print_freq == 0:
conv = pyspiel.exploitability(game, solver.average_policy())
print("Iteration {} exploitability {}".format(i, conv))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/neurd_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributed by Wannes Meert, Giuseppe Marra, and Pieter Robberechts
# for the KU Leuven course Machine Learning: Project.
"""Python spiel example."""
from absl import app
from absl import flags
import numpy as np
from open_spiel.python.bots import human
from open_spiel.python.bots import uniform_random
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("seed", 12761381, "The seed to use for the RNG.")
# Supported types of players: "random", "human"
flags.DEFINE_string("player0", "random", "Type of the agent for player 0.")
flags.DEFINE_string("player1", "random", "Type of the agent for player 1.")
def LoadAgent(agent_type, player_id, rng):
"""Return a bot based on the agent type."""
if agent_type == "random":
return uniform_random.UniformRandomBot(player_id, rng)
elif agent_type == "human":
return human.HumanBot()
else:
raise RuntimeError("Unrecognized agent type: {}".format(agent_type))
def main(_):
rng = np.random.RandomState(FLAGS.seed)
games_list = pyspiel.registered_names()
assert "dots_and_boxes" in games_list
game_string = "dots_and_boxes(num_rows=2,num_cols=2)"
print("Creating game: {}".format(game_string))
game = pyspiel.load_game(game_string)
agents = [
LoadAgent(FLAGS.player0, 0, rng),
LoadAgent(FLAGS.player1, 1, rng),
]
state = game.new_initial_state()
# Print the initial state
print("INITIAL STATE")
print(str(state))
while not state.is_terminal():
current_player = state.current_player()
# Decision node: sample action for the single current player
legal_actions = state.legal_actions()
for action in legal_actions:
print(
"Legal action: {} ({})".format(
state.action_to_string(current_player, action), action
)
)
action = agents[current_player].step(state)
action_string = state.action_to_string(current_player, action)
print("Player ", current_player, ", chose action: ", action_string)
state.apply_action(action)
print("")
print("NEXT STATE:")
print(str(state))
if not state.is_terminal():
print(str(state.observation_tensor()))
# Game is now done. Print utilities for each player
returns = state.returns()
for pid in range(game.num_players()):
print("Utility for player {} is {}".format(pid, returns[pid]))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/dots_and_boxes_example.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of MMD with dilated entropy to solve for QRE in Leduc Poker."""
from absl import app
from absl import flags
from open_spiel.python.algorithms import mmd_dilated
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 100, "Number of iterations")
flags.DEFINE_float(
"alpha", 0.05, "QRE parameter, larger value amounts to more regularization")
flags.DEFINE_string("game", "leduc_poker", "Name of the game")
flags.DEFINE_integer("print_freq", 10, "How often to print the gap")
def main(_):
game = pyspiel.load_game(FLAGS.game)
mmd = mmd_dilated.MMDDilatedEnt(game, FLAGS.alpha)
for i in range(FLAGS.iterations):
mmd.update_sequences()
if i % FLAGS.print_freq == 0:
conv = mmd.get_gap()
print("Iteration {} gap {}".format(i, conv))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/mmd_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
import logging
from absl import app
from absl import flags
from open_spiel.python.algorithms import tabular_qlearner
from open_spiel.python.environments import cliff_walking
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", int(5e2), "Number of train episodes.")
def eval_agent(env, agent, num_episodes):
"""Evaluates `agent` for `num_episodes`."""
rewards = 0.0
for _ in range(num_episodes):
time_step = env.reset()
episode_reward = 0
while not time_step.last():
agent_output = agent.step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
episode_reward += time_step.rewards[0]
rewards += episode_reward
return rewards / num_episodes
def main_loop(unused_arg):
"""Trains a tabular qlearner agent in the cliff walking environment."""
env = cliff_walking.Environment(width=5, height=3)
num_actions = env.action_spec()["num_actions"]
train_episodes = FLAGS.num_episodes
eval_interval = 50
agent = tabular_qlearner.QLearner(
player_id=0, step_size=0.05, num_actions=num_actions)
# Train the agent
for ep in range(train_episodes):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step agent with final info state.
agent.step(time_step)
if ep and ep % eval_interval == 0:
logging.info("-" * 80)
logging.info("Episode %s", ep)
logging.info("Last loss: %s", agent.loss)
avg_return = eval_agent(env, agent, 100)
logging.info("Avg return: %s", avg_return)
if __name__ == "__main__":
app.run(main_loop)
| open_spiel-master | open_spiel/python/examples/single_agent_cliff_walking.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agent vs Tabular Q-Learning agents trained on Tic Tac Toe.
The two agents are trained by playing against each other. Then, the game
can be played against the DQN agent from the command line.
"""
import logging
import sys
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import dqn
from open_spiel.python.algorithms import random_agent
from open_spiel.python.algorithms import tabular_qlearner
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", int(5e4), "Number of train episodes.")
flags.DEFINE_boolean(
"interactive_play", True,
"Whether to run an interactive play with the agent after training.")
def pretty_board(time_step):
"""Returns the board in `time_step` in a human readable format."""
info_state = time_step.observations["info_state"][0]
x_locations = np.nonzero(info_state[9:18])[0]
o_locations = np.nonzero(info_state[18:])[0]
board = np.full(3 * 3, ".")
board[x_locations] = "X"
board[o_locations] = "0"
board = np.reshape(board, (3, 3))
return board
def command_line_action(time_step):
"""Gets a valid action from the user on the command line."""
current_player = time_step.observations["current_player"]
legal_actions = time_step.observations["legal_actions"][current_player]
action = -1
while action not in legal_actions:
print("Choose an action from {}:".format(legal_actions))
sys.stdout.flush()
action_str = input()
try:
action = int(action_str)
except ValueError:
continue
return action
def eval_against_random_bots(env, trained_agents, random_agents, num_episodes):
"""Evaluates `trained_agents` against `random_agents` for `num_episodes`."""
num_players = len(trained_agents)
sum_episode_rewards = np.zeros(num_players)
for player_pos in range(num_players):
cur_agents = random_agents[:]
cur_agents[player_pos] = trained_agents[player_pos]
for _ in range(num_episodes):
time_step = env.reset()
episode_rewards = 0
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = cur_agents[player_id].step(time_step, is_evaluation=True)
action_list = [agent_output.action]
time_step = env.step(action_list)
episode_rewards += time_step.rewards[player_pos]
sum_episode_rewards[player_pos] += episode_rewards
return sum_episode_rewards / num_episodes
def main(_):
game = "tic_tac_toe"
num_players = 2
env = rl_environment.Environment(game)
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
hidden_layers_sizes = [32, 32]
replay_buffer_capacity = int(1e4)
train_episodes = FLAGS.num_episodes
loss_report_interval = 1000
with tf.Session() as sess:
dqn_agent = dqn.DQN(
sess,
player_id=0,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=hidden_layers_sizes,
replay_buffer_capacity=replay_buffer_capacity)
tabular_q_agent = tabular_qlearner.QLearner(
player_id=1, num_actions=num_actions)
agents = [dqn_agent, tabular_q_agent]
sess.run(tf.global_variables_initializer())
# Train agent
for ep in range(train_episodes):
if ep and ep % loss_report_interval == 0:
logging.info("[%s/%s] DQN loss: %s", ep, train_episodes, agents[0].loss)
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
# Evaluate against random agent
random_agents = [
random_agent.RandomAgent(player_id=idx, num_actions=num_actions)
for idx in range(num_players)
]
r_mean = eval_against_random_bots(env, agents, random_agents, 1000)
logging.info("Mean episode rewards: %s", r_mean)
if not FLAGS.interactive_play:
return
# Play from the command line against the trained DQN agent.
human_player = 1
while True:
logging.info("You are playing as %s", "X" if human_player else "0")
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
if player_id == human_player:
agent_out = agents[human_player].step(time_step, is_evaluation=True)
logging.info("\n%s", agent_out.probs.reshape((3, 3)))
logging.info("\n%s", pretty_board(time_step))
action = command_line_action(time_step)
else:
agent_out = agents[1 - human_player].step(
time_step, is_evaluation=True)
action = agent_out.action
time_step = env.step([action])
logging.info("\n%s", pretty_board(time_step))
logging.info("End of game!")
if time_step.rewards[human_player] > 0:
logging.info("You win")
elif time_step.rewards[human_player] < 0:
logging.info("You lose")
else:
logging.info("Draw")
# Switch order of players
human_player = 1 - human_player
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/tic_tac_toe_dqn_vs_tabular.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exploitability of a policy from IS-MCTS search run at each info state."""
from absl import app
from absl import flags
from open_spiel.python import policy
from open_spiel.python.algorithms import exploitability
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
SEED = 129846127
def construct_is_mcts_policy(game, state, tabular_policy, bot, searched):
"""Constructs a tabular policy from independent bot calls.
Args:
game: an OpenSpiel game,
state: an OpenSpiel state to start the tree walk from,
tabular_policy: a policy.TabularPolicy for this game,
bot: the bot to get the policy from at each state
    searched: a dictionary of information states already searched (empty to
      begin with).
"""
if state.is_terminal():
return
elif state.is_chance_node():
outcomes = state.legal_actions()
for outcome in outcomes:
new_state = state.clone()
new_state.apply_action(outcome)
construct_is_mcts_policy(game, new_state, tabular_policy, bot, searched)
else:
infostate_key = state.information_state_string()
if infostate_key not in searched:
searched[infostate_key] = True
infostate_policy = bot.get_policy(state)
tabular_state_policy = tabular_policy.policy_for_key(infostate_key)
for action, prob in infostate_policy:
tabular_state_policy[action] = prob
for action in state.legal_actions():
new_state = state.clone()
new_state.apply_action(action)
construct_is_mcts_policy(game, new_state, tabular_policy, bot, searched)
def main(_):
game = pyspiel.load_game(FLAGS.game)
evaluator = pyspiel.RandomRolloutEvaluator(1, SEED)
min_expl = game.max_utility() - game.min_utility()
print("{:>5} {:>10} {:>50} {:>20}".format(
"max_sims", "uct_c", "final_policy_type", "exploitability"))
for max_simulations in [10, 100, 1000, 10000]:
for uct_c in [0.2, 0.5, 1.0, 2.0, 4.0]: # These values are for Kuhn.
for final_policy_type in [
pyspiel.ISMCTSFinalPolicyType.NORMALIZED_VISIT_COUNT,
pyspiel.ISMCTSFinalPolicyType.MAX_VISIT_COUNT,
pyspiel.ISMCTSFinalPolicyType.MAX_VALUE
]:
tabular_policy = policy.TabularPolicy(game)
bot = pyspiel.ISMCTSBot(SEED, evaluator, uct_c, max_simulations, -1,
final_policy_type, False, False)
searched = {}
construct_is_mcts_policy(game, game.new_initial_state(), tabular_policy,
bot, searched)
expl = exploitability.exploitability(game, tabular_policy)
print("{:>5} {:>10} {:>50} {:>20}".format(max_simulations, uct_c,
str(final_policy_type), expl))
if expl < min_expl:
min_expl = expl
print("Min expl: {}".format(min_expl))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/is_mcts_exploitability.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple example of using the Roshambo population.
Note: the Roshambo bots are an optional dependency and excluded by default.
To enable Roshambo bots, set OPEN_SPIEL_BUILD_WITH_ROSHAMBO to ON when building.
See
https://github.com/deepmind/open_spiel/blob/master/docs/install.md#configuring-conditional-dependencies
for details.
"""
import re
from absl import app
from absl import flags
import numpy as np
from open_spiel.python import games # pylint: disable=unused-import
from open_spiel.python import rl_agent
from open_spiel.python import rl_environment
import pyspiel
FLAGS = flags.FLAGS
# See open_spiel/data/paper_data/pbe_rrps for the bot table from the RRPS paper:
# https://arxiv.org/abs/2303.03196
flags.DEFINE_string("bot_table_file", None,
"The file containing the bot entries.")
flags.DEFINE_integer("player0_pop_id", 0, "Population member ID for player 0")
flags.DEFINE_integer("player1_pop_id", 1, "Population member ID for player 1")
flags.DEFINE_integer("seed", 0, "Seed to use for RNG")
flags.DEFINE_integer("env_recall", 1,
"Number of recent steps to include in observation")
class BotAgent(rl_agent.AbstractAgent):
"""Agent class that wraps a bot.
  Note, the environment must include the OpenSpiel state in its observations,
  which means it must have been created with include_full_state=True.
"""
def __init__(self, num_actions, bot, name="bot_agent"):
assert num_actions > 0
self._bot = bot
self._num_actions = num_actions
def restart(self):
self._bot.restart()
def step(self, time_step, is_evaluation=False):
# If it is the end of the episode, don't select an action.
if time_step.last():
return
_, state = pyspiel.deserialize_game_and_state(
time_step.observations["serialized_state"])
action = self._bot.step(state)
probs = np.zeros(self._num_actions)
probs[action] = 1.0
return rl_agent.StepOutput(action=action, probs=probs)
def eval_agents(env, agents, num_players, num_episodes):
"""Evaluate the agent."""
sum_episode_rewards = np.zeros(num_players)
for ep in range(num_episodes):
for agent in agents:
# Bots need to be restarted at the start of the episode.
if hasattr(agent, "restart"):
agent.restart()
time_step = env.reset()
episode_rewards = np.zeros(num_players)
while not time_step.last():
agents_output = [
agent.step(time_step, is_evaluation=True) for agent in agents
]
action_list = [agent_output.action for agent_output in agents_output]
time_step = env.step(action_list)
episode_rewards += time_step.rewards
sum_episode_rewards += episode_rewards
print(f"Finished episode {ep}, "
+ f"avg returns: {sum_episode_rewards / num_episodes}")
return sum_episode_rewards / num_episodes
def print_roshambo_bot_names_and_ids(roshambo_bot_names):
print("Roshambo bot population:")
for i in range(len(roshambo_bot_names)):
print(f"{i}: {roshambo_bot_names[i]}")
def create_roshambo_bot_agent(player_id, num_actions, bot_names, pop_id):
name = bot_names[pop_id]
# Creates an OpenSpiel bot with the default number of throws
# (pyspiel.ROSHAMBO_NUM_THROWS). To create one for a different number of
# throws per episode, add the number as the third argument here.
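  # For example (illustrative): pyspiel.make_roshambo_bot(player_id, name, 100)
  # would create a bot expecting 100 throws per episode.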
bot = pyspiel.make_roshambo_bot(player_id, name)
return BotAgent(num_actions, bot, name=name)
def analyze_bot_table(filename):
"""Do some analysis on the payoff cross-table."""
print(f"Opening bot table file: {filename}")
bot_table_file = open(filename, "r")
table = np.zeros(shape=(pyspiel.ROSHAMBO_NUM_BOTS,
pyspiel.ROSHAMBO_NUM_BOTS), dtype=np.float64)
print("Parsing file...")
values = {}
bot_names_map = {}
for line in bot_table_file:
line = line.strip()
# ('driftbot', 'driftbot', -0.571)
myre = re.compile(r"\'(.*)\', \'(.*)\', (.*)\)")
match_obj = myre.search(line)
row_agent, col_agent, value = match_obj.groups()
values[f"{row_agent},{col_agent}"] = value
bot_names_map[row_agent] = True
bot_names_list = list(bot_names_map.keys())
bot_names_list.sort()
print(len(bot_names_list))
assert len(bot_names_list) == pyspiel.ROSHAMBO_NUM_BOTS
print(bot_names_list)
for i in range(pyspiel.ROSHAMBO_NUM_BOTS):
for j in range(pyspiel.ROSHAMBO_NUM_BOTS):
key = f"{bot_names_list[i]},{bot_names_list[j]}"
assert key in values
table[i][j] = float(values[key])
print("Population returns:")
pop_returns = np.zeros(pyspiel.ROSHAMBO_NUM_BOTS)
pop_aggregate = np.zeros(pyspiel.ROSHAMBO_NUM_BOTS)
for i in range(pyspiel.ROSHAMBO_NUM_BOTS):
pop_eval = 0
for j in range(pyspiel.ROSHAMBO_NUM_BOTS):
pop_eval += table[i][j]
pop_eval /= pyspiel.ROSHAMBO_NUM_BOTS
# print(f" {bot_names_list[i]}: {pop_eval}")
pop_returns[i] = pop_eval
pop_aggregate[i] += pop_eval
print(f" {pop_eval},")
print("Population exploitabilities: ")
pop_expls = np.zeros(pyspiel.ROSHAMBO_NUM_BOTS)
avg_pop_expl = 0
for i in range(pyspiel.ROSHAMBO_NUM_BOTS):
pop_expl = -float(pyspiel.ROSHAMBO_NUM_THROWS)
for j in range(pyspiel.ROSHAMBO_NUM_BOTS):
pop_expl = max(pop_expl, -table[i][j])
avg_pop_expl += pop_expl
pop_expls[i] = pop_expl
pop_aggregate[i] -= pop_expl
print(f" {pop_expl},")
avg_pop_expl /= pyspiel.ROSHAMBO_NUM_BOTS
print(f"Avg within-pop expl: {avg_pop_expl}")
print("Aggregate: ")
indices = np.argsort(pop_aggregate)
for i in range(pyspiel.ROSHAMBO_NUM_BOTS):
idx = indices[pyspiel.ROSHAMBO_NUM_BOTS - i - 1]
print(f" {i+1} & \\textsc{{{bot_names_list[idx]}}} & " +
f" ${pop_returns[idx]:0.3f}$ " +
f"& ${pop_expls[idx]:0.3f}$ & ${pop_aggregate[idx]:0.3f}$ \\\\")
print("Dominance:")
for i in range(pyspiel.ROSHAMBO_NUM_BOTS):
for j in range(pyspiel.ROSHAMBO_NUM_BOTS):
if np.all(np.greater(table[i], table[j])):
print(f"{bot_names_list[i]} dominates {bot_names_list[j]}")
def main(_):
np.random.seed(FLAGS.seed)
if FLAGS.bot_table_file is not None:
analyze_bot_table(FLAGS.bot_table_file)
return
# Note that the include_full_state variable has to be enabled because the
# BotAgent needs access to the full state.
env = rl_environment.Environment(
"repeated_game(stage_game=matrix_rps(),num_repetitions=" +
f"{pyspiel.ROSHAMBO_NUM_THROWS}," +
f"recall={FLAGS.env_recall})",
include_full_state=True)
num_players = 2
num_actions = env.action_spec()["num_actions"]
# Learning agents might need this:
# info_state_size = env.observation_spec()["info_state"][0]
print("Loading population...")
pop_size = pyspiel.ROSHAMBO_NUM_BOTS
print(f"Population size: {pop_size}")
roshambo_bot_names = pyspiel.roshambo_bot_names()
roshambo_bot_names.sort()
print_roshambo_bot_names_and_ids(roshambo_bot_names)
bot_id = 0
roshambo_bot_ids = {}
for name in roshambo_bot_names:
roshambo_bot_ids[name] = bot_id
bot_id += 1
# Create two bot agents
agents = [
create_roshambo_bot_agent(0, num_actions, roshambo_bot_names,
FLAGS.player0_pop_id),
create_roshambo_bot_agent(1, num_actions, roshambo_bot_names,
FLAGS.player1_pop_id)
]
print("Starting eval run.")
print(f"Player 0 is (pop_id {FLAGS.player0_pop_id}: " +
f"{roshambo_bot_names[FLAGS.player0_pop_id]})")
print(f"Player 1 is (pop_id {FLAGS.player1_pop_id}: " +
f"{roshambo_bot_names[FLAGS.player1_pop_id]})")
avg_eval_returns = eval_agents(env, agents, num_players, 100)
print(avg_eval_returns)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/roshambo_population_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Game-specific query example."""
from absl import app
from absl import flags
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "negotiation", "Name of the game")
def main(_):
print("Creating game: " + FLAGS.game)
game = pyspiel.load_game(FLAGS.game)
state = game.new_initial_state()
print(str(state))
# Need to apply the first chance node for items and utilities to be generated
state.apply_action(0)
print("Item pool: {}".format(state.item_pool()))
print("Player 0 utils: {}".format(state.agent_utils(0)))
print("Player 1 utils: {}".format(state.agent_utils(1)))
state = game.new_initial_state()
print(str(state))
# Need to apply the first chance node for items and utilities to be generated
state.apply_action(0)
print("Item pool: {}".format(state.item_pool()))
print("Player 0 utils: {}".format(state.agent_utils(0)))
print("Player 1 utils: {}".format(state.agent_utils(1)))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/query_example.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python Deep CFR example."""
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python.algorithms import deep_cfr
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import exploitability
import pyspiel
# Temporarily disable TF2 behavior until we update the code.
tf.disable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_iterations", 400, "Number of iterations")
flags.DEFINE_integer("num_traversals", 40, "Number of traversals/games")
flags.DEFINE_string("game_name", "kuhn_poker", "Name of the game")
def main(unused_argv):
logging.info("Loading %s", FLAGS.game_name)
game = pyspiel.load_game(FLAGS.game_name)
with tf.Session() as sess:
deep_cfr_solver = deep_cfr.DeepCFRSolver(
sess,
game,
policy_network_layers=(16,),
advantage_network_layers=(16,),
num_iterations=FLAGS.num_iterations,
num_traversals=FLAGS.num_traversals,
learning_rate=1e-3,
batch_size_advantage=128,
batch_size_strategy=1024,
memory_capacity=1e7,
policy_network_train_steps=400,
advantage_network_train_steps=20,
reinitialize_advantage_networks=False)
sess.run(tf.global_variables_initializer())
_, advantage_losses, policy_loss = deep_cfr_solver.solve()
for player, losses in advantage_losses.items():
logging.info("Advantage for player %d: %s", player,
losses[:2] + ["..."] + losses[-2:])
logging.info("Advantage Buffer Size for player %s: '%s'", player,
len(deep_cfr_solver.advantage_buffers[player]))
logging.info("Strategy Buffer Size: '%s'",
len(deep_cfr_solver.strategy_buffer))
logging.info("Final policy loss: '%s'", policy_loss)
average_policy = policy.tabular_policy_from_callable(
game, deep_cfr_solver.action_probabilities)
conv = exploitability.nash_conv(game, average_policy)
logging.info("Deep CFR in '%s' - NashConv: %s", FLAGS.game_name, conv)
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
print("Computed player 0 value: {}".format(average_policy_values[0]))
print("Expected player 0 value: {}".format(-1 / 18))
print("Computed player 1 value: {}".format(average_policy_values[1]))
print("Expected player 1 value: {}".format(1 / 18))
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/deep_cfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starting point for playing with the AlphaZero algorithm."""
from absl import app
from absl import flags
from open_spiel.python.algorithms.alpha_zero import alpha_zero
from open_spiel.python.algorithms.alpha_zero import model as model_lib
from open_spiel.python.utils import spawn
flags.DEFINE_string("game", "connect_four", "Name of the game.")
flags.DEFINE_integer("uct_c", 2, "UCT's exploration constant.")
flags.DEFINE_integer("max_simulations", 300, "How many simulations to run.")
flags.DEFINE_integer("train_batch_size", 2 ** 10, "Batch size for learning.")
flags.DEFINE_integer("replay_buffer_size", 2 ** 16,
"How many states to store in the replay buffer.")
flags.DEFINE_integer("replay_buffer_reuse", 3,
"How many times to learn from each state.")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate.")
flags.DEFINE_float("weight_decay", 0.0001, "L2 regularization strength.")
flags.DEFINE_float("policy_epsilon", 0.25, "What noise epsilon to use.")
flags.DEFINE_float("policy_alpha", 1, "What dirichlet noise alpha to use.")
flags.DEFINE_float("temperature", 1,
"Temperature for final move selection.")
flags.DEFINE_integer("temperature_drop", 10, # Less than AZ due to short games.
"Drop the temperature to 0 after this many moves.")
flags.DEFINE_enum("nn_model", "resnet", model_lib.Model.valid_model_types,
"What type of model should be used?.")
flags.DEFINE_integer("nn_width", 2 ** 7, "How wide should the network be.")
flags.DEFINE_integer("nn_depth", 10, "How deep should the network be.")
flags.DEFINE_string("path", None, "Where to save checkpoints.")
flags.DEFINE_integer("checkpoint_freq", 100, "Save a checkpoint every N steps.")
flags.DEFINE_integer("actors", 2, "How many actors to run.")
flags.DEFINE_integer("evaluators", 1, "How many evaluators to run.")
flags.DEFINE_integer("evaluation_window", 100,
"How many games to average results over.")
flags.DEFINE_integer(
"eval_levels", 7,
("Play evaluation games vs MCTS+Solver, with max_simulations*10^(n/2)"
" simulations for n in range(eval_levels). Default of 7 means "
"running mcts with up to 1000 times more simulations."))
flags.DEFINE_integer("max_steps", 0, "How many learn steps before exiting.")
flags.DEFINE_bool("quiet", True, "Don't show the moves as they're played.")
flags.DEFINE_bool("verbose", False, "Show the MCTS stats of possible moves.")
FLAGS = flags.FLAGS
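# Example invocation (illustrative only; flag values are not tuned and --path
# points at a scratch directory where checkpoints for this run would be saved):
#   python alpha_zero.py --game=connect_four --path=/tmp/az_example --actors=2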
def main(unused_argv):
config = alpha_zero.Config(
game=FLAGS.game,
path=FLAGS.path,
learning_rate=FLAGS.learning_rate,
weight_decay=FLAGS.weight_decay,
train_batch_size=FLAGS.train_batch_size,
replay_buffer_size=FLAGS.replay_buffer_size,
replay_buffer_reuse=FLAGS.replay_buffer_reuse,
max_steps=FLAGS.max_steps,
checkpoint_freq=FLAGS.checkpoint_freq,
actors=FLAGS.actors,
evaluators=FLAGS.evaluators,
uct_c=FLAGS.uct_c,
max_simulations=FLAGS.max_simulations,
policy_alpha=FLAGS.policy_alpha,
policy_epsilon=FLAGS.policy_epsilon,
temperature=FLAGS.temperature,
temperature_drop=FLAGS.temperature_drop,
evaluation_window=FLAGS.evaluation_window,
eval_levels=FLAGS.eval_levels,
nn_model=FLAGS.nn_model,
nn_width=FLAGS.nn_width,
nn_depth=FLAGS.nn_depth,
observation_shape=None,
output_size=None,
quiet=FLAGS.quiet,
)
alpha_zero.alpha_zero(config)
if __name__ == "__main__":
with spawn.main_handler():
app.run(main)
| open_spiel-master | open_spiel/python/examples/alpha_zero.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RL agents trained against fixed policy/bot as approximate responses.
This can be used to try to find exploits in policies or bots, as described in
Timbers et al. '20 (https://arxiv.org/abs/2004.09677), but only using RL
directly rather than RL+Search.
"""
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_agent
from open_spiel.python import rl_environment
from open_spiel.python import rl_tools
from open_spiel.python.algorithms import dqn
from open_spiel.python.algorithms import random_agent
from open_spiel.python.algorithms import tabular_qlearner
FLAGS = flags.FLAGS
# Training parameters
flags.DEFINE_string("checkpoint_dir", "/tmp/dqn_test",
"Directory to save/load the agent models.")
flags.DEFINE_integer(
"save_every", int(1e4),
"Episode frequency at which the DQN agent models are saved.")
flags.DEFINE_integer("num_train_episodes", int(1e6),
"Number of training episodes.")
flags.DEFINE_integer(
"eval_every", 1000,
"Episode frequency at which the DQN agents are evaluated.")
flags.DEFINE_integer("eval_episodes", 1000,
"How many episodes to run per eval.")
# DQN model hyper-parameters
flags.DEFINE_list("hidden_layers_sizes", [64, 64, 64],
"Number of hidden units in the Q-Network MLP.")
flags.DEFINE_integer("replay_buffer_capacity", int(1e5),
"Size of the replay buffer.")
flags.DEFINE_integer("batch_size", 32,
"Number of transitions to sample at each learning step.")
# Main algorithm parameters
flags.DEFINE_integer("seed", 0, "Seed to use for everything")
flags.DEFINE_integer("window_size", 30, "Size of window for rolling average")
flags.DEFINE_integer("num_players", 2, "Numebr of players")
flags.DEFINE_string("game", "leduc_poker", "Game string")
flags.DEFINE_string("exploitee", "random", "Exploitee (random | first)")
flags.DEFINE_string("learner", "qlearning", "Learner (qlearning | dqn)")
def eval_against_fixed_bots(env, trained_agents, fixed_agents, num_episodes):
"""Evaluates `trained_agents` against `random_agents` for `num_episodes`."""
num_players = len(fixed_agents)
sum_episode_rewards = np.zeros(num_players)
for player_pos in range(num_players):
cur_agents = fixed_agents[:]
cur_agents[player_pos] = trained_agents[player_pos]
for _ in range(num_episodes):
time_step = env.reset()
episode_rewards = 0
turn_num = 0
while not time_step.last():
turn_num += 1
player_id = time_step.observations["current_player"]
if env.is_turn_based:
agent_output = cur_agents[player_id].step(
time_step, is_evaluation=True)
action_list = [agent_output.action]
else:
agents_output = [
agent.step(time_step, is_evaluation=True) for agent in cur_agents
]
action_list = [agent_output.action for agent_output in agents_output]
time_step = env.step(action_list)
episode_rewards += time_step.rewards[player_pos]
sum_episode_rewards[player_pos] += episode_rewards
return sum_episode_rewards / num_episodes
def create_training_agents(num_players, sess, num_actions, info_state_size,
hidden_layers_sizes):
"""Create the agents we want to use for learning."""
if FLAGS.learner == "qlearning":
# pylint: disable=g-complex-comprehension
return [
tabular_qlearner.QLearner(
player_id=idx,
num_actions=num_actions,
# step_size=0.02,
step_size=0.1,
# epsilon_schedule=rl_tools.ConstantSchedule(0.5),
epsilon_schedule=rl_tools.LinearSchedule(0.5, 0.2, 1000000),
discount_factor=0.99) for idx in range(num_players)
]
elif FLAGS.learner == "dqn":
# pylint: disable=g-complex-comprehension
return [
dqn.DQN(
session=sess,
player_id=idx,
state_representation_size=info_state_size,
num_actions=num_actions,
discount_factor=0.99,
epsilon_start=0.5,
epsilon_end=0.1,
hidden_layers_sizes=hidden_layers_sizes,
replay_buffer_capacity=FLAGS.replay_buffer_capacity,
batch_size=FLAGS.batch_size) for idx in range(num_players)
]
else:
raise RuntimeError("Unknown learner")
class FirstActionAgent(rl_agent.AbstractAgent):
"""An example agent class."""
def __init__(self, player_id, num_actions, name="first_action_agent"):
assert num_actions > 0
self._player_id = player_id
self._num_actions = num_actions
def step(self, time_step, is_evaluation=False):
# If it is the end of the episode, don't select an action.
if time_step.last():
return
# Pick the first legal action.
cur_legal_actions = time_step.observations["legal_actions"][self._player_id]
action = cur_legal_actions[0]
probs = np.zeros(self._num_actions)
probs[action] = 1.0
return rl_agent.StepOutput(action=action, probs=probs)
class RollingAverage(object):
"""Class to store a rolling average."""
def __init__(self, size=100):
self._size = size
self._values = np.array([0] * self._size, dtype=np.float64)
self._index = 0
self._total_additions = 0
def add(self, value):
self._values[self._index] = value
self._total_additions += 1
self._index = (self._index + 1) % self._size
def mean(self):
n = min(self._size, self._total_additions)
if n == 0:
return 0
return self._values.sum() / n
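# Illustrative trace of RollingAverage: with size=3, adding 1.0, 2.0, 3.0, 4.0
# overwrites the oldest entry, so the buffer holds [4.0, 2.0, 3.0] and mean()
# returns (2.0 + 3.0 + 4.0) / 3 = 3.0.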
def main(_):
np.random.seed(FLAGS.seed)
tf.random.set_random_seed(FLAGS.seed)
num_players = FLAGS.num_players
env = rl_environment.Environment(FLAGS.game, include_full_state=True)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
# Exploitee agents
if FLAGS.exploitee == "first":
exploitee_agents = [
FirstActionAgent(idx, num_actions) for idx in range(num_players)
]
elif FLAGS.exploitee == "random":
exploitee_agents = [
random_agent.RandomAgent(player_id=idx, num_actions=num_actions)
# FirstActionAgent(player_id=idx, num_actions=num_actions)
for idx in range(num_players)
]
else:
raise RuntimeError("Unknown exploitee")
rolling_averager = RollingAverage(FLAGS.window_size)
rolling_averager_p0 = RollingAverage(FLAGS.window_size)
rolling_averager_p1 = RollingAverage(FLAGS.window_size)
rolling_value = 0
total_value = 0
total_value_n = 0
with tf.Session() as sess:
hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
# pylint: disable=g-complex-comprehension
learning_agents = create_training_agents(num_players, sess, num_actions,
info_state_size,
hidden_layers_sizes)
sess.run(tf.global_variables_initializer())
print("Starting...")
for ep in range(FLAGS.num_train_episodes):
if (ep + 1) % FLAGS.eval_every == 0:
r_mean = eval_against_fixed_bots(env, learning_agents, exploitee_agents,
FLAGS.eval_episodes)
value = r_mean[0] + r_mean[1]
rolling_averager.add(value)
rolling_averager_p0.add(r_mean[0])
rolling_averager_p1.add(r_mean[1])
rolling_value = rolling_averager.mean()
rolling_value_p0 = rolling_averager_p0.mean()
rolling_value_p1 = rolling_averager_p1.mean()
total_value += value
total_value_n += 1
avg_value = total_value / total_value_n
print(("[{}] Mean episode rewards {}, value: {}, " +
"rval: {} (p0/p1: {} / {}), aval: {}").format(
ep + 1, r_mean, value, rolling_value, rolling_value_p0,
rolling_value_p1, avg_value))
agents_round1 = [learning_agents[0], exploitee_agents[1]]
agents_round2 = [exploitee_agents[0], learning_agents[1]]
for agents in [agents_round1, agents_round2]:
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
if env.is_turn_based:
agent_output = agents[player_id].step(time_step)
action_list = [agent_output.action]
else:
agents_output = [agent.step(time_step) for agent in agents]
action_list = [
agent_output.action for agent_output in agents_output
]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/rl_response.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
import logging
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import dqn
from open_spiel.python.algorithms import eva
from open_spiel.python.algorithms import policy_gradient
from open_spiel.python.environments import catch
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", int(1e5), "Number of train episodes.")
flags.DEFINE_integer("eval_every", int(1e3),
"How often to evaluate the policy.")
flags.DEFINE_enum("algorithm", "dqn", ["dqn", "rpg", "qpg", "rm", "eva", "a2c"],
"Algorithms to run.")
def _eval_agent(env, agent, num_episodes):
"""Evaluates `agent` for `num_episodes`."""
rewards = 0.0
for _ in range(num_episodes):
time_step = env.reset()
episode_reward = 0
while not time_step.last():
agent_output = agent.step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
episode_reward += time_step.rewards[0]
rewards += episode_reward
return rewards / num_episodes
def main_loop(unused_arg):
"""Trains a DQN agent in the catch environment."""
env = catch.Environment()
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
train_episodes = FLAGS.num_episodes
with tf.Session() as sess:
if FLAGS.algorithm in {"rpg", "qpg", "rm", "a2c"}:
agent = policy_gradient.PolicyGradient(
sess,
player_id=0,
info_state_size=info_state_size,
num_actions=num_actions,
loss_str=FLAGS.algorithm,
hidden_layers_sizes=[128, 128],
batch_size=128,
entropy_cost=0.01,
critic_learning_rate=0.1,
pi_learning_rate=0.1,
num_critic_before_pi=3)
elif FLAGS.algorithm == "dqn":
agent = dqn.DQN(
sess,
player_id=0,
state_representation_size=info_state_size,
num_actions=num_actions,
learning_rate=0.1,
replay_buffer_capacity=10000,
hidden_layers_sizes=[32, 32],
epsilon_decay_duration=2000, # 10% total data
update_target_network_every=250)
elif FLAGS.algorithm == "eva":
agent = eva.EVAAgent(
sess,
env,
player_id=0,
state_size=info_state_size,
num_actions=num_actions,
learning_rate=1e-3,
trajectory_len=2,
num_neighbours=2,
mixing_parameter=0.95,
memory_capacity=10000,
dqn_hidden_layers=[32, 32],
epsilon_decay_duration=2000, # 10% total data
update_target_network_every=250)
else:
raise ValueError("Algorithm not implemented!")
sess.run(tf.global_variables_initializer())
# Train agent
for ep in range(train_episodes):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step agent with final info state.
agent.step(time_step)
if ep and ep % FLAGS.eval_every == 0:
logging.info("-" * 80)
logging.info("Episode %s", ep)
logging.info("Loss: %s", agent.loss)
avg_return = _eval_agent(env, agent, 100)
logging.info("Avg return: %s", avg_return)
if __name__ == "__main__":
app.run(main_loop)
| open_spiel-master | open_spiel/python/examples/single_agent_catch.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NFSP agents trained on Leduc Poker."""
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import nfsp
FLAGS = flags.FLAGS
flags.DEFINE_string("game_name", "leduc_poker",
"Name of the game.")
flags.DEFINE_integer("num_players", 2,
"Number of players.")
flags.DEFINE_integer("num_train_episodes", int(20e6),
"Number of training episodes.")
flags.DEFINE_integer("eval_every", 10000,
"Episode frequency at which the agents are evaluated.")
flags.DEFINE_list("hidden_layers_sizes", [
128,
], "Number of hidden units in the avg-net and Q-net.")
flags.DEFINE_integer("replay_buffer_capacity", int(2e5),
"Size of the replay buffer.")
flags.DEFINE_integer("reservoir_buffer_capacity", int(2e6),
"Size of the reservoir buffer.")
flags.DEFINE_integer("min_buffer_size_to_learn", 1000,
"Number of samples in buffer before learning begins.")
flags.DEFINE_float("anticipatory_param", 0.1,
"Prob of using the rl best response as episode policy.")
flags.DEFINE_integer("batch_size", 128,
"Number of transitions to sample at each learning step.")
flags.DEFINE_integer("learn_every", 64,
"Number of steps between learning updates.")
flags.DEFINE_float("rl_learning_rate", 0.01,
"Learning rate for inner rl agent.")
flags.DEFINE_float("sl_learning_rate", 0.01,
"Learning rate for avg-policy sl network.")
flags.DEFINE_string("optimizer_str", "sgd",
"Optimizer, choose from 'adam', 'sgd'.")
flags.DEFINE_string("loss_str", "mse",
"Loss function, choose from 'mse', 'huber'.")
flags.DEFINE_integer("update_target_network_every", 19200,
"Number of steps between DQN target network updates.")
flags.DEFINE_float("discount_factor", 1.0,
"Discount factor for future rewards.")
flags.DEFINE_integer("epsilon_decay_duration", int(20e6),
"Number of game steps over which epsilon is decayed.")
flags.DEFINE_float("epsilon_start", 0.06,
"Starting exploration parameter.")
flags.DEFINE_float("epsilon_end", 0.001,
"Final exploration parameter.")
flags.DEFINE_string("evaluation_metric", "nash_conv",
"Choose from 'exploitability', 'nash_conv'.")
flags.DEFINE_bool("use_checkpoints", True, "Save/load neural network weights.")
flags.DEFINE_string("checkpoint_dir", "/tmp/nfsp_test",
"Directory to save/load the agent.")
class NFSPPolicies(policy.Policy):
"""Joint policy to be evaluated."""
def __init__(self, env, nfsp_policies, mode):
game = env.game
player_ids = list(range(FLAGS.num_players))
super(NFSPPolicies, self).__init__(game, player_ids)
self._policies = nfsp_policies
self._mode = mode
self._obs = {
"info_state": [None] * FLAGS.num_players,
"legal_actions": [None] * FLAGS.num_players
}
def action_probabilities(self, state, player_id=None):
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
self._obs["current_player"] = cur_player
self._obs["info_state"][cur_player] = (
state.information_state_tensor(cur_player))
self._obs["legal_actions"][cur_player] = legal_actions
info_state = rl_environment.TimeStep(
observations=self._obs, rewards=None, discounts=None, step_type=None)
with self._policies[cur_player].temp_mode_as(self._mode):
p = self._policies[cur_player].step(info_state, is_evaluation=True).probs
prob_dict = {action: p[action] for action in legal_actions}
return prob_dict
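# Note: NFSPPolicies.action_probabilities returns a dict over the legal actions
# only, e.g. {1: 0.25, 2: 0.75} for a state whose legal actions are [1, 2]
# (the probabilities shown are illustrative).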
def main(unused_argv):
logging.info("Loading %s", FLAGS.game_name)
game = FLAGS.game_name
num_players = FLAGS.num_players
env_configs = {"players": num_players}
env = rl_environment.Environment(game, **env_configs)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
kwargs = {
"replay_buffer_capacity": FLAGS.replay_buffer_capacity,
"reservoir_buffer_capacity": FLAGS.reservoir_buffer_capacity,
"min_buffer_size_to_learn": FLAGS.min_buffer_size_to_learn,
"anticipatory_param": FLAGS.anticipatory_param,
"batch_size": FLAGS.batch_size,
"learn_every": FLAGS.learn_every,
"rl_learning_rate": FLAGS.rl_learning_rate,
"sl_learning_rate": FLAGS.sl_learning_rate,
"optimizer_str": FLAGS.optimizer_str,
"loss_str": FLAGS.loss_str,
"update_target_network_every": FLAGS.update_target_network_every,
"discount_factor": FLAGS.discount_factor,
"epsilon_decay_duration": FLAGS.epsilon_decay_duration,
"epsilon_start": FLAGS.epsilon_start,
"epsilon_end": FLAGS.epsilon_end,
}
with tf.Session() as sess:
# pylint: disable=g-complex-comprehension
agents = [
nfsp.NFSP(sess, idx, info_state_size, num_actions, hidden_layers_sizes,
**kwargs) for idx in range(num_players)
]
joint_avg_policy = NFSPPolicies(env, agents, nfsp.MODE.average_policy)
sess.run(tf.global_variables_initializer())
if FLAGS.use_checkpoints:
for agent in agents:
if agent.has_checkpoint(FLAGS.checkpoint_dir):
agent.restore(FLAGS.checkpoint_dir)
for ep in range(FLAGS.num_train_episodes):
if (ep + 1) % FLAGS.eval_every == 0:
losses = [agent.loss for agent in agents]
logging.info("Losses: %s", losses)
if FLAGS.evaluation_metric == "exploitability":
# Avg exploitability is implemented only for 2 players constant-sum
# games, use nash_conv otherwise.
expl = exploitability.exploitability(env.game, joint_avg_policy)
logging.info("[%s] Exploitability AVG %s", ep + 1, expl)
elif FLAGS.evaluation_metric == "nash_conv":
nash_conv = exploitability.nash_conv(env.game, joint_avg_policy)
logging.info("[%s] NashConv %s", ep + 1, nash_conv)
else:
raise ValueError(" ".join(("Invalid evaluation metric, choose from",
"'exploitability', 'nash_conv'.")))
if FLAGS.use_checkpoints:
for agent in agents:
agent.save(FLAGS.checkpoint_dir)
logging.info("_____________________________________________")
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/leduc_nfsp.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark performance of games by counting the number of rollouts."""
import random
import time
from absl import app
from absl import flags
from absl import logging
import pandas as pd
from open_spiel.python import games # pylint: disable=unused-import
from open_spiel.python.mfg import games as mfg_games # pylint: disable=unused-import
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string(
"games", "*", "Benchmark only specific games (semicolon separated). "
"Use * to benchmark all (loadable) games.")
flags.DEFINE_float("time_limit", 10., "Time limit per game (in seconds).")
flags.DEFINE_integer("give_up_after", 100,
"Give up rollout when the history length is exceeded.")
flags.DEFINE_bool(
"if_simultaneous_convert_to_turn_based", False,
"If True, load any simultaneous game as turn based for the benchmark.")
def _rollout_until_timeout(game_name,
time_limit,
give_up_after,
if_simultaneous_convert_to_turn_based=False):
"""Run rollouts on the specified game until the time limit.
  Args:
    game_name: Name of the game to benchmark.
    time_limit: Time limit per game, in seconds.
    give_up_after: Abandon a rollout once its history exceeds this length.
    if_simultaneous_convert_to_turn_based: If the game is simultaneous and this
      boolean is True, the game is loaded as a turn-based game.
Returns:
A dict of collected statistics.
"""
game = pyspiel.load_game(game_name)
if game.get_type().dynamics == pyspiel.GameType.Dynamics.MEAN_FIELD:
raise NotImplementedError(
"Benchmark on mean field games is not available yet.")
if (game.get_type().dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS and
if_simultaneous_convert_to_turn_based):
game = pyspiel.convert_to_turn_based(game)
is_time_out = lambda t: time.time() - t > time_limit
num_rollouts = 0
num_giveups = 0
num_moves = 0
start = time.time()
while not is_time_out(start):
state = game.new_initial_state()
while not state.is_terminal():
if len(state.history()) > give_up_after:
num_giveups += 1
break
if state.is_simultaneous_node():
def random_choice(actions):
if actions:
return random.choice(actions)
return 0
actions = [
random_choice(state.legal_actions(i))
for i in range(state.num_players())
]
state.apply_actions(actions)
else:
action = random.choice(state.legal_actions(state.current_player()))
state.apply_action(action)
num_moves += 1
num_rollouts += 1
time_elapsed = time.time() - start
return dict(
game_name=game_name,
ms_per_rollouts=time_elapsed / num_rollouts * 1000,
ms_per_moves=time_elapsed / num_moves * 1000,
giveups_per_rollout=num_giveups / num_rollouts,
time_elapsed=time_elapsed
)
def main(_):
if FLAGS.games == "*":
games_list = [
game.short_name
for game in pyspiel.registered_games()
if game.default_loadable
]
else:
games_list = FLAGS.games.split(";")
logging.info("Running benchmark for %s games.", len(games_list))
logging.info("This will take approximately %d seconds.",
len(games_list) * FLAGS.time_limit)
game_stats = []
for game_name in games_list:
logging.info("Running benchmark on %s", game_name)
game_stats.append(
_rollout_until_timeout(game_name, FLAGS.time_limit, FLAGS.give_up_after,
FLAGS.if_simultaneous_convert_to_turn_based))
with pd.option_context("display.max_rows", None,
"display.max_columns", None,
"display.width", 200):
df = pd.DataFrame(game_stats)
# Use nice header names.
df.rename(columns={
"game_name": "Game",
"ms_per_rollouts": "msec/rollout",
"ms_per_moves": "msec/move",
"giveups_per_rollout": "Give ups/rollouts",
"time_elapsed": "Time elapsed [sec]"
}, inplace=True)
print("---")
print("Results for following benchmark configuration:")
print("time_limit =", FLAGS.time_limit)
print("give_up_after =", FLAGS.give_up_after)
print("---")
print(df)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/benchmark_games.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy gradient agents trained and evaluated on Kuhn Poker."""
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import policy_gradient
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_episodes", int(1e6), "Number of train episodes.")
flags.DEFINE_integer("eval_every", int(1e4), "Eval agents every x episodes.")
flags.DEFINE_enum("loss_str", "rpg", ["a2c", "rpg", "qpg", "rm"],
"PG loss to use.")
class PolicyGradientPolicies(policy.Policy):
"""Joint policy to be evaluated."""
def __init__(self, env, nfsp_policies):
game = env.game
player_ids = [0, 1]
super(PolicyGradientPolicies, self).__init__(game, player_ids)
self._policies = nfsp_policies
self._obs = {"info_state": [None, None], "legal_actions": [None, None]}
def action_probabilities(self, state, player_id=None):
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
self._obs["current_player"] = cur_player
self._obs["info_state"][cur_player] = (
state.information_state_tensor(cur_player))
self._obs["legal_actions"][cur_player] = legal_actions
info_state = rl_environment.TimeStep(
observations=self._obs, rewards=None, discounts=None, step_type=None)
p = self._policies[cur_player].step(info_state, is_evaluation=True).probs
prob_dict = {action: p[action] for action in legal_actions}
return prob_dict
def main(_):
game = "kuhn_poker"
num_players = 2
env_configs = {"players": num_players}
env = rl_environment.Environment(game, **env_configs)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with tf.Session() as sess:
# pylint: disable=g-complex-comprehension
agents = [
policy_gradient.PolicyGradient(
sess,
idx,
info_state_size,
num_actions,
loss_str=FLAGS.loss_str,
hidden_layers_sizes=(128,)) for idx in range(num_players)
]
expl_policies_avg = PolicyGradientPolicies(env, agents)
sess.run(tf.global_variables_initializer())
for ep in range(FLAGS.num_episodes):
if (ep + 1) % FLAGS.eval_every == 0:
losses = [agent.loss for agent in agents]
expl = exploitability.exploitability(env.game, expl_policies_avg)
msg = "-" * 80 + "\n"
msg += "{}: {}\n{}\n".format(ep + 1, expl, losses)
logging.info("%s", msg)
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/kuhn_policy_gradient.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python Deep CFR example."""
from absl import app
from absl import flags
from absl import logging
from open_spiel.python import policy
from open_spiel.python.algorithms import expected_game_score
import pyspiel
from open_spiel.python.pytorch import deep_cfr
FLAGS = flags.FLAGS
flags.DEFINE_integer("num_iterations", 400, "Number of iterations")
flags.DEFINE_integer("num_traversals", 40, "Number of traversals/games")
flags.DEFINE_string("game_name", "kuhn_poker", "Name of the game")
def main(unused_argv):
logging.info("Loading %s", FLAGS.game_name)
game = pyspiel.load_game(FLAGS.game_name)
deep_cfr_solver = deep_cfr.DeepCFRSolver(
game,
policy_network_layers=(32, 32),
advantage_network_layers=(16, 16),
num_iterations=FLAGS.num_iterations,
num_traversals=FLAGS.num_traversals,
learning_rate=1e-3,
batch_size_advantage=None,
batch_size_strategy=None,
memory_capacity=int(1e7))
_, advantage_losses, policy_loss = deep_cfr_solver.solve()
for player, losses in advantage_losses.items():
logging.info("Advantage for player %d: %s", player,
losses[:2] + ["..."] + losses[-2:])
logging.info("Advantage Buffer Size for player %s: '%s'", player,
len(deep_cfr_solver.advantage_buffers[player]))
logging.info("Strategy Buffer Size: '%s'",
len(deep_cfr_solver.strategy_buffer))
logging.info("Final policy loss: '%s'", policy_loss)
average_policy = policy.tabular_policy_from_callable(
game, deep_cfr_solver.action_probabilities)
pyspiel_policy = policy.python_policy_to_pyspiel_policy(average_policy)
conv = pyspiel.nash_conv(game, pyspiel_policy)
logging.info("Deep CFR in '%s' - NashConv: %s", FLAGS.game_name, conv)
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
logging.info("Computed player 0 value: %.2f (expected: %.2f).",
average_policy_values[0], -1 / 18)
logging.info("Computed player 1 value: %.2f (expected: %.2f).",
average_policy_values[1], 1 / 18)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/deep_cfr_pytorch.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plays a uniform random bot against the default scenarios for that game."""
import random
from absl import app
from absl import flags
from open_spiel.python.bots import scenarios
from open_spiel.python.bots import uniform_random
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game_name", "catch", "Game to play scenarios for.")
def main(argv):
del argv
game = pyspiel.load_game(FLAGS.game_name)
# TODO(author1): Add support for bots from neural networks.
bots = [
uniform_random.UniformRandomBot(i, random)
for i in range(game.num_players())
]
scenarios.play_bot_in_scenarios(game, bots)
if __name__ == "__main__":
app.run(main)
| open_spiel-master | open_spiel/python/examples/play_scenarios.py |