python_code | repo_name | file_path
---|---|---|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy aggregator.
A joint policy is a list of `num_players` policies.
This file enables computing mixtures of such joint policies to obtain a new
policy.
"""
import copy
import itertools
from open_spiel.python import policy
def _aggregate_at_state(joint_policies, state, player):
"""Returns {action: prob} for `player` in `state` for all joint policies.
Args:
joint_policies: List of joint policies.
state: Openspiel State
player: Current Player
Returns:
{action: prob} for `player` in `state` for all joint policies.
"""
return [
joint_policy[player].action_probabilities(state, player_id=player)
for joint_policy in joint_policies
]
class _DictPolicy(policy.Policy):
"""A callable policy class."""
def __init__(self, game, policies_as_dict):
"""Constructs a policy function.
Arguments:
game: OpenSpiel game.
policies_as_dict: A list of `num_players` dicts, each mapping a state key
to an {action: prob} dictionary.
"""
self._game = game
self._game_type = game.get_type()
self._policies_as_dict = policies_as_dict
def _state_key(self, state, player_id=None):
"""Returns the key to use to look up this (state, player_id) pair."""
if self._game_type.provides_information_state_string:
if player_id is None:
return state.information_state_string()
else:
return state.information_state_string(player_id)
elif self._game_type.provides_observation_string:
if player_id is None:
return state.observation_string()
else:
return state.observation_string(player_id)
else:
return str(state)
@property
def policies(self):
return self._policies_as_dict
def action_probabilities(self, state, player_id=None):
"""Returns the policy for a player in a state.
Args:
state: A `pyspiel.State` object.
player_id: Optional, the player id for whom we want an action. Required
only at simultaneous-move states where multiple players can act.
Returns:
A `dict` of `{action: probability}` for the specified player in the
supplied state.
"""
state_key = self._state_key(state, player_id=player_id)
if player_id is None:
player_id = state.current_player()
return self._policies_as_dict[player_id][state_key]
class JointPolicyAggregator(object):
"""Main aggregator object."""
def __init__(self, game, epsilon=1e-40):
self._game = game
self._game_type = game.get_type()
self._num_players = self._game.num_players()
self._joint_policies = None
self._policy = {} # A Dict from info-state to {action: prob}
self._epsilon = epsilon
def _state_key(self, state, player_id=None):
"""Returns the key to use to look up this (state, player) pair."""
if self._game_type.provides_information_state_string:
if player_id is None:
return state.information_state_string()
else:
return state.information_state_string(player_id)
elif self._game_type.provides_observation_string:
if player_id is None:
return state.observation_string()
else:
return state.observation_string(player_id)
else:
return str(state)
def aggregate(self, pids, joint_policies, weights):
r"""Computes the weighted-mixture of the joint policies.
Let P of shape [num_players] be the joint policy, and W some weights.
Let N be the number of policies (i.e. len(policies)).
We return the policy P' such that for all state `s`:
P[s] ~ \sum_{i=0}^{N-1} (policies[i][player(s)](s) * weights[i] *
reach_prob(s, policies[i]))
Arguments:
pids: Spiel player ids of the players the strategies belong to.
joint_policies: List of list of policies (One list per joint strategy)
weights: List of weights to attach to each joint strategy.
Returns:
A _DictPolicy, a callable object representing the policy.
"""
aggr_policies = []
self._joint_policies = joint_policies
# TODO(pmuller): We should be able to do a single recursion.
for pid in pids:
aggr_policies.append(self._sub_aggregate(pid, weights))
return _DictPolicy(self._game, aggr_policies)
def _sub_aggregate(self, pid, weights):
"""Aggregate the list of policies for one player.
Arguments:
pid: Spiel player id of the player the strategies belong to.
weights: List of weights to attach to each joint strategy.
Returns:
A dict mapping state keys to {action: prob} dictionaries for player `pid`.
"""
# Maps state-key strings to {action: prob} dictionaries.
self._policy = {}
state = self._game.new_initial_state()
self._rec_aggregate(pid, state, copy.deepcopy(weights))
# Now normalize
for key in self._policy:
actions, probabilities = zip(*self._policy[key].items())
new_probs = [prob + self._epsilon for prob in probabilities]
denom = sum(new_probs)
for i in range(len(actions)):
self._policy[key][actions[i]] = new_probs[i] / denom
return self._policy
def _rec_aggregate(self, pid, state, my_reaches):
"""Recursively traverse game tree to compute aggregate policy."""
if state.is_terminal():
return
if state.is_simultaneous_node():
policies = _aggregate_at_state(self._joint_policies, state, pid)
state_key = self._state_key(state, pid)
self._policy[state_key] = {}
used_moves = state.legal_actions(pid)
for uid in used_moves:
new_reaches = copy.deepcopy(my_reaches)
for i in range(len(policies)):
# compute the new reach for each policy for this action
new_reaches[i] *= policies[i].get(uid, 0)
# add reach * prob(a) for this policy to the computed policy
if uid in self._policy[state_key].keys():
self._policy[state_key][uid] += new_reaches[i]
else:
self._policy[state_key][uid] = new_reaches[i]
num_players = self._game.num_players()
all_other_used_moves = []
for player in range(num_players):
if player != pid:
all_other_used_moves.append(state.legal_actions(player))
other_joint_actions = itertools.product(*all_other_used_moves)
# enumerate every possible combination of the other agents' actions for
# the next state
for other_joint_action in other_joint_actions:
for uid in used_moves:
new_reaches = copy.deepcopy(my_reaches)
for i in range(len(policies)):
# compute the new reach for each policy for this action
new_reaches[i] *= policies[i].get(uid, 0)
joint_action = list(
other_joint_action[:pid] + (uid,) + other_joint_action[pid:]
)
new_state = state.clone()
new_state.apply_actions(joint_action)
self._rec_aggregate(pid, new_state, new_reaches)
return
if state.is_chance_node():
for action in state.legal_actions():
new_state = state.child(action)
self._rec_aggregate(pid, new_state, my_reaches)
return
current_player = state.current_player()
state_key = self._state_key(state, current_player)
action_probabilities_list = _aggregate_at_state(self._joint_policies, state,
current_player)
if pid == current_player:
# update the current node
# will need the observation to query the policies
if state_key not in self._policy:
self._policy[state_key] = {}
for action in state.legal_actions():
new_reaches = copy.deepcopy(my_reaches)
if pid == current_player:
for idx, state_action_probs in enumerate(action_probabilities_list):
# compute the new reach for each policy for this action
new_reaches[idx] *= state_action_probs.get(action, 0)
# add reach * prob(a) for this policy to the computed policy
if action in self._policy[state_key].keys():
self._policy[state_key][action] += new_reaches[idx]
else:
self._policy[state_key][action] = new_reaches[idx]
# recurse
self._rec_aggregate(pid, state.child(action), new_reaches)
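# Usage sketch (editor's addition, not part of the original module): mix two
# joint policies for two-player Kuhn poker with equal weights. The game, the
# joint policies and the weights below are illustrative only.
def _example_aggregate_joint_policies():
  import pyspiel
  game = pyspiel.load_game("kuhn_poker")
  joint_policies = [
      [policy.TabularPolicy(game) for _ in range(2)],
      [policy.UniformRandomPolicy(game) for _ in range(2)],
  ]
  aggregator = JointPolicyAggregator(game)
  aggregated = aggregator.aggregate(
      pids=list(range(2)), joint_policies=joint_policies, weights=[0.5, 0.5])
  # Deal the chance cards, then query the mixed policy at the first decision.
  state = game.new_initial_state()
  while state.is_chance_node():
    state.apply_action(state.legal_actions()[0])
  return aggregated.action_probabilities(state)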
| open_spiel-master | open_spiel/python/algorithms/policy_aggregator_joint.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.policy_value."""
from absl.testing import absltest
import numpy as np
from open_spiel.python import games # pylint: disable=unused-import
from open_spiel.python import policy
from open_spiel.python.algorithms import expected_game_score
import pyspiel
class PolicyValueTest(absltest.TestCase):
def test_expected_game_score_uniform_random_kuhn_poker(self):
game = pyspiel.load_game("kuhn_poker")
uniform_policy = policy.UniformRandomPolicy(game)
uniform_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [uniform_policy] * 2)
self.assertTrue(np.allclose(uniform_policy_values, [1 / 8, -1 / 8]))
def test_expected_game_score_uniform_random_iterated_prisoner_dilemma(self):
game = pyspiel.load_game(
"python_iterated_prisoners_dilemma(max_game_length=6)")
pi = policy.UniformRandomPolicy(game)
values = expected_game_score.policy_value(game.new_initial_state(), pi)
# 4*(1-0.875**6)/0.125 = 17.6385498
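# (Editor's note: assuming the default payoff table, uniform play earns 4 per
# round in expectation and round t+1 is reached with probability 0.875**t,
# hence 4 * sum_{t=0}^{5} 0.875**t = 4 * (1 - 0.875**6) / 0.125.)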
np.testing.assert_allclose(values, [17.6385498, 17.6385498])
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/expected_game_score_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RL agent following an uniform distribution over legal actions."""
import numpy as np
from open_spiel.python import rl_agent
class RandomAgent(rl_agent.AbstractAgent):
"""Random agent class."""
def __init__(self, player_id, num_actions, name="random_agent"):
assert num_actions > 0
self._player_id = player_id
self._num_actions = num_actions
def step(self, time_step, is_evaluation=False):
# If it is the end of the episode, don't select an action.
if time_step.last():
return
# Pick a random legal action.
cur_legal_actions = time_step.observations["legal_actions"][self._player_id]
action = np.random.choice(cur_legal_actions)
probs = np.zeros(self._num_actions)
probs[cur_legal_actions] = 1.0 / len(cur_legal_actions)
return rl_agent.StepOutput(action=action, probs=probs)
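# Usage sketch (editor's addition, not part of the original module): step
# RandomAgent instances through one Kuhn poker episode via rl_environment.
# The game name and the episode handling below are illustrative only.
def _example_random_agent_episode():
  from open_spiel.python import rl_environment
  env = rl_environment.Environment("kuhn_poker")
  agents = [
      RandomAgent(player_id=idx, num_actions=env.action_spec()["num_actions"])
      for idx in range(env.num_players)
  ]
  time_step = env.reset()
  while not time_step.last():
    player_id = time_step.observations["current_player"]
    agent_output = agents[player_id].step(time_step)
    time_step = env.step([agent_output.action])
  for agent in agents:  # Let every agent observe the terminal step.
    agent.step(time_step)
  return time_step.rewards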
| open_spiel-master | open_spiel/python/algorithms/random_agent.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.jpsro."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python.algorithms import jpsro
import pyspiel
GAMES = (
"sheriff_2p_gabriele",
)
SWEEP_KWARGS = [
dict( # pylint: disable=g-complex-comprehension
game_name=game,
iterations=iterations,
policy_init=policy_init,
update_players_strategy=update_players_strategy,
target_equilibrium=target_equilibrium,
br_selection=br_selection,
train_meta_solver=train_meta_solver,
eval_meta_solver=eval_meta_solver,
ignore_repeats=ignore_repeats,
) for (
iterations,
game,
policy_init,
update_players_strategy,
target_equilibrium,
br_selection,
train_meta_solver,
eval_meta_solver,
ignore_repeats) in itertools.product(
[2],
GAMES,
jpsro.INIT_POLICIES,
jpsro.UPDATE_PLAYERS_STRATEGY,
jpsro.BRS,
jpsro.BR_SELECTIONS,
jpsro.META_SOLVERS,
["mwcce"],
[True, False])
]
TEST_COUNT_LIMIT = 100
interval = len(SWEEP_KWARGS) // TEST_COUNT_LIMIT
interval = interval if interval % 2 != 0 else interval + 1 # Odd interval.
SWEEP_KWARGS = SWEEP_KWARGS[::interval]
def get_game(game_name):
"""Returns the game."""
if game_name == "kuhn_poker_3p":
game_name = "kuhn_poker"
game_kwargs = {"players": int(3)}
elif game_name == "trade_comm_2p_2i":
game_name = "trade_comm"
game_kwargs = {"num_items": int(2)}
elif game_name == "sheriff_2p_gabriele":
game_name = "sheriff"
game_kwargs = {
"item_penalty": float(1.0),
"item_value": float(5.0),
"max_bribe": int(2),
"max_items": int(10),
"num_rounds": int(2),
"sheriff_penalty": float(1.0),
}
else:
raise ValueError("Unrecognised game: %s" % game_name)
return pyspiel.load_game_as_turn_based(game_name, game_kwargs)
class JPSROTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(*SWEEP_KWARGS)
def test_jpsro_cce(self, **kwargs):
game = get_game(kwargs["game_name"])
jpsro.run_loop(game=game, **kwargs)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/jpsro_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python implementation for Monte Carlo Counterfactual Regret Minimization."""
import numpy as np
from open_spiel.python.algorithms import mccfr
import pyspiel
class OutcomeSamplingSolver(mccfr.MCCFRSolverBase):
"""An implementation of outcome sampling MCCFR."""
def __init__(self, game):
super().__init__(game)
# This is the epsilon exploration factor. When sampling episodes, the
# updating player will sample according to expl * uniform + (1 - expl) *
# current_policy.
self._expl = 0.6
assert game.get_type().dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL, (
"MCCFR requires sequential games. If you're trying to run it " +
"on a simultaneous (or normal-form) game, please first transform it " +
"using turn_based_simultaneous_game.")
def iteration(self):
"""Performs one iteration of outcome sampling.
An iteration consists of one episode for each player as the update
player.
"""
for update_player in range(self._num_players):
state = self._game.new_initial_state()
self._episode(
state, update_player, my_reach=1.0, opp_reach=1.0, sample_reach=1.0)
def _baseline(self, state, info_state, aidx): # pylint: disable=unused-argument
# Default to vanilla outcome sampling
return 0
def _baseline_corrected_child_value(self, state, info_state, sampled_aidx,
aidx, child_value, sample_prob):
# Applies Eq. 9 of Schmid et al. '19
baseline = self._baseline(state, info_state, aidx)
if aidx == sampled_aidx:
return baseline + (child_value - baseline) / sample_prob
else:
return baseline
def _episode(self, state, update_player, my_reach, opp_reach, sample_reach):
"""Runs an episode of outcome sampling.
Args:
state: the open spiel state to run from (will be modified in-place).
update_player: the player to update regrets for (the other players
update average strategies)
my_reach: reach probability of the update player
opp_reach: reach probability of all the opponents (including chance)
sample_reach: reach probability of the sampling (behavior) policy
Returns:
The utility of the update player (a real value).
"""
if state.is_terminal():
return state.player_return(update_player)
if state.is_chance_node():
outcomes, probs = zip(*state.chance_outcomes())
aidx = np.random.choice(range(len(outcomes)), p=probs)
state.apply_action(outcomes[aidx])
return self._episode(state, update_player, my_reach,
probs[aidx] * opp_reach, probs[aidx] * sample_reach)
cur_player = state.current_player()
info_state_key = state.information_state_string(cur_player)
legal_actions = state.legal_actions()
num_legal_actions = len(legal_actions)
infostate_info = self._lookup_infostate_info(info_state_key,
num_legal_actions)
policy = self._regret_matching(infostate_info[mccfr.REGRET_INDEX],
num_legal_actions)
if cur_player == update_player:
uniform_policy = (
np.ones(num_legal_actions, dtype=np.float64) / num_legal_actions)
sample_policy = self._expl * uniform_policy + (1.0 - self._expl) * policy
else:
sample_policy = policy
sampled_aidx = np.random.choice(range(num_legal_actions), p=sample_policy)
state.apply_action(legal_actions[sampled_aidx])
if cur_player == update_player:
new_my_reach = my_reach * policy[sampled_aidx]
new_opp_reach = opp_reach
else:
new_my_reach = my_reach
new_opp_reach = opp_reach * policy[sampled_aidx]
new_sample_reach = sample_reach * sample_policy[sampled_aidx]
child_value = self._episode(state, update_player, new_my_reach,
new_opp_reach, new_sample_reach)
# Compute each of the child estimated values.
child_values = np.zeros(num_legal_actions, dtype=np.float64)
for aidx in range(num_legal_actions):
child_values[aidx] = self._baseline_corrected_child_value(
state, infostate_info, sampled_aidx, aidx, child_value,
sample_policy[aidx])
value_estimate = 0
for aidx in range(num_legal_actions):
value_estimate += policy[aidx] * child_values[aidx]
if cur_player == update_player:
# Now the regret and avg strategy updates.
policy = self._regret_matching(infostate_info[mccfr.REGRET_INDEX],
num_legal_actions)
# Estimate for the counterfactual value of the policy.
cf_value = value_estimate * opp_reach / sample_reach
# Update regrets.
#
# Note: different from Chapter 4 of Lanctot '13 thesis, the utilities
# coming back from the recursion are already multiplied by the players'
# tail reaches and divided by the sample tail reach. So when adding
# regrets to the table, we need only multiply by the opponent reach and
# divide by the sample reach to this point.
for aidx in range(num_legal_actions):
# Estimate for the counterfactual value of the policy replaced by always
# choosing action `aidx` at this information state.
cf_action_value = child_values[aidx] * opp_reach / sample_reach
self._add_regret(info_state_key, aidx, cf_action_value - cf_value)
# update the average policy
for aidx in range(num_legal_actions):
increment = my_reach * policy[aidx] / sample_reach
self._add_avstrat(info_state_key, aidx, increment)
return value_estimate
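# Usage sketch (editor's addition, not part of the original module): run
# outcome-sampling MCCFR on Kuhn poker and report NashConv of the average
# policy. The iteration count is illustrative only.
def _example_outcome_sampling_kuhn(num_iterations=1000):
  from open_spiel.python.algorithms import exploitability
  game = pyspiel.load_game("kuhn_poker")
  solver = OutcomeSamplingSolver(game)
  for _ in range(num_iterations):
    solver.iteration()
  return exploitability.nash_conv(game, solver.average_policy())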
| open_spiel-master | open_spiel/python/algorithms/outcome_sampling_mccfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.masked_softmax."""
import math
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import masked_softmax
# Temporarily disable TF2 behavior until the code is updated.
tf.disable_v2_behavior()
exp = math.exp # For shorter lines
_BATCH_INPUTS = np.asarray([
[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0],
[10.0, 11.0, 12.0],
[13.0, 14.0, 15.0],
[16.0, 17.0, 18.0],
])
_BATCH_MASK = np.asarray([
[1.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[0.0, 1.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
])
total_row_0 = exp(1) + exp(2) + exp(3)
total_row_1 = exp(4) + exp(6)
total_row_2 = exp(8) + exp(9)
# pyformat: disable
_BATCH_EXPECTED = np.asarray([
[exp(1) / total_row_0, exp(2) / total_row_0, exp(3) / total_row_0],
[exp(4) / total_row_1, 0, exp(6) / total_row_1],
[0, exp(8) / total_row_2, exp(9) / total_row_2],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
])
# pyformat: enable
# The following provides a 2-batch set of time-sequence policies.
# [B, T, num_actions] = 2, 3, 3
_B_T_LOGITS = np.asarray([[
[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0],
], [
[10.0, 11.0, 12.0],
[13.0, 14.0, 15.0],
[16.0, 17.0, 18.0],
]])
_B_T_MASK = np.asarray([[
[1.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[0.0, 1.0, 1.0],
], [
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
]])
_B_T_EXPECTED = np.asarray([[
[exp(1) / total_row_0,
exp(2) / total_row_0,
exp(3) / total_row_0],
[exp(4) / total_row_1, 0, exp(6) / total_row_1],
[0, exp(8) / total_row_2, exp(9) / total_row_2],
], [
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
]])
array = np.asarray
# We test over all the above examples.
_ALL_TESTS_INPUTS = [
# Non-batch inputs
(array([1., 1.]), array([1., 1.]), array([.5, .5])),
(array([1., 1.]), array([0., 1.]), array([0., 1.])),
(array([1., 1.]), array([1., 0.]), array([1., 0.])),
(array([1., 1., 1]), array([1., 1., 0]), array([.5, .5, 0.])),
# Batch-inputs
(_BATCH_INPUTS, _BATCH_MASK, _BATCH_EXPECTED),
# Batch-time inputs
(_B_T_LOGITS, _B_T_MASK, _B_T_EXPECTED),
]
class MaskedSoftmaxTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(_ALL_TESTS_INPUTS)
def test_np_masked_softmax(self, logits, legal_actions, expected):
np.testing.assert_array_almost_equal(
expected, masked_softmax.np_masked_softmax(logits, legal_actions))
@parameterized.parameters(_ALL_TESTS_INPUTS)
def test_tf_masked_softmax(self, np_logits, np_legal_actions, expected):
logits = tf.Variable(np_logits, tf.float32)
mask = tf.Variable(np_legal_actions, tf.float32)
policy = masked_softmax.tf_masked_softmax(logits, mask)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
np_policy = sess.run(policy)
np.testing.assert_array_almost_equal(expected, np_policy)
def test_masked_softmax_on_all_invalid_moves(self):
# If all actions are illegal, the behavior is undefined (it can be nan
# or it can be 0). We add this test to document this behavior and to know
# if we change it.
np_logits = np.asarray([[
[1.0, 1.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
]])
logits = tf.Variable(np_logits, tf.float32)
np_mask = np.asarray([[
[1.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
]])
mask = tf.Variable(np_mask, tf.float32)
expected = np.asarray([[
[1 / 3, 1 / 3, 1 / 3],
[1 / 2, 0.0, 1 / 2],
[np.nan, np.nan, np.nan],
]])
policy = masked_softmax.tf_masked_softmax(logits, mask)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
np_policy = sess.run(policy)
np.testing.assert_array_almost_equal(expected, np_policy)
# Numpy behaves similarly.
np.testing.assert_array_almost_equal(
expected, masked_softmax.np_masked_softmax(np_logits, np_mask))
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/masked_softmax_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import neurd
import pyspiel
# Temporarily disable TF2 behavior while the code is not updated.
tf.disable_v2_behavior()
tf.enable_eager_execution()
_GAME = pyspiel.load_game('kuhn_poker')
def _new_model():
return neurd.DeepNeurdModel(
_GAME,
num_hidden_layers=1,
num_hidden_units=13,
num_hidden_factors=1,
use_skip_connections=True,
autoencode=True)
class NeurdTest(tf.test.TestCase):
def setUp(self):
super(NeurdTest, self).setUp()
tf.set_random_seed(42)
def test_neurd(self):
num_iterations = 2
models = [_new_model() for _ in range(_GAME.num_players())]
solver = neurd.CounterfactualNeurdSolver(_GAME, models)
average_policy = solver.average_policy()
self.assertGreater(pyspiel.nash_conv(_GAME, average_policy), 0.91)
@tf.function
def _train(model, data):
neurd.train(
model=model,
data=data,
batch_size=12,
step_size=10.0,
autoencoder_loss=tf.losses.huber_loss)
for _ in range(num_iterations):
solver.evaluate_and_update_policy(_train)
average_policy = solver.average_policy()
self.assertLess(pyspiel.nash_conv(_GAME, average_policy), 0.91)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/neurd_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Tests for open_spiel.python.algorithms.playthrough."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import generate_playthrough
class PlaythroughTest(absltest.TestCase):
def test_runs(self):
result = generate_playthrough.playthrough(
"tic_tac_toe", action_sequence=[0, 1, 2, 3, 4, 5, 6, 7, 8])
self.assertNotEmpty(result)
def test_format_tensor_1d(self):
lines = generate_playthrough._format_tensor(np.array((1, 0, 1, 1)), "x")
self.assertEqual(lines, ["x: ββ―ββ"])
def test_format_tensor_2d(self):
lines = generate_playthrough._format_tensor(np.array(((1, 0), (1, 1))), "x")
self.assertEqual(lines, [
"x: ββ―",
" ββ",
])
def test_format_tensor_3d(self):
lines = []
tensor = np.array((
((1, 0), (1, 1)),
((0, 0), (1, 0)),
((0, 1), (1, 0)),
))
lines = generate_playthrough._format_tensor(tensor, "x")
self.assertEqual(lines, [
"x:",
"ββ― β―β― β―β",
"ββ ββ― ββ―",
])
def test_format_tensor_3d_linewrap(self):
tensor = np.array((
((1, 0), (1, 1)),
((0, 0), (1, 0)),
((0, 1), (1, 0)),
))
lines = generate_playthrough._format_tensor(tensor, "x", max_cols=9)
self.assertEqual(lines, [
"x:",
"ββ― β―β―",
"ββ ββ―",
"",
"β―β",
"ββ―",
])
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/generate_playthrough_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.cfr."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import external_sampling_mccfr
import pyspiel
SEED = 39823987
class ExternalSamplingMCCFRTest(absltest.TestCase):
def test_external_sampling_leduc_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
# ensure that to_tabular() works on the returned policy and
# the tabular policy is equivalent
tabular_policy = es_solver.average_policy().to_tabular()
conv2 = exploitability.nash_conv(game, tabular_policy)
self.assertEqual(conv, conv2)
def test_external_sampling_leduc_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 5)
def test_external_sampling_kuhn_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
def test_external_sampling_kuhn_2p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 1)
# Liar's dice takes too long, so disable this test. Leave code for reference.
# pylint: disable=g-unreachable-test-method
def disabled_test_external_sampling_liars_dice_2p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("liars_dice")
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(1):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Liar's dice, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_simple(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
def test_external_sampling_kuhn_3p_full(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
es_solver = external_sampling_mccfr.ExternalSamplingSolver(
game, external_sampling_mccfr.AverageType.FULL)
for _ in range(10):
es_solver.iteration()
conv = exploitability.nash_conv(game, es_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 2)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/external_sampling_mccfr_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements ResponseGraphUCB algorithm from the below paper.
"Multiagent Evaluation under Incomplete Information" (Rowland et al., 2019)
See https://arxiv.org/abs/1909.09849 for details.
"""
import copy
import functools
import itertools
import operator
import random
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import scipy.stats
class ResponseGraphUCB(object):
"""ResponseGraphUCB sampler class."""
def __init__(self,
game,
exploration_strategy='uniform-exhaustive',
confidence_method='ucb-standard',
delta=0.01,
ucb_eps=0,
per_payoff_confidence=True,
time_dependent_delta=False):
"""Initializes ResponseGraphUCB instance.
Assumes that all payoffs fall in the interval [0,1].
Args:
game: an instance of the BernoulliGameSampler class.
exploration_strategy: string specifying the exploration strategy.
confidence_method: string specifying the confidence method.
delta: float specifying the UCB delta parameter.
ucb_eps: float specifying the UCB epsilon parameter.
per_payoff_confidence: bool specifying whether confidence level applies
on a per-payoff basis, or to all payoffs simultaneously.
time_dependent_delta: bool specifying whether the confidence parameter
varies with the number of interactions so that a union bound holds.
"""
self.exploration_strategy = exploration_strategy
self.confidence_method = confidence_method
self.ucb_eps = ucb_eps
self.G = game # pylint: disable=invalid-name
self.per_payoff_confidence = per_payoff_confidence
self.time_dependent_delta = time_dependent_delta
if self.per_payoff_confidence:
self._delta = delta
else:
self._delta = delta / (
self.G.n_players *
functools.reduce(operator.mul, self.G.strategy_spaces, 1))
# Compute the graph
self.V = list( # pylint: disable=invalid-name
itertools.product(*[range(smax) for smax in self.G.strategy_spaces]))
self.E = [] # pylint: disable=invalid-name
for v in self.V:
adj_strats = [
list(range(v[k] + 1, self.G.strategy_spaces[k]))
for k in range(self.G.n_players)
]
for k in range(self.G.n_players):
for new_s in adj_strats[k]:
second_vertex = list(v)
second_vertex[k] = new_s
second_vertex = tuple(second_vertex)
self.E.append((v, second_vertex))
self.count_history = {v: [] for v in self.V}
self.total_interactions = 0
def delta(self, k, s):
"""Returns the confidence parameter for a given player and profile."""
if not self.time_dependent_delta:
return self._delta
else:
return self._delta * (6 / (np.pi**2 * self.count[k][s] **2))
def initialise_mean_and_count(self):
"""Initializes means and counts for all response graph profiles."""
self.mu = [
np.zeros(tuple(self.G.strategy_spaces)) for _ in range(self.G.n_players)
]
self.count = [
np.zeros(tuple(self.G.strategy_spaces)) for _ in range(self.G.n_players)
]
def update_mean_and_count(self, strat_profile, game_outcome):
"""Updates means and counts for strat_profile given game_outcome."""
self.total_interactions += 1
for k in range(self.G.n_players):
self.mu[k][strat_profile] *= self.count[k][strat_profile]
self.mu[k][strat_profile] += game_outcome[k]
self.count[k][strat_profile] += 1
self.mu[k][strat_profile] /= self.count[k][strat_profile]
for s in self.V:
self.count_history[s].append(self.count[0][s] /
float(self.total_interactions))
def _find_focal_coord(self, s1, s2):
num_deviations = tuple(s1[l] != s2[l] for l in range(len(s1)))
assert np.sum(num_deviations) == 1, ('Invalid profile pair s1, s2: ({},{}). '
'Exactly one player should '
'deviate!'.format(s1, s2))
return np.argmax(num_deviations)
def _initialise_queue_uniform(self):
self.remaining_edges = copy.deepcopy(self.E)
def _add_to_queue_uniform(self, edges_removed):
"""Adds edge to sampling queue using uniform sampling."""
for e in edges_removed:
self.remaining_edges.remove(e)
self.profile_queue.append(
random.choice(random.choice(self.remaining_edges)))
def _initialise_queue_uniform_exhaustive(self):
self.edge_order = copy.deepcopy(self.E)
random.shuffle(self.edge_order)
def _add_to_queue_uniform_exhaustive(self, edges_removed):
"""Adds edge to sampling queue using uniform-exhausitive sampling."""
for e in edges_removed:
self.edge_order.remove(e)
self.profile_queue.append(random.choice(self.edge_order[0]))
def _initialise_queue_valence_weighted(self):
self.vertex_valences = {
v: np.sum(self.G.strategy_spaces) - self.G.n_players for v in self.V
}
self.sum_valences = sum(self.vertex_valences.values())
def _add_to_queue_valence_weighted(self, edges_removed):
"""Adds edge to sampling queue using valence-weighted sampling."""
# Deal with removed edges
for e in edges_removed:
for s in e:
self.vertex_valences[s] -= 1
self.sum_valences -= 1
# Calculate probabilities
probs = np.array([self.vertex_valences[v]**2 for v in self.V])
probs = probs / np.sum(probs)
s_ix = np.random.choice(np.arange(len(self.V)), p=probs)
self.profile_queue.append(self.V[s_ix])
def _initialise_queue_count_weighted(self):
# Keep track of which vertices have non-zero valence in graph
self.vertex_valences = {
v: np.sum(self.G.strategy_spaces) - self.G.n_players for v in self.V
}
self.sum_valences = sum(self.vertex_valences.values())
def _add_to_queue_count_weighted(self, edges_removed):
"""Adds edge to sampling queue using count-weighted sampling."""
# Update vertex valences
for e in edges_removed:
for s in e:
self.vertex_valences[s] -= 1
self.sum_valences -= 1
# Check counts
eligible_vertices = {
v: self.count[0][v] for v in self.V if self.vertex_valences[v] != 0
}
strat = min(eligible_vertices, key=eligible_vertices.get)
self.profile_queue.append(strat)
def initialise_queue(self):
"""Initializes sampling queue."""
self.edges_remaining = copy.deepcopy(self.E)
if self.exploration_strategy == 'uniform':
self._initialise_queue_uniform()
elif self.exploration_strategy == 'uniform-exhaustive':
self._initialise_queue_uniform_exhaustive()
elif self.exploration_strategy == 'valence-weighted':
self._initialise_queue_valence_weighted()
elif self.exploration_strategy == 'count-weighted':
self._initialise_queue_count_weighted()
else:
raise ValueError('Did not recognise exploration strategy: {}'.format(
self.exploration_strategy))
self.profile_queue = []
def add_to_queue(self, removed):
"""Update the sampling queue and the list of resolved edges.
Args:
removed: the list of edges resolved in the previous round, which should be
removed from the sampling list in subsequent rounds.
"""
if self.exploration_strategy == 'uniform':
self._add_to_queue_uniform(removed)
elif self.exploration_strategy == 'uniform-exhaustive':
self._add_to_queue_uniform_exhaustive(removed)
elif self.exploration_strategy == 'valence-weighted':
self._add_to_queue_valence_weighted(removed)
elif self.exploration_strategy == 'count-weighted':
self._add_to_queue_count_weighted(removed)
else:
raise ValueError('Did not recognise exploration strategy: {}'.format(
self.exploration_strategy))
def evaluate_strategy_profile(self, yield_outcomes=False):
"""Evaluates a strategy profile on the sampling queue.
Specifically, this:
1. Removes a strategy profile from the queue.
2. Evaluates it.
3. Updates internal statistics.
4. Adjusts list of strategy profiles whose statistics have been updated
since last confidence bound check.
Args:
yield_outcomes: set True to yield the outcomes as well.
Yields:
s: profile evaluated.
game_outcome: outcomes (player payoffs) for profile s.
"""
if self.profile_queue:
s = self.profile_queue.pop(0)
if s not in self.active_strategy_profiles:
self.active_strategy_profiles.append(s)
game_outcome = self.G.observe_result(s)
if yield_outcomes:
yield s, game_outcome
self.update_mean_and_count(s, game_outcome)
def _ucb_standard_factor(self, s, k):
return np.sqrt(np.log(2 / self.delta(k, s)) / (2 * self.count[k][s]))
def _bernoulli_upper(self, p, n, delta):
"""Returns upper confidence bound for proportion p successes of n trials.
Uses exact Clopper-Pearson interval.
Args:
p: proportion of successes.
n: number of trials.
delta: confidence parameter.
"""
if p > 1 - 1e-6:
return 1.
else:
upper = scipy.stats.beta.ppf(1. - delta / 2, p * n + 1, n - p * n)
return upper
def _bernoulli_lower(self, p, n, delta):
"""Returns lower confidence bound for proportion p successes of n trials.
Uses exact Clopper-Pearson interval.
Args:
p: proportion of successes.
n: number of trials.
delta: confidence parameter.
"""
if p < 1e-6:
return 0.
else:
lower = scipy.stats.beta.ppf(delta / 2, p * n, n - p * n + 1)
return lower
def _ucb(self, s, k):
"""Returns k-th player's payoff upper-confidence-bound given profile s."""
if self.confidence_method == 'ucb-standard':
ucb_factor = self._ucb_standard_factor(s, k)
return self.mu[k][s] + ucb_factor
elif self.confidence_method == 'ucb-standard-relaxed':
ucb_factor = self._ucb_standard_factor(s, k) - self.ucb_eps
return self.mu[k][s] + ucb_factor
elif self.confidence_method == 'clopper-pearson-ucb':
return self._bernoulli_upper(self.mu[k][s], self.count[k][s],
self.delta(k, s))
elif self.confidence_method == 'clopper-pearson-ucb-relaxed':
return self._bernoulli_upper(self.mu[k][s], self.count[k][s],
self.delta(k, s)) - self.ucb_eps
else:
raise ValueError('Did not recognise confidence method {}'.format(
self.confidence_method))
def _lcb(self, s, k):
"""Returns k-th player's payoff lower-confidence-bound given profile s."""
if self.confidence_method == 'ucb-standard':
ucb_factor = self._ucb_standard_factor(s, k)
return self.mu[k][s] - ucb_factor
elif self.confidence_method == 'ucb-standard-relaxed':
ucb_factor = self._ucb_standard_factor(s, k) + self.ucb_eps
return self.mu[k][s] - ucb_factor
elif self.confidence_method == 'clopper-pearson-ucb':
return self._bernoulli_lower(self.mu[k][s], self.count[k][s],
self.delta(k, s))
elif self.confidence_method == 'clopper-pearson-ucb-relaxed':
return self._bernoulli_lower(self.mu[k][s], self.count[k][s],
self.delta(k, s)) + self.ucb_eps
else:
raise ValueError('Did not recognise confidence method {}'.format(
self.confidence_method))
def ucb_check(self, e):
"""Conducts a UCB check on response graph edge e.
Specifically, given edge e connecting two strategy profiles s1 and s2, this:
1. Determines the dominating strategy.
2. Checks whether the payoff_UCB(worse_strategy) is less than
the payoff_LCB of the better strategy; if this is true, the confidence
intervals are disjoint, and the edge e is considered 'resolved'.
Args:
e: response graph edge.
Returns:
A bool indicating whether the edge is resolved,
and also a tuple specifying the worse and better strategies.
"""
s1, s2 = e
k = self._find_focal_coord(s1, s2)
if self.mu[k][s1] > self.mu[k][s2]:
better_strat = s1
worse_strat = s2
else:
better_strat = s2
worse_strat = s1
ucb = self._ucb(worse_strat, k)
lcb = self._lcb(better_strat, k)
return (ucb < lcb), (worse_strat, better_strat)
def check_confidence(self):
"""Returns the edges that are 'resolved' given a confidence bound check."""
edges_to_check = []
for e in self.edges_remaining:
for s in self.active_strategy_profiles:
if s in e:
if e not in edges_to_check:
edges_to_check.append(e)
edges_removed = []
for e in edges_to_check:
removed, ordered_edge = self.ucb_check(e)
if removed:
edges_removed.append(e)
self.edges_remaining.remove(e)
self.directed_edges.append(ordered_edge)
self.active_strategy_profiles = []
return edges_removed
def real_edge_direction(self, e):
s1, s2 = e
k = self._find_focal_coord(s1, s2)
if self.G.means[k][s1] > self.G.means[k][s2]:
return (s2, s1)
else:
return (s1, s2)
def construct_real_graph(self):
directed_edges = []
for e in self.E:
ordered_edge = self.real_edge_direction(e)
directed_edges.append(ordered_edge)
return self._construct_digraph(directed_edges)
def compute_graph(self):
for e in self.E:
s1, s2 = e[0], e[1]
k = self._find_focal_coord(s1, s2)
if self.mu[k][s1] > self.mu[k][s2]:
directed_edge = (s2, s1)
else:
directed_edge = (s1, s2)
if directed_edge not in self.directed_edges:
self.directed_edges.append(directed_edge)
def forced_exploration(self):
for v in self.V:
game_outcome = self.G.observe_result(v)
self.update_mean_and_count(v, game_outcome)
def run(self, verbose=True, max_total_iterations=50000):
"""Runs the ResponseGraphUCB algorithm."""
self.verbose = verbose
# Upper bounds on number of evaluations
self.max_total_iterations = max_total_iterations
self.initialise_mean_and_count()
self.directed_edges = []
self.active_strategy_profiles = []
self.initialise_queue()
# Forced initial exploration
self.forced_exploration()
# Keep evaluating nodes until check method declares that we're finished
iterations = 0
edges_resolved_this_round = []
while self.total_interactions < max_total_iterations:
# Add nodes to queue
self.add_to_queue(removed=edges_resolved_this_round)
# Evaluate the nodes and log results
for v, _ in self.evaluate_strategy_profile():
if verbose:
print(v)
# Recompute confidence bounds, eliminate, stop etc.
edges_resolved_this_round = self.check_confidence()
if not self.edges_remaining:
break
iterations += 1
# Fill in missing edges if max iters reached without resolving all edges
self.compute_graph()
# Compute objects to be returned
if verbose:
total_steps = self.compute_total_steps()
print('\nTotal steps taken = {}'.format(total_steps))
results = {}
results['interactions'] = int(np.sum(self.count[0]))
graph = self._construct_digraph(self.directed_edges)
results['graph'] = graph
return results
def compute_total_steps(self):
return int(np.sum(self.count[0]))
def _construct_digraph(self, edges):
graph = nx.DiGraph()
graph.add_nodes_from(self.V)
for e in edges:
graph.add_edge(e[0], e[1])
return graph
def _plot_errorbars_2x2x2(self, x, y, xerr, yerr, fmt):
"""Plots ResponseGraph with error bars for a 2-player 2x2 game."""
# plt.errorbar does not accept list of colors, so plot twice
for i_strat in [0, 1]:
if xerr[i_strat] is None:
plt.errorbar(
x=x[i_strat],
y=y[i_strat],
yerr=np.reshape(yerr[:, i_strat], (2, 1)),
markerfacecolor='b',
ecolor='b',
fmt=fmt,
zorder=0)
elif yerr[i_strat] is None:
plt.errorbar(
x=x[i_strat],
y=y[i_strat],
xerr=np.reshape(xerr[:, i_strat], (2, 1)),
markerfacecolor='b',
ecolor='b',
fmt=fmt,
zorder=0)
else:
raise ValueError()
def visualise_2x2x2(self, real_values, graph):
"""Plots summary of ResponseGraphUCB for a 2-player 2x2 game."""
_, axes = plt.subplots(3, 3, figsize=(10, 10),
gridspec_kw={'width_ratios': [1, 2, 1],
'height_ratios': [1, 2, 1]})
axes[0, 0].axis('off')
axes[0, 2].axis('off')
axes[2, 0].axis('off')
axes[2, 2].axis('off')
# (0,0) vs. (0,1)
plt.sca(axes[0, 1])
s1 = (0, 0)
s2 = (0, 1)
self._plot_errorbars_2x2x2(
x=[0, 1],
y=[self.mu[1][s1], self.mu[1][s2]],
xerr=[None, None],
yerr=np.array([[self.mu[1][s1] - self._lcb(s1, 1),
self.mu[1][s2] - self._lcb(s2, 1)],
[self._ucb(s1, 1) - self.mu[1][s1],
self._ucb(s2, 1) - self.mu[1][s2]]]),
fmt='o')
plt.scatter([0, 1], [real_values[1, 0, 0], real_values[1, 0, 1]],
color='red',
zorder=1)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
plt.xticks([])
plt.yticks([0, 0.5, 1])
plt.gca().set_yticklabels(['0', '', '1'])
plt.gca().yaxis.set_ticks_position('left')
plt.gca().grid(True)
plt.ylim(0, 1)
# (0,0) vs. (1,0)
plt.sca(axes[1, 0])
s1 = (1, 0)
s2 = (0, 0)
self._plot_errorbars_2x2x2(
x=[self.mu[0][s1], self.mu[0][s2]],
y=[0, 1],
xerr=np.array([[self.mu[0][s1] - self._lcb(s1, 0),
self.mu[0][s2] - self._lcb(s2, 0)],
[self._ucb(s1, 0) - self.mu[0][s1],
self._ucb(s2, 0) - self.mu[0][s2]]]),
yerr=[None, None],
fmt='o')
plt.scatter([real_values[0, 1, 0], real_values[0, 0, 0]], [0, 1],
color='red',
zorder=1)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
plt.xticks([0, 0.5, 1])
plt.gca().set_xticklabels(['0', '', '1'])
plt.gca().xaxis.set_ticks_position('bottom')
plt.gca().grid(True)
plt.yticks([])
plt.xlim(0, 1)
# (0,1) vs. (1,1)
plt.sca(axes[1, 2])
s1 = (1, 1)
s2 = (0, 1)
self._plot_errorbars_2x2x2(
x=[self.mu[0][s1], self.mu[0][s2]],
y=[0, 1],
xerr=np.array([[self.mu[0][s1] - self._lcb(s1, 0),
self.mu[0][s2] - self._lcb(s2, 0)],
[self._ucb(s1, 0) - self.mu[0][s1],
self._ucb(s2, 0) - self.mu[0][s2]]]),
yerr=[None, None],
fmt='o')
plt.scatter([real_values[0, 1, 1], real_values[0, 0, 1]], [0, 1],
color='red',
zorder=1)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
plt.xticks([0, 0.5, 1])
plt.gca().set_xticklabels(['0', '', '1'])
plt.gca().xaxis.set_ticks_position('top')
plt.yticks([])
plt.gca().grid(True)
plt.xlim(0, 1)
# (1,0) vs. (1,1)
plt.sca(axes[2, 1])
s1 = (1, 0)
s2 = (1, 1)
self._plot_errorbars_2x2x2(
x=[0, 1],
y=[self.mu[1][s1], self.mu[1][s2]],
xerr=[None, None],
yerr=np.array([[self.mu[1][s1] - self._lcb(s1, 1),
self.mu[1][s2] - self._lcb(s2, 1)],
[self._ucb(s1, 1) - self.mu[1][s1],
self._ucb(s2, 1) - self.mu[1][s2]]]),
fmt='o')
plt.scatter([0, 1], [real_values[1, 1, 0], real_values[1, 1, 1]],
color='red',
zorder=1)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
plt.xticks([])
plt.yticks([0, 0.5, 1])
plt.gca().set_yticklabels(['0', '', '1'])
plt.gca().yaxis.set_ticks_position('right')
plt.gca().grid(True)
plt.ylim(0, 1)
self.plot_graph(graph, subplot=True, axes=axes) # Chart in the middle
def plot_graph(self, graph, subplot=False, axes=None):
"""Plots the response graph."""
if subplot:
plt.sca(axes[1, 1])
axes[1, 1].axis('off')
else:
plt.figure(figsize=(5, 5))
if len(graph.nodes) == 4:
pos = {(0, 0): [0, 1], (0, 1): [1, 1], (1, 0): [0, 0], (1, 1): [1, 0]}
else:
pos = nx.circular_layout(graph)
nx.draw_networkx_nodes(
graph, pos, node_size=1800, node_color='w', edgecolors='k')
nx.draw_networkx_edges(
graph,
pos,
node_size=1800,
edge_color='k',
arrowstyle='->',
arrowsize=10,
width=3)
nx.draw_networkx_labels(self.G, pos, {x: x for x in self.V}, font_size=14)
def visualise_count_history(self, figsize=(5, 2)):
"""Plots the sampling count history for each strategy profile."""
plt.figure(figsize=figsize)
data = []
labels = []
for v in self.V:
print(v)
labels.append(v)
data.append(self.count_history[v])
pal = plt.get_cmap('Dark2').colors
plt.stackplot(
np.arange(1, self.total_interactions + 1),
np.array(data),
labels=labels,
colors=pal)
plt.ylim(top=1, bottom=0)
plt.xlabel('Interactions')
plt.ylabel('Proportions')
# Shrink current axis
ax = plt.gca()
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.67, box.height])
plt.xlim(1, self.total_interactions)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=1)
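# Usage sketch (editor's addition, not part of the original module). It builds
# a minimal stand-in for the game-sampler interface that ResponseGraphUCB
# expects (`n_players`, `strategy_spaces`, `means`, `observe_result`); the
# payoff probabilities below are illustrative only.
class _ToyBernoulliGame(object):
  """A two-player 2x2 game with Bernoulli payoffs in [0, 1]."""
  def __init__(self):
    self.n_players = 2
    self.strategy_spaces = [2, 2]
    # means[k][s] is player k's expected payoff under joint strategy s.
    self.means = [
        np.array([[0.9, 0.2], [0.6, 0.4]]),
        np.array([[0.1, 0.8], [0.4, 0.6]]),
    ]
  def observe_result(self, strat_profile):
    # Sample a Bernoulli payoff for each player at this strategy profile.
    return [
        np.random.binomial(1, self.means[k][strat_profile])
        for k in range(self.n_players)
    ]
def _example_run_response_graph_ucb():
  sampler = ResponseGraphUCB(
      _ToyBernoulliGame(),
      exploration_strategy='uniform-exhaustive',
      confidence_method='ucb-standard',
      delta=0.1)
  results = sampler.run(verbose=False, max_total_iterations=5000)
  return results['graph'], results['interactions']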
| open_spiel-master | open_spiel/python/algorithms/response_graph_ucb.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that gambit export can be imported back."""
import collections
import tempfile
from absl import app
from absl.testing import absltest
from open_spiel.python.algorithms.gambit import export_gambit
import pyspiel
class GambitTest(absltest.TestCase):
def test_gambit_export_can_be_imported(self):
game_list = [
"kuhn_poker",
"kuhn_poker(players=3)",
]
for game_name in game_list:
game_orig = pyspiel.load_game(game_name)
gbt = export_gambit(game_orig)
f = tempfile.NamedTemporaryFile("w", delete=False)
f.write(gbt)
f.flush()
game_efg = pyspiel.load_game("efg_game(filename=%s)" % f.name)
f.close()
self._infoset_table_orig = collections.defaultdict(lambda: [])
self._infoset_table_efg = collections.defaultdict(lambda: [])
self._recursive_check(game_orig.new_initial_state(),
game_efg.new_initial_state())
self._check_infoset_isomorphism(self._infoset_table_orig,
self._infoset_table_efg)
def _recursive_check(self, g, h):
self.assertEqual(g.current_player(), h.current_player())
self.assertEqual(g.is_chance_node(), h.is_chance_node())
self.assertEqual(g.is_terminal(), h.is_terminal())
if g.is_terminal():
self.assertEqual(g.returns(), h.returns())
return
if g.is_chance_node():
self.assertEqual(g.chance_outcomes(), h.chance_outcomes())
else:
self.assertEqual(g.legal_actions(), h.legal_actions())
self._infoset_table_orig[g.information_state_string()].append(g.history())
self._infoset_table_efg[h.information_state_string()].append(h.history())
for a, b in zip(g.legal_actions(), h.legal_actions()):
self._recursive_check(g.child(a), h.child(b))
def _check_infoset_isomorphism(self, a, b):
a_prime = []
b_prime = []
for vs in a.values():
a_prime.append(sorted([str(v) for v in vs]))
for vs in b.values():
b_prime.append(sorted([str(v) for v in vs]))
self.assertCountEqual(a_prime, b_prime)
def main(_):
absltest.main()
if __name__ == "__main__":
# Necessary to run main via app.run for internal tests.
app.run(main)
| open_spiel-master | open_spiel/python/algorithms/gambit_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python implementation for Monte Carlo Counterfactual Regret Minimization."""
import enum
import numpy as np
from open_spiel.python.algorithms import mccfr
import pyspiel
class AverageType(enum.Enum):
SIMPLE = 0
FULL = 1
class ExternalSamplingSolver(mccfr.MCCFRSolverBase):
"""An implementation of external sampling MCCFR."""
def __init__(self, game, average_type=AverageType.SIMPLE):
super().__init__(game)
# How to average the strategy. The 'simple' type does the averaging for
# player i + 1 mod num_players on player i's regret update pass; in two
# players this corresponds to the standard implementation (updating the
# average policy at opponent nodes). In n>2 players, this can be a problem
# for several reasons: first, it does not compute the estimate as described
# by the (unbiased) stochastically-weighted averaging in chapter 4 of
# Lanctot 2013 commonly used in MCCFR because the denominator (importance
# sampling correction) should include all the other sampled players as well
# so the sample reach no longer cancels with reach of the player updating
# their average policy. Second, if one player assigns zero probability to an
# action (leading to a subtree), the average policy of a different player in
# that subtree is no longer updated. Hence, the full averaging does not
# update the average policy in the regret passes but does a separate pass to
# update the average policy. Nevertheless, we set the simple type as the
# default because it is faster, seems to work better empirically, and it
# matches what was done in Pluribus (Brown and Sandholm. Superhuman AI for
# multiplayer poker. Science, 11, 2019).
self._average_type = average_type
assert game.get_type().dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL, (
"MCCFR requires sequential games. If you're trying to run it " +
'on a simultaneous (or normal-form) game, please first transform it ' +
'using turn_based_simultaneous_game.')
def iteration(self):
"""Performs one iteration of external sampling.
An iteration consists of one episode for each player as the update
player.
"""
for player in range(self._num_players):
self._update_regrets(self._game.new_initial_state(), player)
if self._average_type == AverageType.FULL:
reach_probs = np.ones(self._num_players, dtype=np.float64)
self._full_update_average(self._game.new_initial_state(), reach_probs)
def _full_update_average(self, state, reach_probs):
"""Performs a full update average.
Args:
state: the open spiel state to run from
reach_probs: array containing the probability of reaching the state
from each player's point of view.
"""
if state.is_terminal():
return
if state.is_chance_node():
for action in state.legal_actions():
self._full_update_average(state.child(action), reach_probs)
return
# If all the probs are zero, no need to keep going.
sum_reach_probs = np.sum(reach_probs)
if sum_reach_probs == 0:
return
cur_player = state.current_player()
info_state_key = state.information_state_string(cur_player)
legal_actions = state.legal_actions()
num_legal_actions = len(legal_actions)
infostate_info = self._lookup_infostate_info(info_state_key,
num_legal_actions)
policy = self._regret_matching(infostate_info[mccfr.REGRET_INDEX],
num_legal_actions)
for action_idx in range(num_legal_actions):
new_reach_probs = np.copy(reach_probs)
new_reach_probs[cur_player] *= policy[action_idx]
self._full_update_average(
state.child(legal_actions[action_idx]), new_reach_probs)
# Now update the cumulative policy
for action_idx in range(num_legal_actions):
self._add_avstrat(info_state_key, action_idx,
reach_probs[cur_player] * policy[action_idx])
def _update_regrets(self, state, player):
"""Runs an episode of external sampling.
Args:
state: the open spiel state to run from
player: the player to update regrets for
Returns:
value: is the value of the state in the game
obtained as the weighted average of the values
of the children
"""
if state.is_terminal():
return state.player_return(player)
if state.is_chance_node():
outcomes, probs = zip(*state.chance_outcomes())
outcome = np.random.choice(outcomes, p=probs)
return self._update_regrets(state.child(outcome), player)
cur_player = state.current_player()
info_state_key = state.information_state_string(cur_player)
legal_actions = state.legal_actions()
num_legal_actions = len(legal_actions)
infostate_info = self._lookup_infostate_info(info_state_key,
num_legal_actions)
policy = self._regret_matching(infostate_info[mccfr.REGRET_INDEX],
num_legal_actions)
value = 0
child_values = np.zeros(num_legal_actions, dtype=np.float64)
if cur_player != player:
# Sample at opponent node
action_idx = np.random.choice(np.arange(num_legal_actions), p=policy)
value = self._update_regrets(
state.child(legal_actions[action_idx]), player)
else:
# Walk over all actions at my node
for action_idx in range(num_legal_actions):
child_values[action_idx] = self._update_regrets(
state.child(legal_actions[action_idx]), player)
value += policy[action_idx] * child_values[action_idx]
if cur_player == player:
# Update regrets.
for action_idx in range(num_legal_actions):
self._add_regret(info_state_key, action_idx,
child_values[action_idx] - value)
    # The simple averaging rule does its averaging at opponent nodes. In games
    # with more than two players, we only update the average policy of player
    # (player + 1) mod num_players, which reduces to the standard rule in the
    # two-player case.
if self._average_type == AverageType.SIMPLE and cur_player == (
player + 1) % self._num_players:
for action_idx in range(num_legal_actions):
self._add_avstrat(info_state_key, action_idx, policy[action_idx])
return value
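# A minimal usage sketch of the solver above: run external sampling MCCFR on
# Kuhn poker and measure exploitability. It assumes the base solver exposes
# `average_policy()` and that `exploitability.nash_conv` is available, as in
# OpenSpiel's other MCCFR examples; the game and iteration count are
# illustrative.
def _example_usage(num_iterations=1000):
  from open_spiel.python.algorithms import exploitability
  game = pyspiel.load_game("kuhn_poker")
  solver = ExternalSamplingSolver(game, AverageType.SIMPLE)
  for _ in range(num_iterations):
    solver.iteration()
  # NashConv approaches 0 as the average policy approaches an equilibrium.
  return exploitability.nash_conv(game, solver.average_policy())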
| open_spiel-master | open_spiel/python/algorithms/external_sampling_mccfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.random_agent."""
from absl.testing import absltest
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import random_agent
class RandomAgentTest(absltest.TestCase):
def test_step(self):
agent = random_agent.RandomAgent(player_id=0, num_actions=10)
legal_actions = [0, 2, 3, 5]
time_step = rl_environment.TimeStep(
observations={
"info_state": [[0], [1]],
"legal_actions": [legal_actions, []],
"current_player": 0
},
rewards=None,
discounts=None,
step_type=None)
agent_output = agent.step(time_step)
self.assertIn(agent_output.action, legal_actions)
self.assertAlmostEqual(sum(agent_output.probs), 1.0)
self.assertEqual(
len([x for x in agent_output.probs if x > 0]), len(legal_actions))
self.assertTrue(
np.allclose(agent_output.probs[legal_actions], [.25] * 4, atol=1e-5))
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/random_agent_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.cfr."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import cfr_br
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import exploitability
import pyspiel
_KUHN_GAME = pyspiel.load_game("kuhn_poker")
_LEDUC_GAME = pyspiel.load_game("leduc_poker")
_KUHN_UNIFORM_POLICY = policy.TabularPolicy(_KUHN_GAME)
_LEDUC_UNIFORM_POLICY = policy.TabularPolicy(_LEDUC_GAME)
_EXPECTED_EXPLOITABILITIES_CFRBR_KUHN = [
0.9166666666666666, 0.33333333333333337, 0.3194444444444445,
0.2604166666666667, 0.22666666666666674
]
_EXPECTED_EXPLOITABILITIES_CFRBR_LEDUC = [
4.747222222222222, 4.006867283950617, 3.4090489231017034,
2.8982539553095172, 2.5367193593344504
]
class CFRBRTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(
list(itertools.product([True, False], [True, False])))
def test_policy_zero_is_uniform(self, linear_averaging, regret_matching_plus):
game = pyspiel.load_game("leduc_poker")
cfr_solver = cfr_br.CFRBRSolver(
game,
regret_matching_plus=regret_matching_plus,
linear_averaging=linear_averaging)
np.testing.assert_array_equal(
_LEDUC_UNIFORM_POLICY.action_probability_array,
cfr_solver.current_policy().action_probability_array)
np.testing.assert_array_equal(
_LEDUC_UNIFORM_POLICY.action_probability_array,
cfr_solver.average_policy().action_probability_array)
def test_policy_and_average_policy(self):
game = pyspiel.load_game("kuhn_poker")
cfrbr_solver = cfr_br.CFRBRSolver(game)
for _ in range(300):
cfrbr_solver.evaluate_and_update_policy()
average_policy = cfrbr_solver.average_policy()
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
# 1/18 is the Nash value. See https://en.wikipedia.org/wiki/Kuhn_poker
np.testing.assert_allclose(
average_policy_values, [-1 / 18, 1 / 18], atol=1e-3)
cfrbr_solver.current_policy()
@parameterized.parameters([
(_KUHN_GAME, pyspiel.CFRBRSolver, _EXPECTED_EXPLOITABILITIES_CFRBR_KUHN),
(_KUHN_GAME, cfr_br.CFRBRSolver, _EXPECTED_EXPLOITABILITIES_CFRBR_KUHN),
(_LEDUC_GAME, pyspiel.CFRBRSolver,
_EXPECTED_EXPLOITABILITIES_CFRBR_LEDUC),
(_LEDUC_GAME, cfr_br.CFRBRSolver, _EXPECTED_EXPLOITABILITIES_CFRBR_LEDUC),
])
def test_cpp_and_python_cfr_br(self, game, solver_cls,
expected_exploitability):
solver = solver_cls(game)
for step in range(5):
solver.evaluate_and_update_policy()
# We do not compare the policy directly as we do not have an easy way to
# convert one to the other, so we use the exploitability as a proxy.
avg_policy = solver.average_policy()
if solver_cls == pyspiel.CFRBRSolver:
exploitability_ = pyspiel.nash_conv(game, avg_policy)
else:
exploitability_ = exploitability.nash_conv(game, avg_policy)
      self.assertAlmostEqual(expected_exploitability[step], exploitability_)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/cfr_br_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the min-max algorithm with alpha-beta pruning.
Solves perfect play for deterministic, two-player, perfect-information,
zero-sum games.
See for example https://en.wikipedia.org/wiki/Alpha-beta_pruning
"""
import pyspiel
def _alpha_beta(state, depth, alpha, beta, value_function,
maximizing_player_id):
"""An alpha-beta algorithm.
Implements a min-max algorithm with alpha-beta pruning.
See for example https://en.wikipedia.org/wiki/Alpha-beta_pruning
Arguments:
state: The current state node of the game.
depth: The maximum depth for the min/max search.
    alpha: the best value that the MAX player can currently guarantee (if a
      value is <= alpha, the MAX player will avoid it).
    beta: the best value that the MIN player can currently guarantee (if a
      value is >= beta, the MIN player will avoid it).
value_function: An optional function mapping a Spiel `State` to a numerical
value, to be used as the value of the maximizing player for a node when we
reach `maximum_depth` and the node is not terminal.
maximizing_player_id: The id of the MAX player. The other player is assumed
to be MIN.
Returns:
A tuple of the optimal value of the sub-game starting in state
(given alpha/beta) and the move that achieved it.
Raises:
    NotImplementedError: If the maximum depth is reached at a non-terminal
      node and no `value_function` was provided, so the search cannot be cut
      off early with an estimate.
"""
if state.is_terminal():
return state.player_return(maximizing_player_id), None
if depth == 0 and value_function is None:
raise NotImplementedError(
"We assume we can walk the full depth of the tree. "
"Try increasing the maximum_depth or provide a value_function.")
if depth == 0:
return value_function(state), None
player = state.current_player()
best_action = -1
if player == maximizing_player_id:
value = -float("inf")
for action in state.legal_actions():
child_state = state.clone()
child_state.apply_action(action)
child_value, _ = _alpha_beta(child_state, depth - 1, alpha, beta,
value_function, maximizing_player_id)
if child_value > value:
value = child_value
best_action = action
alpha = max(alpha, value)
if alpha >= beta:
break # beta cut-off
return value, best_action
else:
value = float("inf")
for action in state.legal_actions():
child_state = state.clone()
child_state.apply_action(action)
child_value, _ = _alpha_beta(child_state, depth - 1, alpha, beta,
value_function, maximizing_player_id)
if child_value < value:
value = child_value
best_action = action
beta = min(beta, value)
if alpha >= beta:
break # alpha cut-off
return value, best_action
def alpha_beta_search(game,
state=None,
value_function=None,
maximum_depth=30,
maximizing_player_id=None):
"""Solves deterministic, 2-players, perfect-information 0-sum game.
For small games only! Please use keyword arguments for optional arguments.
Arguments:
game: The game to analyze, as returned by `load_game`.
state: The state to run from, as returned by `game.new_initial_state()`. If
none is specified, then the initial state is assumed.
value_function: An optional function mapping a Spiel `State` to a numerical
value, to be used as the value of the maximizing player for a node when we
reach `maximum_depth` and the node is not terminal.
    maximum_depth: The maximum depth to search over. When this depth is
      reached, an exception is raised unless a `value_function` is provided.
maximizing_player_id: The id of the MAX player. The other player is assumed
to be MIN. The default (None) will suppose the player at the root to be
the MAX player.
Returns:
A tuple containing the value of the game for the maximizing player when
both player play optimally, and the action that achieves this value.
"""
game_info = game.get_type()
if game.num_players() != 2:
raise ValueError("Game must be a 2-player game")
  if game_info.chance_mode != pyspiel.GameType.ChanceMode.DETERMINISTIC:
    raise ValueError("The game must be a Deterministic one, not {}".format(
        game_info.chance_mode))
  if game_info.information != pyspiel.GameType.Information.PERFECT_INFORMATION:
    raise ValueError(
        "The game must be a perfect information one, not {}".format(
            game_info.information))
  if game_info.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
    raise ValueError("The game must be turn-based, not {}".format(
        game_info.dynamics))
  if game_info.utility != pyspiel.GameType.Utility.ZERO_SUM:
    raise ValueError(
        "The game must be 0-sum, not {}".format(game_info.utility))
if state is None:
state = game.new_initial_state()
if maximizing_player_id is None:
maximizing_player_id = state.current_player()
return _alpha_beta(
state.clone(),
maximum_depth,
alpha=-float("inf"),
beta=float("inf"),
value_function=value_function,
maximizing_player_id=maximizing_player_id)
def expectiminimax(state, depth, value_function, maximizing_player_id):
"""Runs expectiminimax until the specified depth.
See https://en.wikipedia.org/wiki/Expectiminimax for details.
Arguments:
state: The state to start the search from.
depth: The depth of the search (not counting chance nodes).
value_function: A value function, taking in a state and returning a value,
in terms of the maximizing_player_id.
maximizing_player_id: The player running the search (current player at root
of the search tree).
Returns:
A tuple (value, best_action) representing the value to the maximizing player
and the best action that achieves that value. None is returned as the best
action at chance nodes, the depth limit, and terminals.
"""
if state.is_terminal():
return state.player_return(maximizing_player_id), None
if depth == 0:
return value_function(state), None
if state.is_chance_node():
value = 0
for outcome, prob in state.chance_outcomes():
child_state = state.clone()
child_state.apply_action(outcome)
child_value, _ = expectiminimax(child_state, depth, value_function,
maximizing_player_id)
value += prob * child_value
return value, None
elif state.current_player() == maximizing_player_id:
value = -float("inf")
for action in state.legal_actions():
child_state = state.clone()
child_state.apply_action(action)
child_value, _ = expectiminimax(child_state, depth - 1, value_function,
maximizing_player_id)
if child_value > value:
value = child_value
best_action = action
return value, best_action
else:
value = float("inf")
for action in state.legal_actions():
child_state = state.clone()
child_state.apply_action(action)
child_value, _ = expectiminimax(child_state, depth - 1, value_function,
maximizing_player_id)
if child_value < value:
value = child_value
best_action = action
return value, best_action
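# A minimal usage sketch of `alpha_beta_search`: solve tic-tac-toe from the
# initial state. The game choice is an illustrative assumption; any
# deterministic, two-player, perfect-information, zero-sum game passes the
# checks above. For tic-tac-toe the optimal value is 0 (a draw).
def _example_alpha_beta_search():
  game = pyspiel.load_game("tic_tac_toe")
  value, best_action = alpha_beta_search(game)
  return value, best_action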
| open_spiel-master | open_spiel/python/algorithms/minimax.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
The algorithm defines an `advantage` and `strategy` networks that compute
advantages used to do regret matching across information sets and to approximate
the strategy profiles of the game. To train these networks a reservoir buffer
(other data structures may be used) memory is used to accumulate samples to
train the networks.
"""
import collections
import random
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python import simple_nets
import pyspiel
# Temporarily Disable TF2 behavior until we update the code.
tf.disable_v2_behavior()
AdvantageMemory = collections.namedtuple(
"AdvantageMemory", "info_state iteration advantage action")
StrategyMemory = collections.namedtuple(
"StrategyMemory", "info_state iteration strategy_action_probs")
# TODO(author3) Refactor into data structures lib.
class ReservoirBuffer(object):
"""Allows uniform sampling over a stream of data.
This class supports the storage of arbitrary elements, such as observation
tensors, integer actions, etc.
See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
"""
def __init__(self, reservoir_buffer_capacity):
self._reservoir_buffer_capacity = reservoir_buffer_capacity
self._data = []
self._add_calls = 0
def add(self, element):
"""Potentially adds `element` to the reservoir buffer.
Args:
element: data to be added to the reservoir buffer.
"""
if len(self._data) < self._reservoir_buffer_capacity:
self._data.append(element)
else:
idx = np.random.randint(0, self._add_calls + 1)
if idx < self._reservoir_buffer_capacity:
self._data[idx] = element
self._add_calls += 1
def sample(self, num_samples):
"""Returns `num_samples` uniformly sampled from the buffer.
Args:
num_samples: `int`, number of samples to draw.
Returns:
An iterable over `num_samples` random elements of the buffer.
Raises:
ValueError: If there are less than `num_samples` elements in the buffer
"""
if len(self._data) < num_samples:
raise ValueError("{} elements could not be sampled from size {}".format(
num_samples, len(self._data)))
return random.sample(self._data, num_samples)
def clear(self):
self._data = []
self._add_calls = 0
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
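# A minimal sketch of the buffer's contract: once full, it keeps a uniform
# sample over everything ever added. The capacity and number of additions are
# illustrative.
def _example_reservoir_usage():
  buf = ReservoirBuffer(reservoir_buffer_capacity=3)
  for i in range(100):
    buf.add(i)  # Each of the 100 elements is equally likely to be retained.
  assert len(buf) == 3
  return buf.sample(2)  # Two elements drawn uniformly from the buffer.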
class DeepCFRSolver(policy.Policy):
"""Implements a solver for the Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
Define all networks and sampling buffers/memories. Derive losses & learning
steps. Initialize the game state and algorithmic variables.
Note: batch sizes default to `None` implying that training over the full
dataset in memory is done by default. To sample from the memories you
may set these values to something less than the full capacity of the
memory.
"""
def __init__(self,
session,
game,
policy_network_layers=(256, 256),
advantage_network_layers=(128, 128),
num_iterations: int = 100,
num_traversals: int = 20,
learning_rate: float = 1e-4,
batch_size_advantage=None,
batch_size_strategy=None,
memory_capacity: int = int(1e6),
policy_network_train_steps: int = 1,
advantage_network_train_steps: int = 1,
reinitialize_advantage_networks: bool = True):
"""Initialize the Deep CFR algorithm.
Args:
session: (tf.Session) TensorFlow session.
game: Open Spiel game.
policy_network_layers: (list[int]) Layer sizes of strategy net MLP.
advantage_network_layers: (list[int]) Layer sizes of advantage net MLP.
num_iterations: Number of iterations.
num_traversals: Number of traversals per iteration.
learning_rate: Learning rate.
batch_size_advantage: (int or None) Batch size to sample from advantage
memories.
batch_size_strategy: (int or None) Batch size to sample from strategy
memories.
memory_capacity: Number of samples that can be stored in memory.
policy_network_train_steps: Number of policy network training steps (per
iteration).
advantage_network_train_steps: Number of advantage network training steps
(per iteration).
reinitialize_advantage_networks: Whether to re-initialize the
advantage network before training on each iteration.
"""
all_players = list(range(game.num_players()))
super(DeepCFRSolver, self).__init__(game, all_players)
self._game = game
if game.get_type().dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
# `_traverse_game_tree` does not take into account this option.
raise ValueError("Simulatenous games are not supported.")
self._session = session
self._batch_size_advantage = batch_size_advantage
self._batch_size_strategy = batch_size_strategy
self._policy_network_train_steps = policy_network_train_steps
self._advantage_network_train_steps = advantage_network_train_steps
self._num_players = game.num_players()
self._root_node = self._game.new_initial_state()
# TODO(author6) Allow embedding size (and network) to be specified.
self._embedding_size = len(self._root_node.information_state_tensor(0))
self._num_iterations = num_iterations
self._num_traversals = num_traversals
self._reinitialize_advantage_networks = reinitialize_advantage_networks
self._num_actions = game.num_distinct_actions()
self._iteration = 1
self._environment_steps = 0
# Create required TensorFlow placeholders to perform the Q-network updates.
self._info_state_ph = tf.placeholder(
shape=[None, self._embedding_size],
dtype=tf.float32,
name="info_state_ph")
self._info_state_action_ph = tf.placeholder(
shape=[None, self._embedding_size + 1],
dtype=tf.float32,
name="info_state_action_ph")
self._action_probs_ph = tf.placeholder(
shape=[None, self._num_actions],
dtype=tf.float32,
name="action_probs_ph")
self._iter_ph = tf.placeholder(
shape=[None, 1], dtype=tf.float32, name="iter_ph")
self._advantage_ph = []
for p in range(self._num_players):
self._advantage_ph.append(
tf.placeholder(
shape=[None, self._num_actions],
dtype=tf.float32,
name="advantage_ph_" + str(p)))
# Define strategy network, loss & memory.
self._strategy_memories = ReservoirBuffer(memory_capacity)
self._policy_network = simple_nets.MLP(self._embedding_size,
list(policy_network_layers),
self._num_actions)
action_logits = self._policy_network(self._info_state_ph)
    # Illegal actions are handled in the traversal code, where expected payoffs
    # and sampled regrets are computed from the advantage networks.
self._action_probs = tf.nn.softmax(action_logits)
self._loss_policy = tf.reduce_mean(
tf.losses.mean_squared_error(
labels=tf.math.sqrt(self._iter_ph) * self._action_probs_ph,
predictions=tf.math.sqrt(self._iter_ph) * self._action_probs))
self._optimizer_policy = tf.train.AdamOptimizer(learning_rate=learning_rate)
self._learn_step_policy = self._optimizer_policy.minimize(self._loss_policy)
# Define advantage network, loss & memory. (One per player)
self._advantage_memories = [
ReservoirBuffer(memory_capacity) for _ in range(self._num_players)
]
self._advantage_networks = [
simple_nets.MLP(self._embedding_size, list(advantage_network_layers),
self._num_actions) for _ in range(self._num_players)
]
self._advantage_outputs = [
self._advantage_networks[i](self._info_state_ph)
for i in range(self._num_players)
]
self._loss_advantages = []
self._optimizer_advantages = []
self._learn_step_advantages = []
for p in range(self._num_players):
self._loss_advantages.append(
tf.reduce_mean(
tf.losses.mean_squared_error(
labels=tf.math.sqrt(self._iter_ph) * self._advantage_ph[p],
predictions=tf.math.sqrt(self._iter_ph) *
self._advantage_outputs[p])))
self._optimizer_advantages.append(
tf.train.AdamOptimizer(learning_rate=learning_rate))
self._learn_step_advantages.append(self._optimizer_advantages[p].minimize(
self._loss_advantages[p]))
@property
def advantage_buffers(self):
return self._advantage_memories
@property
def strategy_buffer(self):
return self._strategy_memories
def clear_advantage_buffers(self):
for p in range(self._num_players):
self._advantage_memories[p].clear()
def reinitialize_advantage_networks(self):
for p in range(self._num_players):
self.reinitialize_advantage_network(p)
def reinitialize_advantage_network(self, player):
self._session.run(
tf.group(*[
var.initializer
for var in self._advantage_networks[player].variables
]))
def solve(self):
"""Solution logic for Deep CFR."""
advantage_losses = collections.defaultdict(list)
for _ in range(self._num_iterations):
for p in range(self._num_players):
for _ in range(self._num_traversals):
self._traverse_game_tree(self._root_node, p)
if self._reinitialize_advantage_networks:
# Re-initialize advantage network for player and train from scratch.
self.reinitialize_advantage_network(p)
advantage_losses[p].append(self._learn_advantage_network(p))
self._iteration += 1
# Train policy network.
policy_loss = self._learn_strategy_network()
return self._policy_network, advantage_losses, policy_loss
def get_environment_steps(self):
return self._environment_steps
def _traverse_game_tree(self, state, player):
"""Performs a traversal of the game tree.
Over a traversal the advantage and strategy memories are populated with
computed advantage values and matched regrets respectively.
Args:
state: Current OpenSpiel game state.
player: (int) Player index for this traversal.
Returns:
Recursively returns expected payoffs for each action.
"""
self._environment_steps += 1
expected_payoff = collections.defaultdict(float)
if state.is_terminal():
      # Terminal state: return this player's utility.
return state.returns()[player]
elif state.is_chance_node():
# If this is a chance node, sample an action
chance_outcome, chance_proba = zip(*state.chance_outcomes())
action = np.random.choice(chance_outcome, p=chance_proba)
return self._traverse_game_tree(state.child(action), player)
elif state.current_player() == player:
sampled_regret = collections.defaultdict(float)
# Update the policy over the info set & actions via regret matching.
_, strategy = self._sample_action_from_advantage(state, player)
for action in state.legal_actions():
expected_payoff[action] = self._traverse_game_tree(
state.child(action), player)
cfv = 0
for a_ in state.legal_actions():
cfv += strategy[a_] * expected_payoff[a_]
for action in state.legal_actions():
sampled_regret[action] = expected_payoff[action]
sampled_regret[action] -= cfv
sampled_regret_arr = [0] * self._num_actions
for action in sampled_regret:
sampled_regret_arr[action] = sampled_regret[action]
self._advantage_memories[player].add(
AdvantageMemory(state.information_state_tensor(), self._iteration,
sampled_regret_arr, action))
return cfv
else:
other_player = state.current_player()
_, strategy = self._sample_action_from_advantage(state, other_player)
      # Recompute the distribution to correct for numerical errors.
probs = np.array(strategy)
probs /= probs.sum()
sampled_action = np.random.choice(range(self._num_actions), p=probs)
self._strategy_memories.add(
StrategyMemory(
state.information_state_tensor(other_player), self._iteration,
strategy))
return self._traverse_game_tree(state.child(sampled_action), player)
def _sample_action_from_advantage(self, state, player):
"""Returns an info state policy by applying regret-matching.
Args:
state: Current OpenSpiel game state.
player: (int) Player index over which to compute regrets.
Returns:
1. (list) Advantage values for info state actions indexed by action.
2. (list) Matched regrets, prob for actions indexed by action.
"""
info_state = state.information_state_tensor(player)
legal_actions = state.legal_actions(player)
advantages_full = self._session.run(
self._advantage_outputs[player],
feed_dict={self._info_state_ph: np.expand_dims(info_state, axis=0)})[0]
advantages = [max(0., advantage) for advantage in advantages_full]
cumulative_regret = np.sum([advantages[action] for action in legal_actions])
matched_regrets = np.array([0.] * self._num_actions)
if cumulative_regret > 0.:
for action in legal_actions:
matched_regrets[action] = advantages[action] / cumulative_regret
else:
matched_regrets[max(legal_actions, key=lambda a: advantages_full[a])] = 1
return advantages, matched_regrets
def action_probabilities(self, state):
"""Returns action probabilities dict for a single batch."""
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
info_state_vector = np.array(state.information_state_tensor())
if len(info_state_vector.shape) == 1:
info_state_vector = np.expand_dims(info_state_vector, axis=0)
probs = self._session.run(
self._action_probs, feed_dict={self._info_state_ph: info_state_vector})
return {action: probs[0][action] for action in legal_actions}
def _learn_advantage_network(self, player):
"""Compute the loss on sampled transitions and perform a Q-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Args:
player: (int) player index.
Returns:
The average loss over the advantage network.
"""
for _ in range(self._advantage_network_train_steps):
if self._batch_size_advantage:
if self._batch_size_advantage > len(self._advantage_memories[player]):
## Skip if there aren't enough samples
return None
samples = self._advantage_memories[player].sample(
self._batch_size_advantage)
else:
samples = self._advantage_memories[player]
info_states = []
advantages = []
iterations = []
for s in samples:
info_states.append(s.info_state)
advantages.append(s.advantage)
iterations.append([s.iteration])
# Ensure some samples have been gathered.
if not info_states:
return None
loss_advantages, _ = self._session.run(
[self._loss_advantages[player], self._learn_step_advantages[player]],
feed_dict={
self._info_state_ph: np.array(info_states),
self._advantage_ph[player]: np.array(advantages),
self._iter_ph: np.array(iterations),
})
return loss_advantages
def _learn_strategy_network(self):
"""Compute the loss over the strategy network.
Returns:
The average loss obtained on this batch of transitions or `None`.
"""
for _ in range(self._policy_network_train_steps):
if self._batch_size_strategy:
if self._batch_size_strategy > len(self._strategy_memories):
## Skip if there aren't enough samples
return None
samples = self._strategy_memories.sample(self._batch_size_strategy)
else:
samples = self._strategy_memories
info_states = []
action_probs = []
iterations = []
for s in samples:
info_states.append(s.info_state)
action_probs.append(s.strategy_action_probs)
iterations.append([s.iteration])
loss_strategy, _ = self._session.run(
[self._loss_policy, self._learn_step_policy],
feed_dict={
self._info_state_ph: np.array(info_states),
self._action_probs_ph: np.array(np.squeeze(action_probs)),
self._iter_ph: np.array(iterations),
})
return loss_strategy
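# A minimal usage sketch of the solver above on Kuhn poker. The layer sizes and
# the iteration and traversal counts are illustrative assumptions kept tiny so
# the sketch finishes quickly; real experiments use far larger values.
def _example_usage():
  game = pyspiel.load_game("kuhn_poker")
  with tf.Session() as sess:
    deep_cfr_solver = DeepCFRSolver(
        sess,
        game,
        policy_network_layers=(32, 32),
        advantage_network_layers=(16, 16),
        num_iterations=2,
        num_traversals=2)
    sess.run(tf.global_variables_initializer())
    _, advantage_losses, policy_loss = deep_cfr_solver.solve()
  return advantage_losses, policy_loss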
| open_spiel-master | open_spiel/python/algorithms/deep_cfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
# Note: this import needs to come before Tensorflow to fix a malloc error.
import pyspiel # pylint: disable=g-bad-import-order
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import rcfr
# Temporarily disable TF2 behavior while the code is not updated.
tf.disable_v2_behavior()
tf.enable_eager_execution()
_GAME = pyspiel.load_game('kuhn_poker')
_BOOLEANS = [False, True]
def _new_model():
return rcfr.DeepRcfrModel(
_GAME,
num_hidden_layers=1,
num_hidden_units=13,
num_hidden_factors=1,
use_skip_connections=True)
class RcfrTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(RcfrTest, self).setUp()
tf.random.set_random_seed(42)
def test_with_one_hot_action_features_single_state_vector(self):
information_state_features = [1., 2., 3.]
features = rcfr.with_one_hot_action_features(
information_state_features,
legal_actions=[0, 1],
num_distinct_actions=3)
self.assertAllEqual([
[1., 2., 3., 1., 0., 0.],
[1., 2., 3., 0., 1., 0.],
], features)
features = rcfr.with_one_hot_action_features(
information_state_features,
legal_actions=[1, 2],
num_distinct_actions=3)
self.assertAllEqual([
[1., 2., 3., 0., 1., 0.],
[1., 2., 3., 0., 0., 1.],
], features)
def test_with_one_hot_action_features_batch(self):
info_state_features = [[1., 2., 3.], [4., 5., 6.]]
features = rcfr.with_one_hot_action_features(
info_state_features, legal_actions=[0, 1], num_distinct_actions=3)
self.assertAllEqual([
[1., 2., 3., 1., 0., 0.],
[4., 5., 6., 1., 0., 0.],
[1., 2., 3., 0., 1., 0.],
[4., 5., 6., 0., 1., 0.],
], features)
features = rcfr.with_one_hot_action_features(
info_state_features, legal_actions=[1, 2], num_distinct_actions=3)
self.assertAllEqual([
[1., 2., 3., 0., 1., 0.],
[4., 5., 6., 0., 1., 0.],
[1., 2., 3., 0., 0., 1.],
[4., 5., 6., 0., 0., 1.],
], features)
def test_with_one_hot_action_features_error(self):
info_state_features = tf.ones([1, 1, 1])
with self.assertRaises(ValueError):
rcfr.with_one_hot_action_features(
info_state_features, legal_actions=[0, 1], num_distinct_actions=3)
def test_sequence_features(self):
state = _GAME.new_initial_state()
while state.is_chance_node():
state.apply_action(state.legal_actions()[0])
assert len(state.legal_actions()) == 2
features = rcfr.sequence_features(state, 3)
x = state.information_state_tensor()
self.assertAllEqual([x + [1, 0, 0], x + [0, 1, 0]], features)
def test_num_features(self):
assert rcfr.num_features(_GAME) == 13
def test_root_state_wrapper_num_sequences(self):
root_state_wrapper = rcfr.RootStateWrapper(_GAME.new_initial_state())
assert root_state_wrapper.num_player_sequences[0] == 12
assert root_state_wrapper.num_player_sequences[1] == 12
def test_root_state_wrapper_sequence_indices(self):
root_state_wrapper = rcfr.RootStateWrapper(_GAME.new_initial_state())
self.assertAllEqual(
{
# Info state string -> initial sequence index map for player 1.
'0': 0,
'0pb': 2,
'1': 4,
'1pb': 6,
'2': 8,
'2pb': 10,
# Info state string -> initial sequence index map for player 2.
'1p': 0,
'1b': 2,
'2p': 4,
'2b': 6,
'0p': 8,
'0b': 10,
},
root_state_wrapper.info_state_to_sequence_idx)
def test_root_state_wrapper_sequence_features(self):
root_state_wrapper = rcfr.RootStateWrapper(_GAME.new_initial_state())
p1_info_state_features = [
[1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 1., 0., 0., 1., 0., 0., 1., 0., 0.],
[1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 1., 0., 1., 0., 0., 1., 0., 0.],
[1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 1., 1., 0., 0., 1., 0., 0.],
]
p2_info_state_features = [
[0., 1., 0., 1., 0., 1., 0., 0., 0., 0., 0.],
[0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0.],
[0., 1., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
[0., 1., 0., 0., 1., 0., 1., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 1., 0., 0., 0., 0.],
]
action_features = [[1., 0.], [0., 1.]]
expected_p1_sequence_features = [
p1_info_state_features[0] + action_features[0],
p1_info_state_features[0] + action_features[1],
p1_info_state_features[1] + action_features[0],
p1_info_state_features[1] + action_features[1],
p1_info_state_features[2] + action_features[0],
p1_info_state_features[2] + action_features[1],
p1_info_state_features[3] + action_features[0],
p1_info_state_features[3] + action_features[1],
p1_info_state_features[4] + action_features[0],
p1_info_state_features[4] + action_features[1],
p1_info_state_features[5] + action_features[0],
p1_info_state_features[5] + action_features[1],
]
expected_p2_sequence_features = [
p2_info_state_features[0] + action_features[0],
p2_info_state_features[0] + action_features[1],
p2_info_state_features[1] + action_features[0],
p2_info_state_features[1] + action_features[1],
p2_info_state_features[2] + action_features[0],
p2_info_state_features[2] + action_features[1],
p2_info_state_features[3] + action_features[0],
p2_info_state_features[3] + action_features[1],
p2_info_state_features[4] + action_features[0],
p2_info_state_features[4] + action_features[1],
p2_info_state_features[5] + action_features[0],
p2_info_state_features[5] + action_features[1],
]
expected_sequence_features = [
expected_p1_sequence_features, expected_p2_sequence_features
]
self.assertAllEqual(expected_sequence_features,
root_state_wrapper.sequence_features)
def test_root_state_wrapper_sequence_terminal_values(self):
root_state_wrapper = rcfr.RootStateWrapper(_GAME.new_initial_state())
expected_terminal_values = {}
no_call_histories_p1_win = [
'2, 0, 0, 0', '2, 0, 1, 0', '0, 1, 1, 0', '1, 2, 1, 0', '1, 0, 1, 0',
'1, 0, 0, 0', '2, 1, 1, 0', '2, 1, 0, 0', '0, 2, 1, 0'
]
for h in no_call_histories_p1_win:
expected_terminal_values[h] = [1., -1.]
no_call_histories_p2_win = [
'0, 2, 0, 1, 0', '0, 1, 0, 0', '0, 1, 0, 1, 0', '0, 2, 0, 0',
'1, 2, 0, 0', '2, 0, 0, 1, 0', '1, 2, 0, 1, 0', '2, 1, 0, 1, 0',
'1, 0, 0, 1, 0'
]
for h in no_call_histories_p2_win:
expected_terminal_values[h] = [-1., 1.]
call_histories_p1_win = [
'1, 0, 1, 1', '2, 1, 1, 1', '2, 1, 0, 1, 1', '2, 0, 0, 1, 1',
'1, 0, 0, 1, 1', '2, 0, 1, 1'
]
for h in call_histories_p1_win:
expected_terminal_values[h] = [2., -2.]
call_histories_p2_win = [
'0, 2, 0, 1, 1', '0, 1, 0, 1, 1', '0, 1, 1, 1', '1, 2, 1, 1',
'1, 2, 0, 1, 1', '0, 2, 1, 1'
]
for h in call_histories_p2_win:
expected_terminal_values[h] = [-2., 2.]
self.assertAllEqual(
expected_terminal_values,
{k: v.tolist() for k, v in root_state_wrapper.terminal_values.items()})
def test_normalized_by_sum(self):
self.assertAllClose(
rcfr.normalized_by_sum([1., 2., 3., 4.]), [0.1, 0.2, 0.3, 0.4])
def test_counterfactual_regrets_and_reach_weights_value_error(self):
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
# Initialize arbitrary weights to generate an arbitrary profile.
sequence_weights1_with_a_missing_sequence = [
0.4967141530112327,
0.0,
0.6476885381006925,
1.5230298564080254,
0.0,
0.0,
1.5792128155073915,
0.7674347291529088,
0.0,
0.5425600435859647,
0.0,
# 0.0,
]
# Ensure this player's policy is fully mixed so that each of player 1's
# information states are reached.
sequence_weights2 = [
0.24196227156603412,
0.1,
0.1,
0.1,
0.1,
0.3142473325952739,
0.1,
0.1,
1.465648768921554,
0.1,
0.06752820468792384,
0.1,
]
with self.assertRaises(ValueError):
root.counterfactual_regrets_and_reach_weights(
0, 1, sequence_weights1_with_a_missing_sequence, sequence_weights2)
def test_counterfactual_regrets_and_reach_weights(self):
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
# Initialize arbitrary weights to generate an arbitrary profile.
sequence_weights1 = [
0.4967141530112327,
0.0,
0.6476885381006925,
1.5230298564080254,
0.0,
0.0,
1.5792128155073915,
0.7674347291529088,
0.0,
0.5425600435859647,
0.0,
0.0,
]
sequence_weights2 = [
0.24196227156603412,
0.0,
0.0,
0.0,
0.0,
0.3142473325952739,
0.0,
0.0,
1.465648768921554,
0.0,
0.06752820468792384,
0.0,
]
# These expected regrets and sequence weights were computed for the given
# sequence weights.
expected_regrets_given_sequence_weights = [
0.,
0.283604,
0.116937,
-0.049729,
-0.06892,
0.06892,
0.054506,
-0.112161,
-0.083333,
0.,
0.,
0.,
]
expected_reach_weights_given_sequence_weights = [
2.,
0.,
1.,
1.,
0.,
2.,
1.,
1.,
2.,
0.,
2.,
0.,
]
regrets, weights = root.counterfactual_regrets_and_reach_weights(
0, 1, sequence_weights1, sequence_weights2)
self.assertAllClose(regrets, expected_regrets_given_sequence_weights)
self.assertAllClose(weights, expected_reach_weights_given_sequence_weights)
def test_all_states(self):
states = rcfr.all_states(
_GAME.new_initial_state(),
depth_limit=-1,
include_terminals=False,
include_chance_states=False)
self.assertLen(list(states), 24)
states = rcfr.all_states(
_GAME.new_initial_state(),
depth_limit=-1,
include_terminals=True,
include_chance_states=False)
self.assertLen(list(states), 54)
states = rcfr.all_states(
_GAME.new_initial_state(),
depth_limit=-1,
include_terminals=False,
include_chance_states=True)
self.assertLen(list(states), 28)
states = rcfr.all_states(
_GAME.new_initial_state(),
depth_limit=-1,
include_terminals=True,
include_chance_states=True)
self.assertLen(list(states), 58)
def test_sequence_weights_to_tabular_profile(self):
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
def policy_fn(state):
"""Generates a policy profile by treating sequence indices as weights."""
info_state = state.information_state_string()
sequence_offset = root.info_state_to_sequence_idx[info_state]
num_actions = len(state.legal_actions())
return rcfr.normalized_by_sum(
list(range(sequence_offset, sequence_offset + num_actions)))
profile = rcfr.sequence_weights_to_tabular_profile(root.root, policy_fn)
expected_profile = {
# Player 1
'0': [(0, 0.), (1, 1.)], # Sequences 0 and 1 (sums to 1)
'0pb': [(0, 0.4), (1, 0.6)], # Sequences 2 and 3 (sums to 5)
# Sequences 4 and 5 (sums to 9)
'1': [(0, 0.44444444444444442), (1, 0.55555555555555558)],
# Sequences 6 and 7 (sums to 13)
'1pb': [(0, 0.46153846153846156), (1, 0.53846153846153844)],
# Sequences 8 and 9 (sums to 17)
'2': [(0, 0.47058823529411764), (1, 0.52941176470588236)],
# Sequences 10 and 11 (sums to 21)
'2pb': [(0, 0.47619047619047616), (1, 0.52380952380952384)],
# Player 2
'1p': [(0, 0.), (1, 1.)], # Sequences 0 and 1 (sums to 1)
'1b': [(0, 0.4), (1, 0.6)], # Sequences 2 and 3 (sums to 5)
# Sequences 4 and 5 (sums to 9)
'2p': [(0, 0.44444444444444442), (1, 0.55555555555555558)],
# Sequences 6 and 7 (sums to 13)
'2b': [(0, 0.46153846153846156), (1, 0.53846153846153844)],
# Sequences 8 and 9 (sums to 17)
'0p': [(0, 0.47058823529411764), (1, 0.52941176470588236)],
# Sequences 10 and 11 (sums to 21)
'0b': [(0, 0.47619047619047616), (1, 0.52380952380952384)],
}
self.assertAllClose(profile, expected_profile)
def test_cfr(self):
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
num_half_iterations = 6
cumulative_regrets = [np.zeros(n) for n in root.num_player_sequences]
cumulative_reach_weights = [np.zeros(n) for n in root.num_player_sequences]
average_profile = root.sequence_weights_to_tabular_profile(
cumulative_reach_weights)
self.assertGreater(pyspiel.nash_conv(_GAME, average_profile), 0.91)
regret_player = 0
for _ in range(num_half_iterations):
reach_weights_player = 1 if regret_player == 0 else 0
regrets, reach = root.counterfactual_regrets_and_reach_weights(
regret_player, reach_weights_player, *rcfr.relu(cumulative_regrets))
cumulative_regrets[regret_player] += regrets
cumulative_reach_weights[reach_weights_player] += reach
regret_player = reach_weights_player
average_profile = root.sequence_weights_to_tabular_profile(
cumulative_reach_weights)
self.assertLess(pyspiel.nash_conv(_GAME, average_profile), 0.27)
def test_rcfr_functions(self):
models = [_new_model() for _ in range(_GAME.num_players())]
root = rcfr.RootStateWrapper(_GAME.new_initial_state())
num_half_iterations = 4
num_epochs = 100
cumulative_regrets = [np.zeros(n) for n in root.num_player_sequences]
cumulative_reach_weights = [np.zeros(n) for n in root.num_player_sequences]
average_profile = root.sequence_weights_to_tabular_profile(
cumulative_reach_weights)
self.assertGreater(pyspiel.nash_conv(_GAME, average_profile), 0.91)
regret_player = 0
sequence_weights = [
model(root.sequence_features[player]).numpy()
for player, model in enumerate(models)
]
for _ in range(num_half_iterations):
reach_weights_player = 1 if regret_player == 0 else 0
sequence_weights[reach_weights_player] = models[reach_weights_player](
root.sequence_features[reach_weights_player]).numpy()
regrets, seq_probs = root.counterfactual_regrets_and_reach_weights(
regret_player, reach_weights_player, *sequence_weights)
cumulative_regrets[regret_player] += regrets
cumulative_reach_weights[reach_weights_player] += seq_probs
data = tf.data.Dataset.from_tensor_slices(
(root.sequence_features[regret_player],
tf.expand_dims(cumulative_regrets[regret_player], axis=1)))
data = data.shuffle(12)
data = data.batch(12)
data = data.repeat(num_epochs)
optimizer = tf.keras.optimizers.Adam(lr=0.005, amsgrad=True)
for x, y in data:
optimizer.minimize(
lambda: tf.losses.huber_loss(y, models[regret_player](x)), # pylint: disable=cell-var-from-loop
models[regret_player].trainable_variables)
regret_player = reach_weights_player
average_profile = root.sequence_weights_to_tabular_profile(
cumulative_reach_weights)
self.assertLess(pyspiel.nash_conv(_GAME, average_profile), 0.91)
@parameterized.parameters(list(itertools.product(_BOOLEANS, _BOOLEANS)))
def test_rcfr(self, bootstrap, truncate_negative):
num_epochs = 100
num_iterations = 2
models = [_new_model() for _ in range(_GAME.num_players())]
patient = rcfr.RcfrSolver(
_GAME, models, bootstrap=bootstrap, truncate_negative=truncate_negative)
def _train(model, data):
data = data.shuffle(12)
data = data.batch(12)
data = data.repeat(num_epochs)
optimizer = tf.keras.optimizers.Adam(lr=0.005, amsgrad=True)
for x, y in data:
optimizer.minimize(
lambda: tf.losses.huber_loss(y, model(x)), # pylint: disable=cell-var-from-loop
model.trainable_variables)
average_policy = patient.average_policy()
self.assertGreater(pyspiel.nash_conv(_GAME, average_policy), 0.91)
for _ in range(num_iterations):
patient.evaluate_and_update_policy(_train)
average_policy = patient.average_policy()
self.assertLess(pyspiel.nash_conv(_GAME, average_policy), 0.91)
def test_reservior_buffer_insert(self):
buffer_size = 10
patient = rcfr.ReservoirBuffer(buffer_size)
x_buffer = []
for i in range(buffer_size):
patient.insert(i)
x_buffer.append(i)
assert patient.num_elements == len(x_buffer)
self.assertAllEqual(x_buffer, patient.buffer)
assert patient.num_available_spaces() == 0
for i in range(buffer_size):
patient.insert(buffer_size + i)
assert patient.num_elements == buffer_size
def test_reservior_buffer_insert_all(self):
buffer_size = 10
patient = rcfr.ReservoirBuffer(buffer_size)
x_buffer = list(range(buffer_size))
patient.insert_all(x_buffer)
assert patient.num_elements == buffer_size
self.assertAllEqual(x_buffer, patient.buffer)
assert patient.num_available_spaces() == 0
x_buffer = list(range(buffer_size, 2 * buffer_size))
patient.insert_all(x_buffer)
assert patient.num_elements == buffer_size
def test_rcfr_with_buffer(self):
buffer_size = 12
num_epochs = 100
num_iterations = 2
models = [_new_model() for _ in range(_GAME.num_players())]
patient = rcfr.ReservoirRcfrSolver(_GAME, models, buffer_size=buffer_size)
def _train(model, data):
data = data.shuffle(12)
data = data.batch(12)
data = data.repeat(num_epochs)
optimizer = tf.keras.optimizers.Adam(lr=0.005, amsgrad=True)
for x, y in data:
optimizer.minimize(
lambda: tf.losses.huber_loss(y, model(x)), # pylint: disable=cell-var-from-loop
model.trainable_variables)
average_policy = patient.average_policy()
self.assertGreater(pyspiel.nash_conv(_GAME, average_policy), 0.91)
for _ in range(num_iterations):
patient.evaluate_and_update_policy(_train)
average_policy = patient.average_policy()
self.assertLess(pyspiel.nash_conv(_GAME, average_policy), 0.91)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/rcfr_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Boltzmann Q learning agent.
This algorithm is a variation of Q learning that uses action selection
based on boltzmann probability interpretation of Q-values.
For more details, see equation (2) page 2 in
https://arxiv.org/pdf/1109.1528.pdf
"""
import numpy as np
from open_spiel.python import rl_tools
from open_spiel.python.algorithms import tabular_qlearner
class BoltzmannQLearner(tabular_qlearner.QLearner):
"""Tabular Boltzmann Q-Learning agent.
See open_spiel/python/examples/tic_tac_toe_qlearner.py for an usage example.
The tic_tac_toe example uses the standard Qlearner. Using the
BoltzmannQlearner is
identical and only differs in the initialization of the agents.
"""
def __init__(self,
player_id,
num_actions,
step_size=0.1,
discount_factor=1.0,
temperature_schedule=rl_tools.ConstantSchedule(.5),
centralized=False):
super().__init__(
player_id,
num_actions,
step_size=step_size,
discount_factor=discount_factor,
epsilon_schedule=temperature_schedule,
centralized=centralized)
def _softmax(self, info_state, legal_actions, temperature):
"""Action selection based on boltzmann probability interpretation of Q-values.
For more details, see equation (2) page 2 in
https://arxiv.org/pdf/1109.1528.pdf
Args:
info_state: hashable representation of the information state.
legal_actions: list of actions at `info_state`.
temperature: temperature used for softmax.
Returns:
A valid soft-max selected action and valid action probabilities.
"""
probs = np.zeros(self._num_actions)
if temperature > 0.0:
probs += [
np.exp((1 / temperature) * self._q_values[info_state][i])
for i in range(self._num_actions)
]
probs /= np.sum(probs)
else:
# Temperature = 0 causes normal greedy action selection
greedy_q = max([self._q_values[info_state][a] for a in legal_actions])
greedy_actions = [
a for a in legal_actions if self._q_values[info_state][a] == greedy_q
]
probs[greedy_actions] += 1 / len(greedy_actions)
action = np.random.choice(range(self._num_actions), p=probs)
return action, probs
def _get_action_probs(self, info_state, legal_actions, epsilon):
"""Returns a selected action and the probabilities of legal actions."""
return self._softmax(info_state, legal_actions, temperature=epsilon)
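# A minimal self-play sketch on tic-tac-toe, mirroring the standard QLearner
# example referenced in the class docstring; only the agent construction
# differs. The temperature value and episode count are illustrative
# assumptions.
def _example_usage(num_episodes=10):
  from open_spiel.python import rl_environment
  env = rl_environment.Environment("tic_tac_toe")
  num_actions = env.action_spec()["num_actions"]
  agents = [
      BoltzmannQLearner(
          player_id=idx,
          num_actions=num_actions,
          temperature_schedule=rl_tools.ConstantSchedule(0.5))
      for idx in range(env.num_players)
  ]
  for _ in range(num_episodes):
    time_step = env.reset()
    while not time_step.last():
      player = time_step.observations["current_player"]
      agent_output = agents[player].step(time_step)
      time_step = env.step([agent_output.action])
    for agent in agents:
      agent.step(time_step)  # Final step so agents learn from the last reward.
  return agents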
| open_spiel-master | open_spiel/python/algorithms/boltzmann_tabular_qlearner.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WoLF policy-hill climbing agent.
Based on: https://www.sciencedirect.com/science/article/pii/S0004370202001212
"""
import collections
import numpy as np
from open_spiel.python import rl_agent
from open_spiel.python import rl_tools
from open_spiel.python.algorithms.projected_replicator_dynamics import _simplex_projection
def valuedict():
return collections.defaultdict(float)
class WoLFSchedule(rl_tools.ValueSchedule):
"""Schedule rules described in the WoLF paper.
at step t the step size is (t0 / (t + t1))
"""
def __init__(self, t0, t1):
super(WoLFSchedule, self).__init__()
self._t0 = t0
self._t1 = t1
self._step_taken = 0
def step(self):
value = (self._t0 / (self._step_taken + self._t1))
self._step_taken += 1
return value
@property
def value(self):
return self._t0 / (self._step_taken + self._t1)
class WoLFPHC(rl_agent.AbstractAgent):
"""WoLF policy-hill climbing agent agent.
Based on win or learn fast principle.
Based on:
https://www.sciencedirect.com/science/article/pii/S0004370202001212
"""
def __init__(self,
player_id,
num_actions,
step_size=WoLFSchedule(10000, 1000000),
epsilon_schedule=rl_tools.ConstantSchedule(0.2),
delta_w=WoLFSchedule(1, 20000),
delta_l=WoLFSchedule(2, 20000),
discount_factor=1.0):
"""Initialize the WoLF-PHC agent."""
self._player_id = player_id
self._num_actions = num_actions
self._step_size = step_size
self._epsilon_schedule = epsilon_schedule
self._epsilon = epsilon_schedule.value
self._discount_factor = discount_factor
self._delta_w = delta_w
self._delta_l = delta_l
self._cur_policy = collections.defaultdict(valuedict)
self._avg_policy = collections.defaultdict(valuedict)
self._q_values = collections.defaultdict(valuedict)
self._state_counters = valuedict()
self._prev_info_state = None
self._last_loss_value = None
self._cur_delta_value = self._delta_l.value
def _hill_climbing(self, info_state, legal_actions):
"""Does the hill-climbing update.
Args:
info_state: hashable representation of the information state.
legal_actions: list of actions at `info_state`.
"""
greedy_q = max(
[self._q_values[info_state][action] for action in legal_actions])
greedy_actions = [
action for action in legal_actions
if self._q_values[info_state][action] == greedy_q
]
if len(greedy_actions) == len(legal_actions):
return
deltas = { # pylint: disable=g-complex-comprehension
action:
min(self._cur_policy[info_state][action],
self._cur_delta_value / (len(legal_actions) - len(greedy_actions)))
for action in legal_actions
}
delta_greedy = sum([
deltas[action]
for action in legal_actions
if action not in greedy_actions
]) / len(greedy_actions)
deltas = {
action:
-deltas[action] if action not in greedy_actions else delta_greedy
for action in legal_actions
}
new_policy = np.array([
self._cur_policy[info_state][action] + deltas[action]
for action in legal_actions
])
new_policy = _simplex_projection(new_policy)
for i in range(len(legal_actions)):
self._cur_policy[info_state][legal_actions[i]] = new_policy[i]
def _get_action_probs(self, info_state, legal_actions, epsilon):
"""Returns a selected action and the probabilities of legal actions.
To be overwritten by subclasses that implement other action selection
methods.
Args:
info_state: hashable representation of the information state.
legal_actions: list of actions at `info_state`.
epsilon: float: current value of the epsilon schedule or 0 in case
evaluation. QLearner uses it as the exploration parameter in
epsilon-greedy, but subclasses are free to interpret in different ways
(e.g. as temperature in softmax).
"""
if info_state not in self._cur_policy:
for action in legal_actions:
self._cur_policy[info_state][action] = 1. / len(legal_actions)
self._avg_policy[info_state][action] = 1. / len(legal_actions)
probs = np.zeros(self._num_actions)
for action in legal_actions:
probs[action] = ((1-epsilon) * self._cur_policy[info_state][action] +
epsilon * 1.0 / len(legal_actions))
action = np.random.choice(range(self._num_actions), p=probs)
return action, probs
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the Q-values if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
info_state = str(time_step.observations["info_state"][self._player_id])
legal_actions = time_step.observations["legal_actions"][self._player_id]
# Prevent undefined errors if this agent never plays until terminal step
action, probs = None, None
# Act step: don't act at terminal states.
if not time_step.last():
epsilon = 0.0 if is_evaluation else self._epsilon
action, probs = self._get_action_probs(info_state, legal_actions, epsilon)
# Learn step: don't learn during evaluation or at first agent steps.
if self._prev_info_state and not is_evaluation:
target = time_step.rewards[self._player_id]
if not time_step.last(): # Q values are zero for terminal.
target += self._discount_factor * max(
[self._q_values[info_state][a] for a in legal_actions])
prev_q_value = self._q_values[self._prev_info_state][self._prev_action]
self._last_loss_value = target - prev_q_value
self._q_values[self._prev_info_state][self._prev_action] += (
self._step_size.value * self._last_loss_value)
self._state_counters[info_state] += 1
for action_ in legal_actions:
self._avg_policy[info_state][action_] = (
self._avg_policy[info_state][action_] +
1 / self._state_counters[info_state] * (
self._cur_policy[info_state][action_] -
self._avg_policy[info_state][action_]))
assert self._delta_l.value > self._delta_w.value
cur_policy_value = sum([
self._cur_policy[info_state][action] *
self._q_values[info_state][action] for action in legal_actions
])
avg_policy_value = sum([
self._avg_policy[info_state][action] *
self._q_values[info_state][action] for action in legal_actions
])
if cur_policy_value > avg_policy_value:
self._cur_delta_value = self._delta_w.value
else:
self._cur_delta_value = self._delta_l.value
if not time_step.last():
self._hill_climbing(info_state, legal_actions)
# Decay epsilon, if necessary.
self._epsilon = self._epsilon_schedule.step()
self._delta_l.step()
self._delta_w.step()
self._step_size.step()
else: # prepare for the next episode.
self._prev_info_state = None
return
# Don't mess up with the state during evaluation.
if not is_evaluation:
self._prev_info_state = info_state
self._prev_action = action
return rl_agent.StepOutput(action=action, probs=probs)
@property
def loss(self):
return self._last_loss_value
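# A minimal self-play sketch on Kuhn poker with per-agent WoLF schedules (the
# constructor defaults are shared module-level objects, so fresh schedules are
# passed explicitly here). The game and episode count are illustrative
# assumptions.
def _example_usage(num_episodes=10):
  from open_spiel.python import rl_environment
  env = rl_environment.Environment("kuhn_poker")
  num_actions = env.action_spec()["num_actions"]
  agents = [
      WoLFPHC(
          player_id=idx,
          num_actions=num_actions,
          step_size=WoLFSchedule(10000, 1000000),
          epsilon_schedule=rl_tools.ConstantSchedule(0.2),
          delta_w=WoLFSchedule(1, 20000),
          delta_l=WoLFSchedule(2, 20000))
      for idx in range(env.num_players)
  ]
  for _ in range(num_episodes):
    time_step = env.reset()
    while not time_step.last():
      player = time_step.observations["current_player"]
      agent_output = agents[player].step(time_step)
      time_step = env.step([agent_output.action])
    for agent in agents:
      agent.step(time_step)  # Final step so agents learn from the last reward.
  return agents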
| open_spiel-master | open_spiel/python/algorithms/wolf_phc.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.eva."""
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import eva
# Temporarily disable TF2 behavior until we update the code.
tf.disable_v2_behavior()
class EVATest(parameterized.TestCase):
@parameterized.parameters("tic_tac_toe", "kuhn_poker", "liars_dice")
def test_run_games(self, game):
env = rl_environment.Environment(game)
num_players = env.num_players
eva_agents = []
num_actions = env.action_spec()["num_actions"]
state_size = env.observation_spec()["info_state"][0]
with tf.Session() as sess:
for player in range(num_players):
eva_agents.append(
eva.EVAAgent(
sess,
env,
player,
state_size,
num_actions,
embedding_network_layers=(64, 32),
embedding_size=12,
learning_rate=1e-4,
mixing_parameter=0.5,
memory_capacity=int(1e6),
discount_factor=1.0,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e6)))
sess.run(tf.global_variables_initializer())
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = eva_agents[current_player]
# 1. Step the agent.
# 2. Step the Environment.
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in eva_agents:
agent.step(time_step)
class QueryableFixedSizeRingBufferTest(tf.test.TestCase):
def test_replay_buffer_add(self):
replay_buffer = eva.QueryableFixedSizeRingBuffer(replay_buffer_capacity=10)
self.assertEmpty(replay_buffer)
replay_buffer.add("entry1")
self.assertLen(replay_buffer, 1)
replay_buffer.add("entry2")
self.assertLen(replay_buffer, 2)
self.assertIn("entry1", replay_buffer)
self.assertIn("entry2", replay_buffer)
def test_replay_buffer_max_capacity(self):
replay_buffer = eva.QueryableFixedSizeRingBuffer(replay_buffer_capacity=2)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.add("entry3")
self.assertLen(replay_buffer, 2)
self.assertIn("entry2", replay_buffer)
self.assertIn("entry3", replay_buffer)
def test_replay_buffer_sample(self):
replay_buffer = eva.QueryableFixedSizeRingBuffer(replay_buffer_capacity=3)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.add("entry3")
samples = replay_buffer.sample(3)
self.assertIn("entry1", samples)
self.assertIn("entry2", samples)
self.assertIn("entry3", samples)
# TODO(author6) Test knn query.
if __name__ == "__main__":
tf.test.main()
| open_spiel-master | open_spiel/python/algorithms/eva_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.tabular_multiagent_qlearner."""
from absl.testing import absltest
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python.algorithms.tabular_multiagent_qlearner import CorrelatedEqSolver
from open_spiel.python.algorithms.tabular_multiagent_qlearner import MultiagentQLearner
from open_spiel.python.algorithms.tabular_multiagent_qlearner import StackelbergEqSolver
from open_spiel.python.algorithms.tabular_multiagent_qlearner import TwoPlayerNashSolver
from open_spiel.python.algorithms.tabular_qlearner import QLearner
from open_spiel.python.egt.utils import game_payoffs_array
import pyspiel
SEED = 18763511
class MultiagentQTest(absltest.TestCase):
def test_simple_pathfinding_run(self):
env = rl_environment.Environment(
"pathfinding", grid="B.A\n...\na.b", players=2, step_reward=-1.)
with self.subTest("nash_q"):
qlearner = QLearner(0, env.game.num_distinct_actions())
nashqlearner = MultiagentQLearner(1, 2,
[env.game.num_distinct_actions()] * 2,
TwoPlayerNashSolver())
time_step = env.reset()
actions = [None, None]
step_cnt = 0
while not time_step.last():
actions = [
qlearner.step(time_step).action,
nashqlearner.step(time_step, actions).action
]
time_step = env.step(actions)
step_cnt += 1
self.assertLess(step_cnt, 500)
with self.subTest("ce_q"):
qlearner = QLearner(0, env.game.num_distinct_actions())
ceqlearner = MultiagentQLearner(1, 2,
[env.game.num_distinct_actions()] * 2,
CorrelatedEqSolver(is_cce=False))
time_step = env.reset()
actions = [None, None]
step_cnt = 0
while not time_step.last():
actions = [
qlearner.step(time_step).action,
ceqlearner.step(time_step, actions).action
]
time_step = env.step(actions)
step_cnt += 1
self.assertLess(step_cnt, 500)
with self.subTest("cce_q"):
qlearner = QLearner(0, env.game.num_distinct_actions())
cceqlearner = MultiagentQLearner(1, 2,
[env.game.num_distinct_actions()] * 2,
CorrelatedEqSolver(is_cce=True))
time_step = env.reset()
actions = [None, None]
step_cnt = 0
while not time_step.last():
actions = [
qlearner.step(time_step).action,
cceqlearner.step(time_step, actions).action
]
time_step = env.step(actions)
step_cnt += 1
self.assertLess(step_cnt, 500)
with self.subTest("asym_q"):
qlearner = QLearner(0, env.game.num_distinct_actions())
asymqlearner = MultiagentQLearner(1, 2,
[env.game.num_distinct_actions()] * 2,
StackelbergEqSolver())
time_step = env.reset()
actions = [None, None]
step_cnt = 0
while not time_step.last():
actions = [
qlearner.step(time_step).action,
asymqlearner.step(time_step, actions).action
]
time_step = env.step(actions)
step_cnt += 1
self.assertLess(step_cnt, 500)
def test_rps_run(self):
env = rl_environment.Environment("matrix_rps")
nashqlearner0 = MultiagentQLearner(0, 2,
[env.game.num_distinct_actions()] * 2,
TwoPlayerNashSolver())
nashqlearner1 = MultiagentQLearner(1, 2,
[env.game.num_distinct_actions()] * 2,
TwoPlayerNashSolver())
for _ in range(1000):
time_step = env.reset()
actions = [None, None]
actions = [
nashqlearner0.step(time_step, actions).action,
nashqlearner1.step(time_step, actions).action
]
time_step = env.step(actions)
nashqlearner0.step(time_step, actions)
nashqlearner1.step(time_step, actions)
with self.subTest("correct_rps_strategy"):
time_step = env.reset()
actions = [None, None]
learner0_strategy, learner1_strategy = nashqlearner0.step(
time_step, actions).probs, nashqlearner1.step(time_step,
actions).probs
np.testing.assert_array_almost_equal(
np.asarray([1 / 3, 1 / 3, 1 / 3]),
learner0_strategy.reshape(-1),
decimal=4)
np.testing.assert_array_almost_equal(
np.asarray([1 / 3, 1 / 3, 1 / 3]),
learner1_strategy.reshape(-1),
decimal=4)
with self.subTest("correct_rps_value"):
time_step = env.reset()
ground_truth_values = game_payoffs_array(
pyspiel.load_matrix_game("matrix_rps"))
info_state = str(time_step.observations["info_state"])
learner0_values, learner1_values = nashqlearner0._get_payoffs_array(
info_state), nashqlearner1._get_payoffs_array(info_state)
np.testing.assert_array_almost_equal(
ground_truth_values, learner0_values, decimal=4)
np.testing.assert_array_almost_equal(
ground_truth_values, learner1_values, decimal=4)
if __name__ == "__main__":
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/tabular_multiagent_qlearner_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python implementation of the CFR-BR algorithm."""
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import exploitability
import pyspiel
# pylint: disable=protected-access
_CFRSolverBase = cfr._CFRSolverBase
_update_current_policy = cfr._update_current_policy
_apply_regret_matching_plus_reset = cfr._apply_regret_matching_plus_reset
# pylint: enable=protected-access
class CFRBRSolver(_CFRSolverBase):
"""Implements the Counterfactual Regret Minimization (CFR-BR) algorithm.
This is Counterfactual Regret Minimization against Best Response, from
  Michael Johanson et al., 2012, Finding Optimal Abstract Strategies in
  Extensive-Form Games,
  https://poker.cs.ualberta.ca/publications/AAAI12-cfrbr.pdf.
The algorithm
computes an approximate Nash policy for n-player zero-sum games, but the
implementation is currently restricted to 2-player.
It uses an exact Best Response and full tree traversal.
  One iteration for an n-player game consists of the following:
- Compute the BR of each player against the rest of the players.
- Then, for each player p sequentially (from player 0 to N-1):
    - Compute the counterfactual reach probabilities and action values for player
p, playing against the set of the BR for all other players.
- Update the player `p` policy using these values.
CFR-BR should converge with high probability (see the paper), but we can also
compute the time-averaged strategy.
The implementation reuses the `action_values_vs_best_response` module and
thus uses TabularPolicies. This will run only for smallish games.
"""
def __init__(self, game, linear_averaging=False, regret_matching_plus=False):
# pyformat: disable
"""Initializer.
Args:
game: The `pyspiel.Game` to run on.
linear_averaging: Whether to use linear averaging, i.e.
cumulative_policy[info_state][action] += (
iteration_number * reach_prob * action_prob)
or not:
cumulative_policy[info_state][action] += reach_prob * action_prob
regret_matching_plus: Whether to use Regret Matching+:
cumulative_regrets = max(cumulative_regrets + regrets, 0)
or simply regret matching:
cumulative_regrets = cumulative_regrets + regrets
"""
# pyformat: enable
if game.num_players() != 2:
raise ValueError("Game {} does not have {} players.".format(game, 2))
assert game.get_type().dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL, (
"CFR requires sequential games. If you're trying to run it " +
"on a simultaneous (or normal-form) game, please first transform it " +
"using turn_based_simultaneous_game.")
super(CFRBRSolver, self).__init__(
game,
alternating_updates=True,
linear_averaging=linear_averaging,
regret_matching_plus=regret_matching_plus)
self._best_responses = {i: None for i in range(game.num_players())}
def _compute_best_responses(self):
"""Computes each player best-response against the pool of other players."""
def policy_fn(state):
key = state.information_state_string()
return self._get_infostate_policy(key)
current_policy = policy.tabular_policy_from_callable(self._game, policy_fn)
for player_id in range(self._game.num_players()):
self._best_responses[player_id] = exploitability.best_response(
self._game, current_policy, player_id)
def evaluate_and_update_policy(self):
"""Performs a single step of policy evaluation and policy improvement."""
self._iteration += 1
self._compute_best_responses()
for player in range(self._num_players):
# We do not use policies, to not have to call `state.information_state`
# several times (in here and within policy).
policies = []
for p in range(self._num_players):
# pylint: disable=g-long-lambda
policies.append(
lambda infostate_str, p=p:
{self._best_responses[p]["best_response_action"][infostate_str]: 1})
# pylint: enable=g-long-lambda
policies[player] = self._get_infostate_policy
self._compute_counterfactual_regret_for_player(
state=self._root_node,
policies=policies,
reach_probabilities=np.ones(self._num_players + 1),
player=player)
if self._regret_matching_plus:
_apply_regret_matching_plus_reset(self._info_state_nodes)
_update_current_policy(self._current_policy, self._info_state_nodes)
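# --- Hedged usage sketch (added for illustration; not part of the original
# module). It drives the solver the same way the other CFR solvers in this
# package are driven, and assumes `CFRBRSolver` inherits `average_policy()`
# from `_CFRSolverBase` (that method is not shown in this file).
def _example_run_cfr_br(num_iterations=20):
  """Runs CFR-BR on Kuhn poker and returns the NashConv of its average policy."""
  game = pyspiel.load_game("kuhn_poker")
  solver = CFRBRSolver(game)
  for _ in range(num_iterations):
    solver.evaluate_and_update_policy()
  # The returned value should shrink towards 0 as iterations increase.
  return exploitability.nash_conv(game, solver.average_policy())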
| open_spiel-master | open_spiel/python/algorithms/cfr_br.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example algorithm to sample some states from a game."""
import random
import pyspiel
def sample_some_states(
game,
max_states=100,
make_distribution_fn=lambda states: [1 / len(states)] * len(states)):
"""Samples some states in the game.
This can be run for large games, in contrast to `get_all_states`. It is useful
for tests that need to check a predicate only on a subset of the game, since
generating the whole game is infeasible.
Currently only works for sequential games. For simultaneous games and mean
field games it returns only the initial state.
The algorithm maintains a list of states and repeatedly picks a random state
from the list to expand until enough states have been sampled.
Arguments:
game: The game to analyze, as returned by `load_game`.
max_states: The maximum number of states to return. Negative means no limit.
make_distribution_fn: Function that takes a list of states and returns a
corresponding distribution (as a list of floats). Only used for mean field
games.
Returns:
A `list` of `pyspiel.State`.
"""
if game.get_type().dynamics in [
pyspiel.GameType.Dynamics.SIMULTANEOUS,
pyspiel.GameType.Dynamics.MEAN_FIELD
]:
return [game.new_initial_state()]
states = []
unexplored_actions = []
indexes_with_unexplored_actions = set()
def add_state(state):
states.append(state)
if state.is_terminal():
unexplored_actions.append(None)
else:
indexes_with_unexplored_actions.add(len(states) - 1)
unexplored_actions.append(set(state.legal_actions()))
def expand_random_state():
index = random.choice(list(indexes_with_unexplored_actions))
state = states[index]
if state.is_mean_field_node():
child = state.clone()
child.update_distribution(
make_distribution_fn(child.distribution_support()))
indexes_with_unexplored_actions.remove(index)
return child
else:
actions = unexplored_actions[index]
assert actions, f"Empty actions for state {state}"
action = random.choice(list(actions))
actions.remove(action)
if not actions:
indexes_with_unexplored_actions.remove(index)
return state.child(action)
add_state(game.new_initial_state())
  while (max_states < 0 or
         len(states) < max_states) and indexes_with_unexplored_actions:
add_state(expand_random_state())
if not states:
raise ValueError("get_some_states sampled 0 states!")
return states
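# --- Hedged usage sketch (added for illustration; not part of the original
# module). Everything it calls is defined or imported above.
def _example_sample_some_states():
  """Samples a few Kuhn poker states and returns their history strings."""
  game = pyspiel.load_game("kuhn_poker")
  states = sample_some_states(game, max_states=20)
  return [state.history_str() for state in states]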
| open_spiel-master | open_spiel/python/algorithms/sample_some_states.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.regret_matching."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import regret_matching
from open_spiel.python.egt.utils import game_payoffs_array
import pyspiel
class RegretMatchingTest(absltest.TestCase):
def test_two_players(self):
test_a = np.array([[2, 1, 0], [0, -1, -2]])
test_b = np.array([[2, 1, 0], [0, -1, -2]])
strategies = regret_matching.regret_matching(
[test_a, test_b],
initial_strategies=None,
iterations=50000,
prd_gamma=1e-8,
average_over_last_n_strategies=10)
self.assertLen(strategies, 2, "Wrong strategy length.")
self.assertGreater(strategies[0][0], 0.999,
"Regret matching failed in trivial case.")
def test_three_players(self):
test_a = np.array([[[2, 1, 0], [1, 0, -1]], [[1, 0, -1], [0, -1, -2]]])
test_b = np.array([[[2, 1, 0], [1, 0, -1]], [[1, 0, -1], [0, -1, -2]]])
test_c = np.array([[[2, 1, 0], [1, 0, -1]], [[1, 0, -1], [0, -1, -2]]])
strategies = regret_matching.regret_matching(
[test_a, test_b, test_c],
initial_strategies=None,
iterations=50000,
gamma=1e-6,
average_over_last_n_strategies=10)
self.assertLen(strategies, 3, "Wrong strategy length.")
self.assertGreater(strategies[0][0], 0.999,
"Regret matching failed in trivial case.")
def test_rps(self):
game = pyspiel.load_game("matrix_rps")
payoffs_array = game_payoffs_array(game)
strategies = regret_matching.regret_matching(
[payoffs_array[0], payoffs_array[1]],
initial_strategies=[
np.array([0.1, 0.4, 0.5]),
np.array([0.9, 0.1, 0.01])
],
iterations=50000,
gamma=1e-6)
self.assertLen(strategies, 2, "Wrong strategy length.")
    # places=2 corresponds to an absolute difference of < 0.005
self.assertAlmostEqual(strategies[0][0], 1 / 3., places=2)
self.assertAlmostEqual(strategies[0][1], 1 / 3., places=2)
self.assertAlmostEqual(strategies[0][2], 1 / 3., places=2)
def test_biased_rps(self):
game = pyspiel.load_game("matrix_brps")
payoffs_array = game_payoffs_array(game)
strategies = regret_matching.regret_matching(
[payoffs_array[0], payoffs_array[1]], iterations=50000, gamma=1e-8)
self.assertLen(strategies, 2, "Wrong strategy length.")
    # places=1 corresponds to an absolute difference of < 0.05
self.assertAlmostEqual(strategies[0][0], 1 / 16., places=1)
self.assertAlmostEqual(strategies[0][1], 10 / 16., places=1)
self.assertAlmostEqual(strategies[0][2], 5 / 16., places=1)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/regret_matching_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.cfr."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import exploitability
import pyspiel
_KUHN_GAME = pyspiel.load_game("kuhn_poker")
_LEDUC_GAME = pyspiel.load_game("leduc_poker")
_KUHN_UNIFORM_POLICY = policy.TabularPolicy(_KUHN_GAME)
_LEDUC_UNIFORM_POLICY = policy.TabularPolicy(_LEDUC_GAME)
class ModuleLevelFunctionTest(absltest.TestCase):
def test__update_current_policy(self):
game = pyspiel.load_game("kuhn_poker")
tabular_policy = policy.TabularPolicy(game)
cumulative_regrets = np.arange(0, 12 * 2).reshape((12, 2))
expected_policy = cumulative_regrets / np.sum(
cumulative_regrets, axis=-1, keepdims=True)
nodes_indices = {
u"0": 0,
u"0pb": 1,
u"1": 2,
u"1pb": 3,
u"2": 4,
u"2pb": 5,
u"1p": 6,
u"1b": 7,
u"2p": 8,
u"2b": 9,
u"0p": 10,
u"0b": 11,
}
# pylint: disable=g-complex-comprehension
info_state_nodes = {
key: cfr._InfoStateNode(
legal_actions=[0, 1],
index_in_tabular_policy=None,
cumulative_regret=dict(enumerate(cumulative_regrets[index])),
cumulative_policy=None) for key, index in nodes_indices.items()
}
# pylint: enable=g-complex-comprehension
cfr._update_current_policy(tabular_policy, info_state_nodes)
np.testing.assert_array_equal(expected_policy,
tabular_policy.action_probability_array)
class CFRTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(
list(itertools.product([True, False], [True, False], [True, False])))
def test_policy_zero_is_uniform(self, linear_averaging, regret_matching_plus,
alternating_updates):
# We use Leduc and not Kuhn, because Leduc has illegal actions and Kuhn does
# not.
game = pyspiel.load_game("leduc_poker")
cfr_solver = cfr._CFRSolver(
game,
regret_matching_plus=regret_matching_plus,
linear_averaging=linear_averaging,
alternating_updates=alternating_updates)
np.testing.assert_array_equal(
_LEDUC_UNIFORM_POLICY.action_probability_array,
cfr_solver.current_policy().action_probability_array)
np.testing.assert_array_equal(
_LEDUC_UNIFORM_POLICY.action_probability_array,
cfr_solver.average_policy().action_probability_array)
def test_cfr_kuhn_poker(self):
game = pyspiel.load_game("kuhn_poker")
cfr_solver = cfr.CFRSolver(game)
for _ in range(300):
cfr_solver.evaluate_and_update_policy()
average_policy = cfr_solver.average_policy()
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
# 1/18 is the Nash value. See https://en.wikipedia.org/wiki/Kuhn_poker
np.testing.assert_allclose(
average_policy_values, [-1 / 18, 1 / 18], atol=1e-3)
def test_cfr_plus_kuhn_poker(self):
game = pyspiel.load_game("kuhn_poker")
cfr_solver = cfr.CFRPlusSolver(game)
for _ in range(200):
cfr_solver.evaluate_and_update_policy()
average_policy = cfr_solver.average_policy()
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
# 1/18 is the Nash value. See https://en.wikipedia.org/wiki/Kuhn_poker
np.testing.assert_allclose(
average_policy_values, [-1 / 18, 1 / 18], atol=1e-3)
def test_cfr_plus_solver_best_response_mdp(self):
game = pyspiel.load_game("kuhn_poker")
cfr_solver = cfr.CFRPlusSolver(game)
for _ in range(200):
cfr_solver.evaluate_and_update_policy()
average_policy = cfr_solver.average_policy()
pyspiel_avg_policy = policy.python_policy_to_pyspiel_policy(average_policy)
br_computer = pyspiel.TabularBestResponseMDP(game, pyspiel_avg_policy)
br_info = br_computer.exploitability()
self.assertLessEqual(br_info.exploitability, 0.001)
def test_cfr_cce_ce_dist_goofspiel(self):
"""Copy of the TestCCEDistCFRGoofSpiel in corr_dist_test.cc."""
game = pyspiel.load_game(
"turn_based_simultaneous_game(game=goofspiel(num_cards=3,points_order="
"descending,returns_type=total_points))")
for num_iterations in [1, 10, 100]:
policies = []
cfr_solver = cfr.CFRSolver(game)
for _ in range(num_iterations):
cfr_solver.evaluate_and_update_policy()
policies.append(
policy.python_policy_to_pyspiel_policy(cfr_solver.current_policy()))
mu = pyspiel.uniform_correlation_device(policies)
cce_dist_info = pyspiel.cce_dist(game, mu)
print("goofspiel, cce test num_iters: {}, cce_dist: {}, per player: {}"
.format(num_iterations, cce_dist_info.dist_value,
cce_dist_info.deviation_incentives))
# Try converting one of the BR policies:
_ = policy.pyspiel_policy_to_python_policy(
game, cce_dist_info.best_response_policies[0])
# Assemble the same correlation device manually, just as an example for
# how to do non-uniform distributions of them and to test the python
# bindings for lists of tuples works properly
uniform_prob = 1.0 / len(policies)
mu2 = [(uniform_prob, policy) for policy in policies]
cce_dist_info2 = pyspiel.cce_dist(game, mu2)
self.assertAlmostEqual(cce_dist_info2.dist_value,
sum(cce_dist_info.deviation_incentives))
# Test the CEDist function too, why not. Disable the exact one, as it
# takes too long for a test.
# ce_dist_info = pyspiel.ce_dist(game, pyspiel.determinize_corr_dev(mu))
ce_dist_info = pyspiel.ce_dist(
game, pyspiel.sampled_determinize_corr_dev(mu, 100))
print("goofspiel, ce test num_iters: {}, ce_dist: {}, per player: {}"
.format(num_iterations, ce_dist_info.dist_value,
ce_dist_info.deviation_incentives))
print("number of conditional best responses per player:")
for p in range(game.num_players()):
print(" player {}, num: {}".format(
p, len(ce_dist_info.conditional_best_response_policies[p])))
@parameterized.parameters(
list(itertools.product([True, False], [True, False], [True, False])))
def test_cfr_kuhn_poker_runs_with_multiple_players(self, linear_averaging,
regret_matching_plus,
alternating_updates):
num_players = 3
game = pyspiel.load_game("kuhn_poker", {"players": num_players})
cfr_solver = cfr._CFRSolver(
game,
regret_matching_plus=regret_matching_plus,
linear_averaging=linear_averaging,
alternating_updates=alternating_updates)
for _ in range(10):
cfr_solver.evaluate_and_update_policy()
average_policy = cfr_solver.average_policy()
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * num_players)
del average_policy_values
@parameterized.parameters(list(itertools.product([False, True])))
def test_simultaneous_two_step_avg_1b_seq_in_kuhn_poker(
self, regret_matching_plus):
num_players = 2
game = pyspiel.load_game("kuhn_poker", {"players": num_players})
cfr_solver = cfr._CFRSolver(
game,
regret_matching_plus=regret_matching_plus,
linear_averaging=False,
alternating_updates=False)
def check_avg_policy_is_uniform_random():
avg_policy = cfr_solver.average_policy()
for player_info_states in avg_policy.states_per_player:
for info_state in player_info_states:
state_policy = avg_policy.policy_for_key(info_state)
np.testing.assert_allclose(state_policy, [1.0 / len(state_policy)] *
len(state_policy))
check_avg_policy_is_uniform_random()
cfr_solver.evaluate_and_update_policy()
check_avg_policy_is_uniform_random()
cfr_solver.evaluate_and_update_policy()
# The acting player in 1b is player 1 and they have not acted before, so
# the probability this player plays to this information state is 1, and
# the sequence probability of any action is just the probability of that
# action given the information state. On the first iteration, this
# probability is 0.5 for both actions. On the second iteration, the
# current policy is [0, 1], so the average cumulants should be
# [0.5, 1.5]. Normalizing this gives the average policy.
normalization = 0.5 + 0.5 + 1
np.testing.assert_allclose(cfr_solver.average_policy().policy_for_key("1b"),
[0.5 / normalization, (0.5 + 1) / normalization])
def test_policy(self):
game = pyspiel.load_game("kuhn_poker")
solver = cfr.CFRPlusSolver(game)
tabular_policy = solver.current_policy()
self.assertLen(tabular_policy.state_lookup, 12)
for info_state_str in tabular_policy.state_lookup.keys():
np.testing.assert_equal(
np.asarray([0.5, 0.5]), tabular_policy.policy_for_key(info_state_str))
@parameterized.parameters([
(pyspiel.load_game("kuhn_poker"), pyspiel.CFRSolver, cfr.CFRSolver),
(pyspiel.load_game("leduc_poker"), pyspiel.CFRSolver, cfr.CFRSolver),
(pyspiel.load_game("kuhn_poker"), pyspiel.CFRPlusSolver,
cfr.CFRPlusSolver),
(pyspiel.load_game("leduc_poker"), pyspiel.CFRPlusSolver,
cfr.CFRPlusSolver),
])
def test_cpp_algorithms_identical_to_python_algorithm(self, game, cpp_class,
python_class):
cpp_solver = cpp_class(game)
python_solver = python_class(game)
for _ in range(5):
cpp_solver.evaluate_and_update_policy()
python_solver.evaluate_and_update_policy()
cpp_avg_policy = cpp_solver.average_policy()
python_avg_policy = python_solver.average_policy()
# We do not compare the policy directly as we do not have an easy way to
# convert one to the other, so we use the exploitability as a proxy.
cpp_expl = pyspiel.nash_conv(game, cpp_avg_policy)
python_expl = exploitability.nash_conv(game, python_avg_policy)
self.assertEqual(cpp_expl, python_expl)
# Then we also check the CurrentPolicy, just to check it is giving the same
# results too
cpp_current_policy = cpp_solver.current_policy()
python_current_policy = python_solver.current_policy()
cpp_expl = pyspiel.nash_conv(game, cpp_current_policy)
python_expl = exploitability.nash_conv(game, python_current_policy)
self.assertEqual(cpp_expl, python_expl)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/cfr_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.projected_replicator_dynamics."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import projected_replicator_dynamics
class ProjectedReplicatorDynamicsTest(absltest.TestCase):
def test_two_players(self):
test_a = np.array([[2, 1, 0], [0, -1, -2]])
test_b = np.array([[2, 1, 0], [0, -1, -2]])
strategies = projected_replicator_dynamics.projected_replicator_dynamics(
[test_a, test_b],
prd_initial_strategies=None,
prd_iterations=50000,
prd_dt=1e-3,
prd_gamma=1e-8,
average_over_last_n_strategies=10)
self.assertLen(strategies, 2, "Wrong strategy length.")
self.assertGreater(strategies[0][0], 0.999,
"Projected Replicator Dynamics failed in trivial case.")
def test_three_players(self):
test_a = np.array([[[2, 1, 0], [1, 0, -1]], [[1, 0, -1], [0, -1, -2]]])
test_b = np.array([[[2, 1, 0], [1, 0, -1]], [[1, 0, -1], [0, -1, -2]]])
test_c = np.array([[[2, 1, 0], [1, 0, -1]], [[1, 0, -1], [0, -1, -2]]])
strategies = projected_replicator_dynamics.projected_replicator_dynamics(
[test_a, test_b, test_c],
prd_initial_strategies=None,
prd_iterations=50000,
prd_dt=1e-3,
prd_gamma=1e-6,
average_over_last_n_strategies=10)
self.assertLen(strategies, 3, "Wrong strategy length.")
self.assertGreater(strategies[0][0], 0.999,
"Projected Replicator Dynamics failed in trivial case.")
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/projected_replicator_dynamics_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.minimax."""
from absl.testing import absltest
from open_spiel.python.algorithms import minimax
import pyspiel
class MinimaxTest(absltest.TestCase):
def test_compute_game_value(self):
tic_tac_toe = pyspiel.load_game("tic_tac_toe")
game_score, _ = minimax.alpha_beta_search(tic_tac_toe)
self.assertEqual(0., game_score)
def test_compute_game_value_with_evaluation_function(self):
# We only check it runs
tic_tac_toe = pyspiel.load_game("tic_tac_toe")
game_score, _ = minimax.alpha_beta_search(
tic_tac_toe, value_function=lambda x: 0, maximum_depth=1)
self.assertEqual(0., game_score)
def test_win(self):
tic_tac_toe = pyspiel.load_game("tic_tac_toe")
state = tic_tac_toe.new_initial_state()
# Construct:
# .o.
# .x.
# ...
state.apply_action(4)
state.apply_action(1)
game_score, _ = minimax.alpha_beta_search(tic_tac_toe, state=state)
self.assertEqual(1., game_score)
def test_loss(self):
tic_tac_toe = pyspiel.load_game("tic_tac_toe")
state = tic_tac_toe.new_initial_state()
# Construct:
# ...
# xox
# ..o
state.apply_action(5)
state.apply_action(4)
state.apply_action(3)
state.apply_action(8)
game_score, _ = minimax.alpha_beta_search(tic_tac_toe, state=state)
self.assertEqual(-1., game_score)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/minimax_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.get_all_states."""
from absl.testing import absltest
from open_spiel.python.algorithms import get_all_states
import pyspiel
class GetAllStatesTest(absltest.TestCase):
def test_tic_tac_toe_number_histories(self):
game = pyspiel.load_game("tic_tac_toe")
states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=True,
include_chance_states=False,
to_string=lambda s: s.history_str())
self.assertLen(states, 549946)
states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=True,
include_chance_states=False,
to_string=str)
self.assertLen(states, 5478)
def test_simultaneous_python_game_get_all_state(self):
game = pyspiel.load_game(
"python_iterated_prisoners_dilemma(max_game_length=6)")
states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=True,
include_chance_states=False,
to_string=lambda s: s.history_str())
self.assertLen(states, 10921)
states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=True,
include_chance_states=False,
to_string=str)
self.assertLen(states, 5461)
def test_simultaneous_game_get_all_state(self):
    game = pyspiel.load_game("goofspiel", {"num_cards": 3})
states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=True,
include_chance_states=False,
to_string=lambda s: s.history_str())
self.assertLen(states, 273)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/get_all_states_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.best_response."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import games # pylint:disable=unused-import
from open_spiel.python import policy
from open_spiel.python.algorithms import best_response
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import get_all_states
import pyspiel
class BestResponseTest(parameterized.TestCase, absltest.TestCase):
def test_best_response_is_a_policy(self):
game = pyspiel.load_game("kuhn_poker")
test_policy = policy.UniformRandomPolicy(game)
br = best_response.BestResponsePolicy(game, policy=test_policy, player_id=0)
expected_policy = {
"0": 1, # Bet in case opponent folds when winning
"1": 1, # Bet in case opponent folds when winning
"2": 0, # Both equally good (we return the lowest action)
# Some of these will never happen under the best-response policy,
# but we have computed best-response actions anyway.
"0pb": 0, # Fold - we're losing
"1pb": 1, # Call - we're 50-50
"2pb": 1, # Call - we've won
}
self.assertEqual(
expected_policy,
{key: br.best_response_action(key) for key in expected_policy.keys()})
@parameterized.parameters(["kuhn_poker", "leduc_poker"])
def test_cpp_and_python_implementations_are_identical(self, game_name):
game = pyspiel.load_game(game_name)
python_policy = policy.UniformRandomPolicy(game)
pyspiel_policy = pyspiel.UniformRandomPolicy(game)
all_states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False,
to_string=lambda s: s.information_state_string())
for current_player in range(game.num_players()):
python_br = best_response.BestResponsePolicy(game, current_player,
python_policy)
cpp_br = pyspiel.TabularBestResponse(
game, current_player, pyspiel_policy).get_best_response_policy()
for state in all_states.values():
if state.current_player() != current_player:
continue
self.assertEqual(
python_br.action_probabilities(state), {
a: prob
for a, prob in cpp_br.action_probabilities(state).items()
if prob != 0
})
@parameterized.parameters(("kuhn_poker", 2))
def test_cpp_and_python_best_response_are_identical(self, game_name,
num_players):
game = pyspiel.load_game(game_name, {"players": num_players})
test_policy = policy.TabularPolicy(game)
for i_player in range(num_players):
best_resp_py_backend = best_response.BestResponsePolicy(
game, i_player, test_policy)
best_resp_cpp_backend = best_response.CPPBestResponsePolicy(
game, i_player, test_policy)
for state in best_resp_cpp_backend.all_states.values():
if i_player == state.current_player():
py_dict = best_resp_py_backend.action_probabilities(state)
cpp_dict = best_resp_cpp_backend.action_probabilities(state)
# We do check like this, because the actions associated to a 0. prob
# do not necessarily appear
for key, value in py_dict.items():
self.assertEqual(value, cpp_dict.get(key, 0.))
for key, value in cpp_dict.items():
self.assertEqual(value, py_dict.get(key, 0.))
@parameterized.parameters(("kuhn_poker", 2), ("kuhn_poker", 3))
def test_cpp_and_python_value_are_identical(self, game_name, num_players):
game = pyspiel.load_game(game_name, {"players": num_players})
test_policy = policy.TabularPolicy(game)
root_state = game.new_initial_state()
for i_player in range(num_players):
best_resp_py_backend = best_response.BestResponsePolicy(
game, i_player, test_policy)
best_resp_cpp_backend = best_response.CPPBestResponsePolicy(
game, i_player, test_policy)
value_py_backend = best_resp_py_backend.value(root_state)
value_cpp_backend = best_resp_cpp_backend.value(root_state)
self.assertTrue(np.allclose(value_py_backend, value_cpp_backend))
def test_best_response_tic_tac_toe_value_is_consistent(self):
# This test was failing because of use of str(state) in the best response,
# which is imperfect recall. We now use state.history_str() throughout.
# Chose a policy at random; not the uniform random policy.
game = pyspiel.load_game("tic_tac_toe")
pi = policy.TabularPolicy(game)
rng = np.random.RandomState(1234)
pi.action_probability_array[:] = rng.rand(*pi.legal_actions_mask.shape)
pi.action_probability_array *= pi.legal_actions_mask
pi.action_probability_array /= np.sum(
pi.action_probability_array, axis=1, keepdims=True)
# Compute a best response and verify the best response value is consistent.
br = best_response.BestResponsePolicy(game, 1, pi)
self.assertAlmostEqual(
expected_game_score.policy_value(game.new_initial_state(), [pi, br])[1],
br.value(game.new_initial_state()))
def test_best_response_oshi_zumo_simultaneous_game(self):
"""Test best response computation for simultaneous game."""
game = pyspiel.load_game("oshi_zumo(horizon=5,coins=5)")
test_policy = policy.UniformRandomPolicy(game)
br = best_response.BestResponsePolicy(game, policy=test_policy, player_id=0)
expected_policy = {
"0, 0, 0, 3, 0, 2": 1,
"0, 0, 1, 4, 3, 1": 0,
"0, 0, 4, 1, 0, 2, 0, 2": 1,
"0, 1, 1, 0, 1, 4": 1,
"0, 1, 4, 1, 0, 0, 0, 1": 1,
"0, 2, 2, 2, 3, 0, 0, 0": 0,
"0, 5, 0, 0, 0, 0, 3, 0": 1
}
self.assertEqual(
expected_policy,
{key: br.best_response_action(key) for key in expected_policy})
self.assertAlmostEqual(br.value(game.new_initial_state()), 0.856471051954)
def test_best_response_prisoner_dilemma_simultaneous_game(self):
"""Test best response computation for simultaneous game."""
game = pyspiel.load_game(
"python_iterated_prisoners_dilemma(max_game_length=5)")
test_policy = policy.UniformRandomPolicy(game)
br = best_response.BestResponsePolicy(game, policy=test_policy, player_id=0)
# Best policy is always to defect; we verify this for a handful of states
self.assertEqual(br.best_response_action("us:CCCC op:CCCC"), 1)
self.assertEqual(br.best_response_action("us:DDDD op:CCCC"), 1)
self.assertEqual(br.best_response_action("us:CDCD op:DCDC"), 1)
self.assertEqual(br.best_response_action("us:CCCC op:DDDD"), 1)
# Expected value per turn = 5.5 (avg of 1 and 10)
# Expected game length = sum(0.875**i for i in range(5)) = 3.896728515625
# Game value = 5.5 * 3.896728515625 = 21.4320068359375
self.assertAlmostEqual(br.value(game.new_initial_state()), 21.4320068359375)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/best_response_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tabular Multiagent Q-learning agent.
Currently implementations include:
Nash-Q: https://www.jmlr.org/papers/volume4/hu03a/hu03a.pdf
Correlated-Q: https://www.aaai.org/Papers/ICML/2003/ICML03-034.pdf, where both
CE-Q and CCE-Q are supported.
Asymmetric-Q: https://ieeexplore.ieee.org/document/1241094
"""
import abc
import collections
import itertools
import nashpy as nash
import numpy as np
from open_spiel.python import rl_agent
from open_spiel.python import rl_tools
from open_spiel.python.algorithms.jpsro import _mgce
from open_spiel.python.algorithms.jpsro import _mgcce
from open_spiel.python.algorithms.stackelberg_lp import solve_stackelberg
import pyspiel
def valuedict():
return collections.defaultdict(float)
class JointActionSolver:
@abc.abstractmethod
def __call__(self, payoffs_array):
"""Find a joint action mixture and values for the current one-step game.
Args:
payoffs_array: a `numpy.ndarray` of utilities of a game.
Returns:
res_mixtures: a list of mixed strategies for each agent
res_values: a list of expected utilities for each agent
"""
class TwoPlayerNashSolver(JointActionSolver):
"""A joint action solver solving for Nash for two-player games.
  Uses nashpy's support enumeration to compute candidate Nash equilibria.
"""
def __call__(self, payoffs_array):
assert len(payoffs_array) == 2
row_payoffs, col_payoffs = payoffs_array[0], payoffs_array[1]
a0, a1 = payoffs_array.shape[1:]
nashpy_game = nash.Game(row_payoffs, col_payoffs)
best_value = float("-inf")
res_mixtures, res_values = None, None
for (row_mixture, col_mixture) in nashpy_game.support_enumeration():
      # TODO: handle the case where the solver gives an ineligible answer.
if np.sum(np.isnan(row_mixture)) or np.sum(np.isnan(col_mixture)):
continue
row_mixture_, col_mixture_ = row_mixture.reshape(
(-1, 1)), col_mixture.reshape((-1, 1))
row_value, col_value = (
row_mixture_.T.dot(row_payoffs).dot(col_mixture_)).item(), (
row_mixture_.T.dot(col_payoffs).dot(col_mixture_)).item()
# Currently using maximizing social welfare for equilibrium selection
if row_value + col_value > best_value:
best_value = row_value + col_value
res_mixtures = [row_mixture, col_mixture]
res_values = [row_value, col_value]
    # If no plausible Nash equilibrium is found, use uniform mixed strategies.
if not res_mixtures:
res_mixtures = [np.ones(a0) / a0, np.ones(a1) / a1]
row_mixture_, col_mixture_ = res_mixtures[0].reshape(
(-1, 1)), res_mixtures[1].reshape((-1, 1))
res_values = [(row_mixture_.T.dot(row_payoffs).dot(col_mixture_)).item(),
(row_mixture_.T.dot(col_payoffs).dot(col_mixture_)).item()]
return res_mixtures, res_values
class CorrelatedEqSolver(JointActionSolver):
"""A joint action solver solving for correlated equilibrium.
  Uses python.algorithms.jpsro._mgce and _mgcce for solving (coarse) correlated
equilibrium.
"""
def __init__(self, is_cce=False):
self._is_cce = is_cce
def __call__(self, payoffs_array):
num_players = len(payoffs_array)
assert num_players > 0
num_strategies_per_player = payoffs_array.shape[1:]
mixture, _ = (
_mgcce( # pylint: disable=g-long-ternary
payoffs_array,
[np.ones([ns], dtype=np.int32) for ns in num_strategies_per_player],
ignore_repeats=True)
        if self._is_cce else _mgce(
payoffs_array,
[np.ones([ns], dtype=np.int32) for ns in num_strategies_per_player],
ignore_repeats=True))
mixtures, values = [], []
for n in range(num_players):
values.append(np.sum(payoffs_array[n] * mixture))
mixtures.append(
np.sum(
mixture,
axis=tuple([n_ for n_ in range(num_players) if n_ != n])))
return mixtures, values
class StackelbergEqSolver(JointActionSolver):
"""A joint action solver solving for Stackelverg equilibrium.
Uses python.algorithms.stackelberg_lp.py.
"""
def __init__(self, is_first_leader=True):
self._is_first_leader = is_first_leader
def __call__(self, payoffs_array):
assert len(payoffs_array) == 2
game = pyspiel.create_matrix_game(payoffs_array[0], payoffs_array[1])
try:
player0_strategy, player1_strategy, player0_value, player1_value = solve_stackelberg(
game, self._is_first_leader)
return [player0_strategy,
player1_strategy], [player0_value, player1_value]
except: # pylint: disable=bare-except
# if the game matrix is degenerated and cannot solve for an SSE,
# return uniform strategy
num_player0_strategies, num_player1_strategies = payoffs_array[0].shape
player0_strategy, player1_strategy = np.ones(
num_player0_strategies) / num_player0_strategies, np.ones(
num_player1_strategies) / num_player1_strategies
player0_value, player1_value = player0_strategy.reshape(1, -1).dot(
payoffs_array[0]).dot(player1_strategy.reshape(
-1, 1)), player0_strategy.reshape(1, -1).dot(
payoffs_array[1]).dot(player1_strategy.reshape(-1, 1))
return [player0_strategy,
player1_strategy], [player0_value, player1_value]
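# --- Hedged usage sketch (added for illustration; not part of the original
# module). The joint-action solvers above all take a payoff tensor of shape
# [num_players, num_actions_0, num_actions_1]; here rock-paper-scissors is
# built by hand and each solver is queried for a one-shot mixture and value.
def _example_joint_action_solvers():
  """Runs the joint-action solvers on a hand-built RPS payoff tensor."""
  row_payoffs = np.array([[0., -1., 1.],
                          [1., 0., -1.],
                          [-1., 1., 0.]])
  payoffs = np.stack([row_payoffs, -row_payoffs])  # Zero-sum RPS.
  nash_mixtures, nash_values = TwoPlayerNashSolver()(payoffs)
  cce_mixtures, cce_values = CorrelatedEqSolver(is_cce=True)(payoffs)
  # Both the Nash and maximum-Gini CCE solutions of RPS are uniform
  # (1/3 per action) with value 0 for each player.
  sse_mixtures, sse_values = StackelbergEqSolver()(payoffs)
  return (nash_mixtures, nash_values, cce_mixtures, cce_values, sse_mixtures,
          sse_values)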
class MultiagentQLearner(rl_agent.AbstractAgent):
"""A multiagent joint action learner."""
def __init__(self,
player_id,
num_players,
num_actions,
joint_action_solver,
step_size=0.1,
epsilon_schedule=rl_tools.ConstantSchedule(0.2),
discount_factor=1.0):
"""Initialize the Multiagent joint-action Q-Learning agent.
The joint_action_solver solves for one-step matrix game defined by Q-tables.
Args:
player_id: the player id this agent will play as,
num_players: the number of players in the game,
num_actions: the number of distinct actions in the game,
joint_action_solver: the joint action solver class to use to solve the
one-step matrix games
step_size: learning rate for Q-learning,
epsilon_schedule: exploration parameter,
discount_factor: the discount factor as in Q-learning.
"""
self._player_id = player_id
self._num_players = num_players
self._num_actions = num_actions
self._joint_action_solver = joint_action_solver
self._step_size = step_size
self._epsilon_schedule = epsilon_schedule
self._epsilon = epsilon_schedule.value
self._discount_factor = discount_factor
self._q_values = [
collections.defaultdict(valuedict) for _ in range(num_players)
]
self._prev_info_state = None
def _get_payoffs_array(self, info_state):
payoffs_array = np.zeros((self._num_players,) + tuple(self._num_actions))
for joint_action in itertools.product(
*[range(dim) for dim in self._num_actions]):
for n in range(self._num_players):
payoffs_array[
(n,) + joint_action] = self._q_values[n][info_state][joint_action]
return payoffs_array
def _epsilon_greedy(self, info_state, legal_actions, epsilon):
"""Returns a valid epsilon-greedy action and valid action probs.
If the agent has not been to `info_state`, a valid random action is chosen.
Args:
info_state: hashable representation of the information state.
legal_actions: list of actions at `info_state`.
epsilon: float, prob of taking an exploratory action.
Returns:
A valid epsilon-greedy action and valid action probabilities.
"""
probs = np.zeros(self._num_actions[self._player_id])
state_probs, _ = self._joint_action_solver(
self._get_payoffs_array(info_state))
probs[legal_actions[self._player_id]] = (
epsilon / len(legal_actions[self._player_id]))
probs += (1 - epsilon) * state_probs[self._player_id]
action = np.random.choice(
range(self._num_actions[self._player_id]), p=probs)
return action, probs
def step(self, time_step, actions=None, is_evaluation=False):
"""Returns the action to be taken and updates the Q-values if needed.
Args:
time_step: an instance of rl_environment.TimeStep,
actions: list of actions taken by all agents from the previous step,
is_evaluation: bool, whether this is a training or evaluation call,
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
info_state = str(time_step.observations["info_state"])
legal_actions = time_step.observations["legal_actions"]
# Prevent undefined errors if this agent never plays until terminal step
action, probs = None, None
# Act step: don't act at terminal states.
if not time_step.last():
epsilon = 0.0 if is_evaluation else self._epsilon
# select according to the joint action solver
action, probs = self._epsilon_greedy(
info_state, legal_actions, epsilon=epsilon)
# Learn step: don't learn during evaluation or at first agent steps.
actions = tuple(actions)
if self._prev_info_state and not is_evaluation:
_, next_state_values = (
self._joint_action_solver(self._get_payoffs_array(info_state)))
# update Q values for every agent
for n in range(self._num_players):
target = time_step.rewards[n]
if not time_step.last(): # Q values are zero for terminal.
target += self._discount_factor * next_state_values[n]
prev_q_value = self._q_values[n][self._prev_info_state][actions]
self._q_values[n][self._prev_info_state][actions] += (
self._step_size * (target - prev_q_value))
# Decay epsilon, if necessary.
self._epsilon = self._epsilon_schedule.step()
if time_step.last(): # prepare for the next episode.
self._prev_info_state = None
return
    # Don't modify the agent's state during evaluation.
if not is_evaluation:
self._prev_info_state = info_state
return rl_agent.StepOutput(action=action, probs=probs)
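# --- Hedged usage sketch (added for illustration; not part of the original
# module). It mirrors the training loop in tabular_multiagent_qlearner_test.py:
# an independent QLearner for player 0 and a Nash-Q learner for player 1 on a
# small two-player "pathfinding" grid. The QLearner import is an extra
# dependency that the module itself does not require.
def _example_nash_q_on_pathfinding():
  """Runs one Nash-Q training episode on a small pathfinding grid."""
  from open_spiel.python import rl_environment  # pylint: disable=g-import-not-at-top
  from open_spiel.python.algorithms.tabular_qlearner import QLearner  # pylint: disable=g-import-not-at-top
  env = rl_environment.Environment(
      "pathfinding", grid="B.A\n...\na.b", players=2, step_reward=-1.)
  num_actions = env.game.num_distinct_actions()
  qlearner = QLearner(0, num_actions)
  nashqlearner = MultiagentQLearner(1, 2, [num_actions] * 2,
                                    TwoPlayerNashSolver())
  time_step = env.reset()
  actions = [None, None]
  while not time_step.last():
    actions = [
        qlearner.step(time_step).action,
        nashqlearner.step(time_step, actions).action
    ]
    time_step = env.step(actions)
  # Let both learners observe the terminal step.
  qlearner.step(time_step)
  nashqlearner.step(time_step, actions)
  return qlearner, nashqlearner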
| open_spiel-master | open_spiel/python/algorithms/tabular_multiagent_qlearner.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Joint Policy-Space Response Oracles.
An implementation of JPSRO, described in https://arxiv.org/abs/2106.09435.
Bibtex / Cite:
```
@misc{marris2021multiagent,
title={Multi-Agent Training beyond Zero-Sum with Correlated Equilibrium
Meta-Solvers},
author={Luke Marris and Paul Muller and Marc Lanctot and Karl Tuyls and
Thore Graepel},
year={2021},
eprint={2106.09435},
archivePrefix={arXiv},
primaryClass={cs.MA}
}
```
"""
import itertools
import string
from absl import logging
import cvxpy as cp
import numpy as np
import scipy as sp
from open_spiel.python import policy
from open_spiel.python.algorithms import projected_replicator_dynamics
from open_spiel.python.egt import alpharank as alpharank_lib
import pyspiel
DEFAULT_ECOS_SOLVER_KWARGS = dict(
solver="ECOS",
max_iters=100000000,
abstol=1e-7,
reltol=1e-7,
feastol=1e-7,
abstol_inacc=1e-7,
reltol_inacc=1e-7,
feastol_inacc=1e-7,
verbose=False,
)
DEFAULT_OSQP_SOLVER_KWARGS = dict(
solver="OSQP",
max_iter=1000000000,
eps_abs=1e-8,
eps_rel=1e-8,
eps_prim_inf=1e-8,
eps_dual_inf=1e-8,
polish_refine_iter=100,
check_termination=1000,
sigma=1e-7, # Default 1e-6
delta=1e-7, # Default 1e-06
verbose=False,
)
DEFAULT_CVXOPT_SOLVER_KWARGS = dict(
solver="CVXOPT",
maxiters=200000,
abstol=5e-8,
reltol=5e-8,
feastol=5e-8,
refinement=10,
verbose=False,
)
INIT_POLICIES = (
"uniform", # Unopinionated but slower to evaluate.
"random_deterministic", # Faster to evaluate but requires samples.
)
UPDATE_PLAYERS_STRATEGY = (
"all",
"cycle",
"random",
)
BRS = (
"cce",
"ce",
)
BR_SELECTIONS = (
"all", # All policies.
"all_novel", # All novel policies.
"random", # Random.
"random_novel", # Random novel BR (one that has not be considered before).
"largest_gap", # The BR with the largest gap.
)
META_SOLVERS = (
"uni", # Uniform.
"undominated_uni", # Uniform over undominated strategies.
"rj", # Random joint.
"undominated_rj", # Random joint.
"rd", # Random dirichlet.
"undominated_rd", # Random dirichlet.
"prd", # Prodected replicator dynamics.
"alpharank", # Alpha-Rank
"mgce", # Maximum gini CE.
"min_epsilon_mgce", # Min Epsilon Maximum gini CE.
"approx_mgce", # Approximate Maximum gini CE.
"rmwce", # Random maximum welfare CE.
"mwce", # Maximum welfare CE.
"rvce", # Random vertex CE.
"mgcce", # Maximum gini CCE.
"min_epsilon_mgcce", # Min Epsilon Maximum gini CCE.
"approx_mgcce", # Approximate Maximum gini CE.
"rmwcce", # Random maximum welfare CCE.
"mwcce", # Maximum welfare CCE.
"rvcce", # Random vertex CCE.
)
LOG_STRING = """
Iteration {iteration: 6d}
=== ({game})
Player {player}
BRs {brs}
Num Policies {num_policies}
Unique Policies {unique}
--- ({train_meta_solver})
Train Value {train_value}
Train Gap {train_gap}
--- ({eval_meta_solver})
Eval Value {eval_value}
Eval Gap {eval_gap}
"""
DIST_TOL = 1e-8
GAP_TOL = 1e-8
RETURN_TOL = 1e-12
## Meta Solvers.
# Helper Functions - Dominated strategy elimination.
def _eliminate_dominated_payoff(
payoff, epsilon, action_labels=None, action_repeats=None, weakly=False):
"""Eliminate epsilon dominated strategies."""
num_players = payoff.shape[0]
eliminated = True
if action_labels is None:
action_labels = [np.arange(na, dtype=np.int32) for na in payoff.shape[1:]]
if action_repeats is not None:
action_repeats = [ar for ar in action_repeats]
while eliminated:
eliminated = False
for p in range(num_players):
if epsilon > 0.0:
continue
num_actions = payoff.shape[1:]
if num_actions[p] <= 1:
continue
for a in range(num_actions[p]):
index = [slice(None) for _ in range(num_players)]
index[p] = slice(a, a+1)
if weakly:
diff = payoff[p] <= payoff[p][tuple(index)]
else:
diff = payoff[p] < payoff[p][tuple(index)]
axis = tuple(range(p)) + tuple(range(p+1, num_players))
less = np.all(diff, axis=axis)
less[a] = False # Action cannot eliminate itself.
if np.any(less):
nonzero = np.nonzero(less)
payoff = np.delete(payoff, nonzero, axis=p+1)
action_labels[p] = np.delete(action_labels[p], nonzero)
if action_repeats is not None:
action_repeats[p] = np.delete(action_repeats[p], nonzero)
eliminated = True
break
return payoff, action_labels, action_repeats
def _reconstruct_dist(eliminated_dist, action_labels, num_actions):
"""Returns reconstructed dist from eliminated_dist and action_labels.
Redundant dist elements are given values 0.
Args:
eliminated_dist: Array of shape [A0E, A1E, ...].
action_labels: List of length N and shapes [[A0E], [A1E], ...].
num_actions: List of length N and values [A0, A1, ...].
Returns:
reconstructed_dist: Array of shape [A0, A1, ...].
"""
reconstructed_payoff = np.zeros(num_actions)
reconstructed_payoff[np.ix_(*action_labels)] = eliminated_dist
return reconstructed_payoff
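# --- Hedged worked example (added for illustration; not part of the original
# module). In a prisoner's dilemma "cooperate" is strictly dominated for both
# players, so elimination keeps only the (defect, defect) cell, and the
# reconstruction maps a distribution on the reduced game back to full shape.
def _example_eliminate_dominated():
  """Eliminates dominated actions in a prisoner's dilemma payoff tensor."""
  # Shape [num_players, A0, A1]; action 0 = cooperate, action 1 = defect.
  payoff = np.array([
      [[3., 0.], [5., 1.]],  # Row player payoffs.
      [[3., 5.], [0., 1.]],  # Column player payoffs.
  ])
  reduced, labels, _ = _eliminate_dominated_payoff(payoff, epsilon=0.0)
  # reduced.shape == (2, 1, 1); labels == [array([1]), array([1])].
  dist = _reconstruct_dist(
      np.ones(reduced.shape[1:]), labels, payoff.shape[1:])
  # dist == [[0., 0.], [0., 1.]]: all mass on (defect, defect).
  return reduced, labels, dist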
def _eliminate_dominated_decorator(func):
"""Wrap eliminate dominated."""
def wrapper(payoff, per_player_repeats, *args, eliminate_dominated=True,
**kwargs):
epsilon = getattr(kwargs, "epsilon", 0.0)
if not eliminate_dominated:
      return func(payoff, per_player_repeats, *args, **kwargs)
num_actions = payoff.shape[1:]
eliminated_payoff, action_labels, eliminated_action_repeats = _eliminate_dominated_payoff(
payoff, epsilon, action_repeats=per_player_repeats)
eliminated_dist, meta = func(
eliminated_payoff, eliminated_action_repeats, *args, **kwargs)
meta["eliminated_dominated_dist"] = eliminated_dist
meta["eliminated_dominated_payoff"] = eliminated_payoff
dist = _reconstruct_dist(
eliminated_dist, action_labels, num_actions)
return dist, meta
return wrapper
# Optimization.
def _try_two_solvers(func, *args, **kwargs):
try:
logging.debug("Trying CVXOPT.", flush=True)
kwargs_ = {"solver_kwargs": DEFAULT_CVXOPT_SOLVER_KWARGS, **kwargs}
res = func(*args, **kwargs_)
except: # pylint: disable=bare-except
logging.debug("CVXOPT failed. Trying OSQP.", flush=True)
kwargs_ = {"solver_kwargs": DEFAULT_OSQP_SOLVER_KWARGS, **kwargs}
res = func(*args, **kwargs_)
return res
# Helper Functions - CCEs.
def _indices(p, a, num_players):
return [a if p_ == p else slice(None) for p_ in range(num_players)]
def _sparse_indices_generator(player, action, num_actions):
indices = [(action,) if p == player else range(na)
for p, na in enumerate(num_actions)]
return itertools.product(*indices)
def _partition_by_player(val, p_vec, num_players):
"""Partitions a value by the players vector."""
parts = []
for p in range(num_players):
inds = p_vec == p
if inds.size > 0:
parts.append(val[inds])
else:
parts.append(None)
return parts
def _cce_constraints(payoff, epsilons, remove_null=True, zero_tolerance=1e-8):
"""Returns the coarse correlated constraints.
Args:
payoff: A [NUM_PLAYER, NUM_ACT_0, NUM_ACT_1, ...] shape payoff tensor.
epsilons: Per player floats corresponding to the epsilon.
remove_null: Remove null rows of the constraint matrix.
zero_tolerance: Zero out elements with small value.
Returns:
    a_mat: The gain matrix for deviating to an action, of shape
      [SUM(A), PROD(A)].
meta: Dictionary containing meta information.
"""
num_players = payoff.shape[0]
num_actions = payoff.shape[1:]
num_dists = int(np.prod(num_actions))
cor_cons = int(np.sum(num_actions))
a_mat = np.zeros([cor_cons] + list(num_actions))
p_vec = np.zeros([cor_cons], dtype=np.int32)
i_vec = np.zeros([cor_cons], dtype=np.int32)
con = 0
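  # Each row of a_mat corresponds to player p deviating to a fixed action a1:
  # its entry at joint action (a0, a_-p) is
  # payoff_p(a1, a_-p) - payoff_p(a0, a_-p) - epsilon_p, so a_mat @ x <= 0
  # bounds the expected gain from the deviation.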
for p in range(num_players):
for a1 in range(num_actions[p]):
a1_inds = tuple(_indices(p, a1, num_players))
for a0 in range(num_actions[p]):
a0_inds = tuple(_indices(p, a0, num_players))
a_mat[con][a0_inds] += payoff[p][a1_inds]
a_mat[con] -= payoff[p]
a_mat[con] -= epsilons[p]
p_vec[con] = p
      i_vec[con] = a1  # Deviation action for this constraint row.
con += 1
a_mat = np.reshape(a_mat, [cor_cons, num_dists])
a_mat[np.abs(a_mat) < zero_tolerance] = 0.0
if remove_null:
null_cons = np.any(a_mat != 0.0, axis=-1)
redundant_cons = np.max(a_mat, axis=1) >= 0
nonzero_mask = null_cons & redundant_cons
a_mat = a_mat[nonzero_mask, :].copy()
p_vec = p_vec[nonzero_mask].copy()
i_vec = i_vec[nonzero_mask].copy()
meta = dict(
p_vec=p_vec,
i_vec=i_vec,
epsilons=epsilons,
)
return a_mat, meta
def _ace_constraints(payoff, epsilons, remove_null=True, zero_tolerance=0.0):
"""Returns sparse alternate ce constraints Ax - epsilon <= 0.
Args:
payoff: Dense payoff tensor.
    epsilons: Per-player epsilon approximation values.
remove_null: Whether to remove null row constraints.
zero_tolerance: Smallest absolute value.
Returns:
a_csr: Sparse gain matrix from switching from one action to another.
e_vec: Epsilon vector.
meta: Dictionary containing meta information.
"""
num_players = payoff.shape[0]
num_actions = payoff.shape[1:]
num_dists = int(np.prod(num_actions))
num_cons = 0
for p in range(num_players):
num_cons += num_actions[p] * (num_actions[p] - 1)
a_dok = sp.sparse.dok_matrix((num_cons, num_dists))
e_vec = np.zeros([num_cons])
p_vec = np.zeros([num_cons], dtype=np.int32)
i_vec = np.zeros([num_cons, 2], dtype=np.int32)
num_null_cons = None
num_redundant_cons = None
num_removed_cons = None
if num_cons > 0:
con = 0
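    # CE constraints are conditional: for every recommended action a0 and
    # deviation a1 of player p, the expected gain from playing a1 whenever a0
    # is recommended must be at most epsilon. Only nonzero payoff differences
    # are stored to keep the matrix sparse.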
for p in range(num_players):
generator = itertools.permutations(range(num_actions[p]), 2)
for a0, a1 in generator:
a0_inds = _sparse_indices_generator(p, a0, num_actions)
a1_inds = _sparse_indices_generator(p, a1, num_actions)
for a0_ind, a1_ind in zip(a0_inds, a1_inds):
a0_ind_flat = np.ravel_multi_index(a0_ind, num_actions)
val = payoff[p][a1_ind] - payoff[p][a0_ind]
if abs(val) > zero_tolerance:
a_dok[con, a0_ind_flat] = val
e_vec[con] = epsilons[p]
p_vec[con] = p
i_vec[con] = [a0, a1]
con += 1
a_csr = a_dok.tocsr()
if remove_null:
null_cons = np.logical_or(
a_csr.max(axis=1).todense() != 0.0,
a_csr.min(axis=1).todense() != 0.0)
null_cons = np.ravel(null_cons)
redundant_cons = np.ravel(a_csr.max(axis=1).todense()) >= e_vec
nonzero_mask = null_cons & redundant_cons
a_csr = a_csr[nonzero_mask, :]
e_vec = e_vec[nonzero_mask].copy()
p_vec = p_vec[nonzero_mask].copy()
i_vec = i_vec[nonzero_mask].copy()
num_null_cons = np.sum(~null_cons)
num_redundant_cons = np.sum(~redundant_cons)
num_removed_cons = np.sum(~nonzero_mask)
else:
a_csr = a_dok.tocsr()
meta = dict(
p_vec=p_vec,
i_vec=i_vec,
epsilons=epsilons,
num_null_cons=num_null_cons,
num_redundant_cons=num_redundant_cons,
num_removed_cons=num_removed_cons,
)
return a_csr, e_vec, meta
def _get_repeat_factor(action_repeats):
"""Returns the repeat factors for the game."""
num_players = len(action_repeats)
out_labels = string.ascii_lowercase[:len(action_repeats)]
in_labels = ",".join(out_labels)
repeat_factor = np.ravel(np.einsum(
"{}->{}".format(in_labels, out_labels), *action_repeats))
indiv_repeat_factors = []
for player in range(num_players):
action_repeats_ = [
np.ones_like(ar) if player == p else ar
for p, ar in enumerate(action_repeats)]
indiv_repeat_factor = np.ravel(np.einsum(
"{}->{}".format(in_labels, out_labels), *action_repeats_))
indiv_repeat_factors.append(indiv_repeat_factor)
return repeat_factor, indiv_repeat_factors
# Solvers.
def _linear(
payoff,
a_mat,
e_vec,
action_repeats=None,
solver_kwargs=None,
cost=None):
"""Returns linear solution.
This is a linear program.
Args:
payoff: A [NUM_PLAYER, NUM_ACT_0, NUM_ACT_1, ...] shape payoff tensor.
    a_mat: Constraint matrix.
e_vec: Epsilon vector.
action_repeats: List of action repeat counts.
solver_kwargs: Solver kwargs.
cost: Cost function of same shape as payoff.
Returns:
An epsilon-correlated equilibrium.
"""
num_players = payoff.shape[0]
num_actions = payoff.shape[1:]
num_dists = int(np.prod(num_actions))
if solver_kwargs is None:
solver_kwargs = DEFAULT_ECOS_SOLVER_KWARGS
if a_mat.shape[0] > 0:
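    # Linear program: maximize welfare (or the supplied cost) over joint
    # distributions x subject to sum(x) == 1 and the deviation-gain
    # constraints a_mat @ x <= e_vec.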
# Variables.
x = cp.Variable(num_dists, nonneg=True)
# Classifier.
epsilon_dists = cp.matmul(a_mat, x) - e_vec
# Constraints.
dist_eq_con = cp.sum(x) == 1
cor_lb_con = epsilon_dists <= 0
# Objective.
if cost is None:
player_totals = [
cp.sum(cp.multiply(payoff[p].flat, x)) for p in range(num_players)]
reward = cp.sum(player_totals)
else:
reward = cp.sum(cp.multiply(cost.flat, x))
obj = cp.Maximize(reward)
prob = cp.Problem(obj, [
dist_eq_con,
cor_lb_con,
])
# Solve.
prob.solve(**solver_kwargs)
status = prob.status
# Distribution.
dist = np.reshape(x.value, num_actions)
# Other.
val = reward.value
else:
if action_repeats is not None:
repeat_factor, _ = _get_repeat_factor(action_repeats)
x = repeat_factor / np.sum(repeat_factor)
else:
x = np.ones([num_dists]) / num_dists
val = 0.0 # Fix me.
dist = np.reshape(x, num_actions)
status = None
meta = dict(
x=x,
a_mat=a_mat,
val=val,
status=status,
payoff=payoff,
consistent=True,
unique=False,
)
return dist, meta
def _qp_cce(
payoff,
a_mats,
e_vecs,
assume_full_support=False,
action_repeats=None,
solver_kwargs=None,
min_epsilon=False):
"""Returns the correlated equilibrium with maximum Gini impurity.
Args:
payoff: A [NUM_PLAYER, NUM_ACT_0, NUM_ACT_1, ...] shape payoff tensor.
a_mats: A [NUM_CON, PROD(A)] shape gain tensor.
e_vecs: Epsilon vector.
assume_full_support: Whether to ignore beta values.
action_repeats: Vector of action repeats for each player.
solver_kwargs: Additional kwargs for solver.
min_epsilon: Whether to minimize epsilon.
Returns:
An epsilon-correlated equilibrium.
"""
num_players = payoff.shape[0]
num_actions = payoff.shape[1:]
num_dists = int(np.prod(num_actions))
if solver_kwargs is None:
solver_kwargs = DEFAULT_OSQP_SOLVER_KWARGS
epsilon = None
nonzero_cons = [a_mat.shape[0] > 0 for a_mat in a_mats if a_mat is not None]
if any(nonzero_cons):
x = cp.Variable(num_dists, nonneg=(not assume_full_support))
if min_epsilon:
epsilon = cp.Variable(nonpos=True)
e_vecs = [epsilon] * num_players
if action_repeats is not None:
repeat_factor, _ = _get_repeat_factor(action_repeats)
x_repeated = cp.multiply(x, repeat_factor)
dist_eq_con = cp.sum(x_repeated) == 1
cor_lb_cons = [
cp.matmul(a_mat, cp.multiply(x, repeat_factor)) <= e_vec
for a_mat, e_vec in
zip(a_mats, e_vecs) if a_mat.size > 0]
eye = sp.sparse.diags(repeat_factor)
else:
repeat_factor = 1
x_repeated = x
dist_eq_con = cp.sum(x_repeated) == 1
cor_lb_cons = [
cp.matmul(a_mat, x) <= e_vec for a_mat, e_vec in
zip(a_mats, e_vecs) if a_mat.size > 0]
eye = sp.sparse.eye(num_dists)
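    # 1 - x^T diag(repeats) x is the (repeat-weighted) Gini impurity of the
    # distribution; maximizing it picks the most spread-out feasible (C)CE.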
# This is more memory efficient than using cp.sum_squares.
cost = 1 - cp.quad_form(x, eye)
if min_epsilon:
cost -= cp.multiply(2, epsilon)
obj = cp.Maximize(cost)
prob = cp.Problem(obj, [dist_eq_con] + cor_lb_cons)
cost_value = prob.solve(**solver_kwargs)
status = prob.status
alphas = [cor_lb_con.dual_value for cor_lb_con in cor_lb_cons]
lamb = dist_eq_con.dual_value
val = cost.value
x = x_repeated.value
dist = np.reshape(x, num_actions)
else:
cost_value = 0.0
val = 1 - 1 / num_dists
if action_repeats is not None:
repeat_factor, _ = _get_repeat_factor(action_repeats)
x = repeat_factor / np.sum(repeat_factor)
else:
x = np.ones([num_dists]) / num_dists
dist = np.reshape(x, num_actions)
status = None
alphas = [np.zeros([])]
lamb = None
meta = dict(
x=x,
a_mats=a_mats,
status=status,
cost=cost_value,
val=val,
alphas=alphas,
lamb=lamb,
unique=True,
min_epsilon=None if epsilon is None else epsilon.value,
)
return dist, meta
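# _qp_ce mirrors _qp_cce, but scales each player's constraint block by that
# player's individual repeat factors (which exclude the player's own repeats),
# as required for the conditional (non-coarse) CE constraints.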
def _qp_ce(
payoff,
a_mats,
e_vecs,
assume_full_support=False,
action_repeats=None,
solver_kwargs=None,
min_epsilon=False):
"""Returns the correlated equilibrium with maximum Gini impurity.
Args:
payoff: A [NUM_PLAYER, NUM_ACT_0, NUM_ACT_1, ...] shape payoff tensor.
a_mats: A [NUM_CON, PROD(A)] shape gain tensor.
e_vecs: Epsilon vector.
assume_full_support: Whether to ignore beta values.
action_repeats: Vector of action repeats for each player.
solver_kwargs: Additional kwargs for solver.
min_epsilon: Whether to minimize epsilon.
Returns:
An epsilon-correlated equilibrium.
"""
num_players = payoff.shape[0]
num_actions = payoff.shape[1:]
num_dists = int(np.prod(num_actions))
if solver_kwargs is None:
solver_kwargs = DEFAULT_OSQP_SOLVER_KWARGS
epsilon = None
nonzero_cons = [a_mat.shape[0] > 0 for a_mat in a_mats if a_mat is not None]
if any(nonzero_cons):
x = cp.Variable(num_dists, nonneg=(not assume_full_support))
if min_epsilon:
epsilon = cp.Variable(nonpos=True)
e_vecs = [epsilon] * num_players
if action_repeats is not None:
repeat_factor, indiv_repeat_factors = _get_repeat_factor(
action_repeats)
x_repeated = cp.multiply(x, repeat_factor)
dist_eq_con = cp.sum(x_repeated) == 1
cor_lb_cons = [
cp.matmul(a_mat, cp.multiply(x, rf)) <= e_vec for a_mat, e_vec, rf in
zip(a_mats, e_vecs, indiv_repeat_factors) if a_mat.size > 0]
eye = sp.sparse.diags(repeat_factor)
else:
repeat_factor = 1
x_repeated = x
dist_eq_con = cp.sum(x_repeated) == 1
cor_lb_cons = [
cp.matmul(a_mat, x) <= e_vec for a_mat, e_vec in
zip(a_mats, e_vecs) if a_mat.size > 0]
eye = sp.sparse.eye(num_dists)
# This is more memory efficient than using cp.sum_squares.
cost = 1 - cp.quad_form(x, eye)
if min_epsilon:
cost -= cp.multiply(2, epsilon)
obj = cp.Maximize(cost)
prob = cp.Problem(obj, [dist_eq_con] + cor_lb_cons)
cost_value = prob.solve(**solver_kwargs)
status = prob.status
alphas = [cor_lb_con.dual_value for cor_lb_con in cor_lb_cons]
lamb = dist_eq_con.dual_value
val = cost.value
x = x_repeated.value
dist = np.reshape(x, num_actions)
else:
cost_value = 0.0
val = 1 - 1 / num_dists
if action_repeats is not None:
repeat_factor, indiv_repeat_factors = _get_repeat_factor(
action_repeats)
x = repeat_factor / np.sum(repeat_factor)
else:
x = np.ones([num_dists]) / num_dists
dist = np.reshape(x, num_actions)
status = None
alphas = [np.zeros([])]
lamb = None
meta = dict(
x=x,
a_mats=a_mats,
status=status,
cost=cost_value,
val=val,
alphas=alphas,
lamb=lamb,
unique=True,
min_epsilon=None if epsilon is None else epsilon.value,
)
return dist, meta
def _expand_meta_game(meta_game, per_player_repeats):
num_players = meta_game.shape[0]
for player in range(num_players):
meta_game = np.repeat(meta_game, per_player_repeats[player], axis=player+1)
return meta_game
def _unexpand_meta_dist(meta_dist, per_player_repeats):
num_players = len(meta_dist.shape)
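  # np.add.reduceat sums the probability mass of repeated copies of the same
  # policy back into a single entry along each player's axis.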
for player in range(num_players):
meta_dist = np.add.reduceat(
meta_dist, [0] + np.cumsum(per_player_repeats[player]).tolist()[:-1],
axis=player)
return meta_dist
# Meta-solvers - Baselines.
def _uni(meta_game, per_player_repeats, ignore_repeats=False):
"""Uniform."""
if ignore_repeats:
num_policies = meta_game.shape[1:]
num_dists = np.prod(num_policies)
meta_dist = np.full(num_policies, 1./num_dists)
else:
outs = [ppr / np.sum(ppr) for ppr in per_player_repeats]
labels = string.ascii_lowercase[:len(outs)]
comma_labels = ",".join(labels)
meta_dist = np.einsum("{}->{}".format(comma_labels, labels), *outs)
return meta_dist, dict()
@_eliminate_dominated_decorator
def _undominated_uni(meta_game, per_player_repeats, ignore_repeats=False):
"""Undominated uniform."""
return _uni(meta_game, per_player_repeats, ignore_repeats=ignore_repeats)
def _rj(meta_game, per_player_repeats, ignore_repeats=False):
"""Random joint."""
ignore_repeats = True
pvals, _ = _uni(
meta_game, per_player_repeats, ignore_repeats=ignore_repeats)
meta_dist = np.reshape(
np.random.multinomial(1, pvals.flat), pvals.shape).astype(np.float64)
return meta_dist, dict()
@_eliminate_dominated_decorator
def _undominated_rj(meta_game, per_player_repeats, ignore_repeats=False):
"""Undominated random joint."""
return _rj(meta_game, per_player_repeats, ignore_repeats=ignore_repeats)
def _rd(meta_game, per_player_repeats, ignore_repeats=False):
"""Random dirichlet."""
ignore_repeats = True
if ignore_repeats:
num_policies = meta_game.shape[1:]
alpha = np.ones(num_policies)
else:
outs = [ppr for ppr in per_player_repeats]
labels = string.ascii_lowercase[:len(outs)]
comma_labels = ",".join(labels)
alpha = np.einsum("{}->{}".format(comma_labels, labels), *outs)
meta_dist = np.reshape(
np.random.dirichlet(alpha.flat), alpha.shape).astype(np.float64)
return meta_dist, dict()
@_eliminate_dominated_decorator
def _undominated_rd(meta_game, per_player_repeats, ignore_repeats=False):
"""Undominated random dirichlet."""
return _rd(meta_game, per_player_repeats, ignore_repeats=ignore_repeats)
def _prd(meta_game, per_player_repeats, ignore_repeats=False):
"""Projected replicator dynamics."""
if not ignore_repeats:
meta_game = _expand_meta_game(meta_game, per_player_repeats)
meta_dist = projected_replicator_dynamics.projected_replicator_dynamics(
meta_game)
labels = string.ascii_lowercase[:len(meta_dist)]
comma_labels = ",".join(labels)
meta_dist = np.einsum("{}->{}".format(comma_labels, labels), *meta_dist)
meta_dist[meta_dist < DIST_TOL] = 0.0
meta_dist /= np.sum(meta_dist)
meta_dist = _unexpand_meta_dist(meta_dist, per_player_repeats)
return meta_dist, dict()
@_eliminate_dominated_decorator
def _alpharank(meta_game, per_player_repeats, ignore_repeats=False):
"""AlphaRank."""
if not ignore_repeats:
meta_game = _expand_meta_game(meta_game, per_player_repeats)
meta_dist = alpharank_lib.sweep_pi_vs_epsilon([mg for mg in meta_game])
meta_dist[meta_dist < DIST_TOL] = 0.0
meta_dist /= np.sum(meta_dist)
meta_dist = np.reshape(meta_dist, meta_game.shape[1:])
if not ignore_repeats:
meta_dist = _unexpand_meta_dist(meta_dist, per_player_repeats)
return meta_dist, dict()
# Meta-solvers - CEs.
@_eliminate_dominated_decorator
def _mgce(meta_game, per_player_repeats, ignore_repeats=False):
"""Maximum Gini CE."""
a_mat, e_vec, meta = _ace_constraints(
meta_game, [0.0] * len(per_player_repeats), remove_null=True,
zero_tolerance=1e-8)
a_mats = _partition_by_player(
a_mat, meta["p_vec"], len(per_player_repeats))
e_vecs = _partition_by_player(
e_vec, meta["p_vec"], len(per_player_repeats))
dist, _ = _try_two_solvers(
_qp_ce,
meta_game, a_mats, e_vecs,
action_repeats=(None if ignore_repeats else per_player_repeats))
return dist, dict()
@_eliminate_dominated_decorator
def _min_epsilon_mgce(meta_game, per_player_repeats, ignore_repeats=False):
"""Min Epsilon Maximum Gini CE."""
a_mat, e_vec, meta = _ace_constraints(
meta_game, [0.0] * len(per_player_repeats), remove_null=True,
zero_tolerance=1e-8)
a_mats = _partition_by_player(
a_mat, meta["p_vec"], len(per_player_repeats))
e_vecs = _partition_by_player(
e_vec, meta["p_vec"], len(per_player_repeats))
dist, _ = _try_two_solvers(
_qp_ce,
meta_game, a_mats, e_vecs,
action_repeats=(None if ignore_repeats else per_player_repeats),
min_epsilon=True)
return dist, dict()
@_eliminate_dominated_decorator
def _approx_mgce(meta_game, per_player_repeats, ignore_repeats=False,
epsilon=0.01):
"""Approximate Maximum Gini CE."""
a_mat, e_vec, meta = _ace_constraints(
meta_game, [0.0] * len(per_player_repeats), remove_null=True,
zero_tolerance=1e-8)
max_ab = 0.0
if a_mat.size:
max_ab = np.max(a_mat.mean(axis=1))
a_mat, e_vec, meta = _ace_constraints(
meta_game, [epsilon * max_ab] * len(per_player_repeats), remove_null=True,
zero_tolerance=1e-8)
a_mats = _partition_by_player(
a_mat, meta["p_vec"], len(per_player_repeats))
e_vecs = _partition_by_player(
e_vec, meta["p_vec"], len(per_player_repeats))
dist, _ = _try_two_solvers(
_qp_ce,
meta_game, a_mats, e_vecs,
action_repeats=(None if ignore_repeats else per_player_repeats))
return dist, dict()
@_eliminate_dominated_decorator
def _rmwce(meta_game, per_player_repeats, ignore_repeats=False):
"""Random maximum welfare CE."""
del ignore_repeats
num_players = len(per_player_repeats)
cost = np.ravel(np.sum(meta_game, axis=0))
cost += np.ravel(np.random.normal(size=meta_game.shape[1:])) * 1e-6
a_mat, e_vec, _ = _ace_constraints(
meta_game, [0.0] * num_players, remove_null=True,
zero_tolerance=1e-8)
x, _ = _linear(meta_game, a_mat, e_vec, cost=cost)
dist = np.reshape(x, meta_game.shape[1:])
return dist, dict()
@_eliminate_dominated_decorator
def _mwce(meta_game, per_player_repeats, ignore_repeats=False):
"""Maximum welfare CE."""
del ignore_repeats
num_players = len(per_player_repeats)
cost = np.ravel(np.sum(meta_game, axis=0))
a_mat, e_vec, _ = _ace_constraints(
meta_game, [0.0] * num_players, remove_null=True,
zero_tolerance=1e-8)
x, _ = _linear(meta_game, a_mat, e_vec, cost=cost)
dist = np.reshape(x, meta_game.shape[1:])
return dist, dict()
@_eliminate_dominated_decorator
def _rvce(meta_game, per_player_repeats, ignore_repeats=False):
"""Random vertex CE."""
del ignore_repeats
num_players = len(per_player_repeats)
cost = np.ravel(np.random.normal(size=meta_game.shape[1:]))
a_mat, e_vec, _ = _ace_constraints(
meta_game, [0.0] * num_players, remove_null=True,
zero_tolerance=1e-8)
x, _ = _linear(meta_game, a_mat, e_vec, cost=cost)
dist = np.reshape(x, meta_game.shape[1:])
return dist, dict()
# Meta-solvers - CCEs.
def _mgcce(meta_game, per_player_repeats, ignore_repeats=False):
"""Maximum Gini CCE."""
a_mat, meta = _cce_constraints(
meta_game, [0.0] * len(per_player_repeats), remove_null=True,
zero_tolerance=1e-8)
a_mats = _partition_by_player(
a_mat, meta["p_vec"], len(per_player_repeats))
dist, _ = _try_two_solvers(
_qp_cce,
meta_game, a_mats, [0.0] * len(per_player_repeats),
action_repeats=(None if ignore_repeats else per_player_repeats))
return dist, dict()
def _min_epsilon_mgcce(meta_game, per_player_repeats, ignore_repeats=False):
"""Min Epsilon Maximum Gini CCE."""
a_mat, meta = _cce_constraints(
meta_game, [0.0] * len(per_player_repeats), remove_null=True,
zero_tolerance=1e-8)
a_mats = _partition_by_player(
a_mat, meta["p_vec"], len(per_player_repeats))
dist, _ = _try_two_solvers(
_qp_cce,
meta_game, a_mats, [0.0] * len(per_player_repeats),
action_repeats=(None if ignore_repeats else per_player_repeats),
min_epsilon=True)
return dist, dict()
def _approx_mgcce(meta_game, per_player_repeats, ignore_repeats=False,
epsilon=0.01):
"""Maximum Gini CCE."""
a_mat, meta = _cce_constraints(
meta_game, [0.0] * len(per_player_repeats), remove_null=True,
zero_tolerance=1e-8)
max_ab = 0.0
if a_mat.size:
max_ab = np.max(a_mat.mean(axis=1))
a_mat, meta = _cce_constraints(
meta_game, [epsilon * max_ab] * len(per_player_repeats), remove_null=True,
zero_tolerance=1e-8)
a_mats = _partition_by_player(
a_mat, meta["p_vec"], len(per_player_repeats))
dist, _ = _try_two_solvers(
_qp_cce,
meta_game, a_mats, [0.0] * len(per_player_repeats),
action_repeats=(None if ignore_repeats else per_player_repeats))
return dist, dict()
def _rmwcce(meta_game, per_player_repeats, ignore_repeats=False):
"""Random maximum welfare CCE."""
del ignore_repeats
num_players = len(per_player_repeats)
cost = np.ravel(np.sum(meta_game, axis=0))
cost += np.ravel(np.random.normal(size=meta_game.shape[1:])) * 1e-6
a_mat, _ = _cce_constraints(
meta_game, [0.0] * num_players, remove_null=True,
zero_tolerance=1e-8)
e_vec = np.zeros([a_mat.shape[0]])
x, _ = _linear(meta_game, a_mat, e_vec, cost=cost)
dist = np.reshape(x, meta_game.shape[1:])
return dist, dict()
def _mwcce(meta_game, per_player_repeats, ignore_repeats=False):
"""Maximum welfare CCE."""
del ignore_repeats
num_players = len(per_player_repeats)
cost = np.ravel(np.sum(meta_game, axis=0))
a_mat, _ = _cce_constraints(
meta_game, [0.0] * num_players, remove_null=True,
zero_tolerance=1e-8)
e_vec = np.zeros([a_mat.shape[0]])
x, _ = _linear(meta_game, a_mat, e_vec, cost=cost)
dist = np.reshape(x, meta_game.shape[1:])
return dist, dict()
def _rvcce(meta_game, per_player_repeats, ignore_repeats=False):
"""Random vertex CCE."""
del ignore_repeats
num_players = len(per_player_repeats)
cost = np.ravel(np.random.normal(size=meta_game.shape[1:]))
a_mat, _ = _cce_constraints(
meta_game, [0.0] * num_players, remove_null=True,
zero_tolerance=1e-8)
e_vec = np.zeros([a_mat.shape[0]])
x, _ = _linear(meta_game, a_mat, e_vec, cost=cost)
dist = np.reshape(x, meta_game.shape[1:])
return dist, dict()
# Flags to functions.
_FLAG_TO_FUNC = dict(
uni=_uni,
undominated_uni=_undominated_uni,
rj=_rj,
undominated_rj=_undominated_rj,
rd=_rd,
undominated_rd=_undominated_rd,
prd=_prd,
alpharank=_alpharank,
mgce=_mgce,
min_epsilon_mgce=_min_epsilon_mgce,
approx_mgce=_approx_mgce,
rmwce=_rmwce,
mwce=_mwce,
rvce=_rvce,
mgcce=_mgcce,
min_epsilon_mgcce=_min_epsilon_mgcce,
approx_mgcce=_approx_mgcce,
rmwcce=_rmwcce,
mwcce=_mwcce,
rvcce=_rvcce,
)
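# Example (hypothetical usage sketch, assuming cvxpy and its default solvers
# are available): solve a small random 2-player meta-game with the
# maximum-Gini CCE meta-solver, counting every policy once.
#   meta_game = np.random.rand(2, 3, 3)  # [num_players, num_policies_0, ...]
#   per_player_repeats = [[1, 1, 1], [1, 1, 1]]
#   dist, _ = _FLAG_TO_FUNC["mgcce"](meta_game, per_player_repeats)
#   # np.sum(dist) should be approximately 1, and dist has shape (3, 3).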
## PSRO Functions.
def intilize_policy(game, player, policy_init):
"""Returns initial policy."""
if policy_init == "uniform":
new_policy = policy.TabularPolicy(game, players=(player,))
elif policy_init == "random_deterministic":
new_policy = policy.TabularPolicy(game, players=(player,))
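    # Sample a one-hot row per information state from the uniform policy,
    # yielding a uniformly random deterministic policy.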
for i in range(new_policy.action_probability_array.shape[0]):
new_policy.action_probability_array[i] = np.random.multinomial(
1, new_policy.action_probability_array[i]).astype(np.float64)
else:
raise ValueError(
"policy_init must be a valid initialization strategy: %s. "
"Received: %s" % (INIT_POLICIES, policy_init))
return new_policy
def add_new_policies(
per_player_new_policies,
per_player_gaps,
per_player_repeats,
per_player_policies,
joint_policies,
joint_returns,
game,
br_selection):
"""Adds novel policies from new policies."""
num_players = len(per_player_new_policies)
per_player_num_novel_policies = [0 for _ in range(num_players)]
# Update policies and policy counts.
for player in range(num_players):
new_policies = per_player_new_policies[player]
new_gaps = per_player_gaps[player]
repeat_policies = []
repeat_gaps = []
repeat_ids = []
novel_policies = []
novel_gaps = []
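    # The for/else below enters the else branch only if no break occurred,
    # i.e. the new policy matched no existing policy and is therefore novel.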
for new_policy, new_gap in zip(new_policies, new_gaps):
for policy_id, policy_ in enumerate(per_player_policies[player]):
if np.all( # New policy is not novel.
new_policy.action_probability_array ==
policy_.action_probability_array): # pytype: disable=attribute-error # py39-upgrade
logging.debug("Player %d's new policy is not novel.", player)
repeat_policies.append(new_policy)
repeat_gaps.append(new_gap)
repeat_ids.append(policy_id)
break
else: # New policy is novel.
logging.debug("Player %d's new policy is novel.", player)
novel_policies.append(new_policy)
novel_gaps.append(new_gap)
add_novel_policies = []
add_repeat_ids = []
if (novel_policies or repeat_policies):
if br_selection == "all":
add_novel_policies.extend(novel_policies)
add_repeat_ids.extend(repeat_ids)
elif br_selection == "all_novel":
add_novel_policies.extend(novel_policies)
elif br_selection == "random":
index = np.random.randint(0, len(repeat_policies) + len(novel_policies))
if index < len(novel_policies):
add_novel_policies.append(novel_policies[index])
else:
add_repeat_ids.append(repeat_ids[index - len(novel_policies)])
elif br_selection == "random_novel":
if novel_policies:
index = np.random.randint(0, len(novel_policies))
add_novel_policies.append(novel_policies[index])
else: # Fall back on random.
index = np.random.randint(0, len(repeat_policies))
add_repeat_ids.append(repeat_ids[index])
elif br_selection == "largest_gap":
if novel_policies:
index = np.argmax(novel_gaps)
if novel_gaps[index] == 0.0: # Fall back to random when zero.
index = np.random.randint(0, len(novel_policies))
add_novel_policies.append(novel_policies[index])
else: # Fall back on random.
index = np.random.randint(0, len(repeat_policies))
add_repeat_ids.append(repeat_ids[index])
else:
raise ValueError("Unrecognized br_selection method: %s"
% br_selection)
for add_repeat_id in add_repeat_ids:
per_player_repeats[player][add_repeat_id] += 1
for add_novel_policy in add_novel_policies:
per_player_policies[player].append(add_novel_policy) # Add new policy.
per_player_repeats[player].append(1) # Add new count.
per_player_num_novel_policies[player] += 1
# Add new joint policies.
for pids in itertools.product(*[
range(len(policies)) for policies in per_player_policies]):
if pids in joint_policies:
continue
logging.debug("Evaluating novel joint policy: %s.", pids)
policies = [
policies[pid] for pid, policies in zip(pids, per_player_policies)]
python_tabular_policy = policy.merge_tabular_policies(
policies, game)
pyspiel_tabular_policy = policy.python_policy_to_pyspiel_policy(
python_tabular_policy)
joint_policies[pids] = pyspiel_tabular_policy
joint_returns[pids] = [
0.0 if abs(er) < RETURN_TOL else er
for er in pyspiel.expected_returns(
game.new_initial_state(), pyspiel_tabular_policy, -1, True)]
return per_player_num_novel_policies
def add_meta_game(
meta_games,
per_player_policies,
joint_returns):
"""Returns a meta-game tensor."""
per_player_num_policies = [
len(policies) for policies in per_player_policies]
shape = [len(per_player_num_policies)] + per_player_num_policies
meta_game = np.zeros(shape)
for pids in itertools.product(*[
range(np_) for np_ in per_player_num_policies]):
meta_game[(slice(None),) + pids] = joint_returns[pids]
meta_games.append(meta_game)
return meta_games
def add_meta_dist(
meta_dists, meta_values, meta_solver, meta_game, per_player_repeats,
ignore_repeats):
"""Returns meta_dist."""
num_players = meta_game.shape[0]
meta_solver_func = _FLAG_TO_FUNC[meta_solver]
meta_dist, _ = meta_solver_func(
meta_game, per_player_repeats, ignore_repeats=ignore_repeats)
# Clean dist.
meta_dist = meta_dist.astype(np.float64)
meta_dist[meta_dist < DIST_TOL] = 0.0
meta_dist[meta_dist > 1.0] = 1.0
meta_dist /= np.sum(meta_dist)
meta_dist[meta_dist > 1.0] = 1.0
meta_dists.append(meta_dist)
meta_value = np.sum(
meta_dist * meta_game, axis=tuple(range(1, num_players + 1)))
meta_values.append(meta_value)
return meta_dist
def find_best_response(
game,
meta_dist,
meta_game,
iteration,
joint_policies,
target_equilibrium,
update_players_strategy,
action_value_tolerance,
):
"""Returns new best response policies."""
num_players = meta_game.shape[0]
per_player_num_policies = meta_dist.shape[:]
# Player update strategy.
if update_players_strategy == "all":
players = list(range(num_players))
elif update_players_strategy == "cycle":
players = [iteration % num_players]
elif update_players_strategy == "random":
players = [np.random.randint(0, num_players)]
else:
raise ValueError(
"update_players_strategy must be a valid player update strategy: "
"%s. Received: %s" % (UPDATE_PLAYERS_STRATEGY, update_players_strategy))
# Find best response.
per_player_new_policies = []
per_player_deviation_incentives = []
if target_equilibrium == "cce":
for player in range(num_players):
if player in players:
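        # CCE best response: marginalize the player's own axis out of the
        # joint distribution and best respond to the remaining correlation
        # device over opponent joint policies.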
joint_policy_ids = itertools.product(*[
(np_-1,) if p_ == player else range(np_) for p_, np_
in enumerate(per_player_num_policies)])
joint_policies_slice = [
joint_policies[jpid] for jpid in joint_policy_ids]
meta_dist_slice = np.sum(meta_dist, axis=player)
meta_dist_slice[meta_dist_slice < DIST_TOL] = 0.0
meta_dist_slice[meta_dist_slice > 1.0] = 1.0
meta_dist_slice /= np.sum(meta_dist_slice)
meta_dist_slice = meta_dist_slice.flat
mu = [(p, mp) for mp, p in zip(joint_policies_slice, meta_dist_slice)
if p > 0]
info = pyspiel.cce_dist(
game,
mu,
player,
prob_cut_threshold=0.0,
action_value_tolerance=action_value_tolerance)
new_policy = policy.pyspiel_policy_to_python_policy(
game, info.best_response_policies[0], players=(player,))
on_policy_value = np.sum(meta_game[player] * meta_dist)
deviation_incentive = max(
info.best_response_values[0] - on_policy_value, 0)
if deviation_incentive < GAP_TOL:
deviation_incentive = 0.0
per_player_new_policies.append([new_policy])
per_player_deviation_incentives.append([deviation_incentive])
else:
per_player_new_policies.append([])
per_player_deviation_incentives.append([])
elif target_equilibrium == "ce":
for player in range(num_players):
if player in players:
per_player_new_policies.append([])
per_player_deviation_incentives.append([])
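        # CE best response: condition on each recommended policy pid, best
        # respond to the conditional opponent distribution, and weight the
        # resulting gap by the probability that pid is recommended.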
for pid in range(per_player_num_policies[player]):
joint_policy_ids = itertools.product(*[
(pid,) if p_ == player else range(np_) for p_, np_
in enumerate(per_player_num_policies)])
joint_policies_slice = [
joint_policies[jpid] for jpid in joint_policy_ids]
inds = tuple((pid,) if player == p_ else slice(None)
for p_ in range(num_players))
meta_dist_slice = np.ravel(meta_dist[inds]).copy()
meta_dist_slice[meta_dist_slice < DIST_TOL] = 0.0
meta_dist_slice[meta_dist_slice > 1.0] = 1.0
meta_dist_slice_sum = np.sum(meta_dist_slice)
if meta_dist_slice_sum > 0.0:
meta_dist_slice /= meta_dist_slice_sum
mu = [(p, mp) for mp, p in
zip(joint_policies_slice, meta_dist_slice)
if p > 0]
info = pyspiel.cce_dist(
game,
mu,
player,
prob_cut_threshold=0.0,
action_value_tolerance=action_value_tolerance)
new_policy = policy.pyspiel_policy_to_python_policy(
game, info.best_response_policies[0], players=(player,))
on_policy_value = np.sum(
np.ravel(meta_game[player][inds]) * meta_dist_slice)
deviation_incentive = max(
info.best_response_values[0] - on_policy_value, 0)
if deviation_incentive < GAP_TOL:
deviation_incentive = 0.0
per_player_new_policies[-1].append(new_policy)
per_player_deviation_incentives[-1].append(
meta_dist_slice_sum * deviation_incentive)
else:
per_player_new_policies.append([])
per_player_deviation_incentives.append([])
else:
raise ValueError(
"target_equilibrium must be a valid best response strategy: %s. "
"Received: %s" % (BRS, target_equilibrium))
return per_player_new_policies, per_player_deviation_incentives
## Main Loop.
def initialize(game, train_meta_solver, eval_meta_solver, policy_init,
ignore_repeats, br_selection):
"""Return initialized data structures."""
num_players = game.num_players()
# Initialize.
iteration = 0
per_player_repeats = [[] for _ in range(num_players)]
per_player_policies = [[] for _ in range(num_players)]
joint_policies = {} # Eg. (1, 0): Joint policy.
joint_returns = {}
meta_games = []
train_meta_dists = []
eval_meta_dists = []
train_meta_values = []
eval_meta_values = []
train_meta_gaps = []
eval_meta_gaps = []
# Initialize policies.
per_player_new_policies = [
[intilize_policy(game, player, policy_init)]
for player in range(num_players)]
per_player_gaps_train = [[1.0] for player in range(num_players)]
per_player_num_novel_policies = add_new_policies(
per_player_new_policies, per_player_gaps_train, per_player_repeats,
per_player_policies, joint_policies, joint_returns, game, br_selection)
del per_player_num_novel_policies
add_meta_game(
meta_games,
per_player_policies,
joint_returns)
add_meta_dist(
train_meta_dists, train_meta_values, train_meta_solver,
meta_games[-1], per_player_repeats, ignore_repeats)
add_meta_dist(
eval_meta_dists, eval_meta_values, eval_meta_solver,
meta_games[-1], per_player_repeats, ignore_repeats)
return (
iteration,
per_player_repeats,
per_player_policies,
joint_policies,
joint_returns,
meta_games,
train_meta_dists,
eval_meta_dists,
train_meta_values,
eval_meta_values,
train_meta_gaps,
eval_meta_gaps)
def initialize_callback_(
iteration,
per_player_repeats,
per_player_policies,
joint_policies,
joint_returns,
meta_games,
train_meta_dists,
eval_meta_dists,
train_meta_values,
eval_meta_values,
train_meta_gaps,
eval_meta_gaps,
game):
"""Callback which allows initializing from checkpoint."""
del game
checkpoint = None
return (
iteration,
per_player_repeats,
per_player_policies,
joint_policies,
joint_returns,
meta_games,
train_meta_dists,
eval_meta_dists,
train_meta_values,
eval_meta_values,
train_meta_gaps,
eval_meta_gaps,
checkpoint)
def callback_(
iteration,
per_player_repeats,
per_player_policies,
joint_policies,
joint_returns,
meta_games,
train_meta_dists,
eval_meta_dists,
train_meta_values,
eval_meta_values,
train_meta_gaps,
eval_meta_gaps,
kwargs,
checkpoint):
"""Callback for updating checkpoint."""
del iteration, per_player_repeats, per_player_policies, joint_policies
del joint_returns, meta_games, train_meta_dists, eval_meta_dists
del train_meta_values, eval_meta_values, train_meta_gaps, eval_meta_gaps
del kwargs
return checkpoint
def run_loop(game,
game_name,
seed=0,
iterations=40,
policy_init="uniform",
update_players_strategy="all",
target_equilibrium="cce",
br_selection="largest_gap",
train_meta_solver="mgcce",
eval_meta_solver="mwcce",
ignore_repeats=False,
initialize_callback=None,
action_value_tolerance=-1.0,
callback=None):
"""Runs JPSRO."""
if initialize_callback is None:
initialize_callback = initialize_callback_
if callback is None:
callback = callback_
kwargs = dict(
game=game,
game_name=game_name,
seed=seed,
iterations=iterations,
policy_init=policy_init,
update_players_strategy=update_players_strategy,
target_equilibrium=target_equilibrium,
br_selection=br_selection,
train_meta_solver=train_meta_solver,
eval_meta_solver=eval_meta_solver,
ignore_repeats=ignore_repeats,
)
# Set seed.
np.random.seed(seed)
# Some statistics.
num_players = game.num_players() # Look in the game.
# Initialize.
values = initialize(game, train_meta_solver, eval_meta_solver, policy_init,
ignore_repeats, br_selection)
# Initialize Callback.
(iteration,
per_player_repeats,
per_player_policies,
joint_policies,
joint_returns,
meta_games,
train_meta_dists,
eval_meta_dists,
train_meta_values,
eval_meta_values,
train_meta_gaps,
eval_meta_gaps,
checkpoint) = initialize_callback(*values, game)
# Run JPSRO.
while iteration <= iterations:
logging.debug("Beginning JPSRO iteration %03d", iteration)
per_player_new_policies, per_player_gaps_train = find_best_response(
game,
train_meta_dists[-1],
meta_games[-1],
iteration,
joint_policies,
target_equilibrium,
update_players_strategy,
action_value_tolerance,
)
train_meta_gaps.append([sum(gaps) for gaps in per_player_gaps_train])
_, per_player_gaps_eval = find_best_response(
game,
eval_meta_dists[-1],
meta_games[-1],
iteration,
joint_policies,
target_equilibrium,
update_players_strategy,
action_value_tolerance,
)
eval_meta_gaps.append([sum(gaps) for gaps in per_player_gaps_eval])
per_player_num_novel_policies = add_new_policies(
per_player_new_policies, per_player_gaps_train, per_player_repeats,
per_player_policies, joint_policies, joint_returns, game, br_selection)
del per_player_num_novel_policies
add_meta_game(
meta_games,
per_player_policies,
joint_returns)
add_meta_dist(
train_meta_dists, train_meta_values, train_meta_solver,
meta_games[-1], per_player_repeats, ignore_repeats)
add_meta_dist(
eval_meta_dists, eval_meta_values, eval_meta_solver,
meta_games[-1], per_player_repeats, ignore_repeats)
# Stats.
per_player_num_policies = train_meta_dists[-1].shape[:]
log_string = LOG_STRING.format(
iteration=iteration,
game=game_name,
player=("{: 12d}" * num_players).format(*list(range(num_players))),
brs="",
num_policies=("{: 12d}" * num_players).format(*[
sum(ppr) for ppr in per_player_repeats]),
unique=("{: 12d}" * num_players).format(*per_player_num_policies),
train_meta_solver=train_meta_solver,
train_value=("{: 12g}" * num_players).format(*train_meta_values[-1]),
train_gap=("{: 12g}" * num_players).format(*train_meta_gaps[-1]),
eval_meta_solver=eval_meta_solver,
eval_value=("{: 12g}" * num_players).format(*eval_meta_values[-1]),
eval_gap=("{: 12g}" * num_players).format(*eval_meta_gaps[-1]),
)
logging.info(log_string)
# Increment.
iteration += 1
# Callback.
checkpoint = callback(
iteration,
per_player_repeats,
per_player_policies,
joint_policies,
joint_returns,
meta_games,
train_meta_dists,
eval_meta_dists,
train_meta_values,
eval_meta_values,
train_meta_gaps,
eval_meta_gaps,
kwargs,
checkpoint)
| open_spiel-master | open_spiel/python/algorithms/jpsro.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.policy_aggregator."""
import unittest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import policy_aggregator
import pyspiel
class PolicyAggregatorTest(parameterized.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "kuhn_poker",
"game_name": "kuhn_poker"
}, {
"testcase_name": "leduc_poker",
"game_name": "leduc_poker"
})
def test_policy_aggregation_random(self, game_name):
env = rl_environment.Environment(game_name)
policies = [[policy.UniformRandomPolicy(env.game)
for _ in range(2)]
for _ in range(2)]
probabilities = [
list(np.ones(len(policies)) / len(policies)) for _ in range(2)
]
pol_ag = policy_aggregator.PolicyAggregator(env.game)
aggr_policy = pol_ag.aggregate([0], policies, probabilities)
for item in aggr_policy.policy[0].items():
_, probs = zip(*item[1].items())
const_probs = tuple([probs[0]] * len(probs))
self.assertEqual(probs, const_probs)
@parameterized.named_parameters(
{
"testcase_name": "kuhn_poker",
"game_name": "kuhn_poker"
}, {
"testcase_name": "leduc_poker",
"game_name": "leduc_poker"
})
def test_policy_aggregation_tabular_randinit(self, game_name):
env = rl_environment.Environment(game_name)
mother_policy = policy.TabularPolicy(env.game).copy_with_noise(
1, 10, np.random.RandomState(0))
policies = [[mother_policy.__copy__() for _ in range(2)] for _ in range(2)]
probabilities = [
list(np.ones(len(policies)) / len(policies)) for _ in range(2)
]
pol_ag = policy_aggregator.PolicyAggregator(env.game)
aggr_policy = pol_ag.aggregate([0], policies, probabilities)
for state, value in aggr_policy.policy[0].items():
polici = mother_policy.policy_for_key(state)
value_normal = {
action: probability
for action, probability in enumerate(polici)
if probability > 0
}
for key in value_normal.keys():
self.assertAlmostEqual(value[key], value_normal[key], 8)
@parameterized.named_parameters({
"testcase_name": "tic_tac_toe",
"game_name": "tic_tac_toe",
})
def test_policy_aggregation_variadic(self, game_name):
game = pyspiel.load_game(game_name)
uniform_policy = policy.UniformRandomPolicy(game)
first_action_policy = policy.FirstActionPolicy(game)
pol_ag = policy_aggregator.PolicyAggregator(game)
weights0 = [1.0, 0.0]
player0 = pol_ag.aggregate(
list(range(game.num_players())),
[[uniform_policy, first_action_policy]] + [[uniform_policy]] *
(game.num_players() - 1),
[weights0] + [[1.0]] * (game.num_players() - 1))
state = game.new_initial_state()
action_prob = player0.action_probabilities(state)
for action in action_prob:
if action_prob[action] > 0:
self.assertAlmostEqual(action_prob[action],
1. / len(state.legal_actions()))
if __name__ == "__main__":
unittest.main()
| open_spiel-master | open_spiel/python/algorithms/policy_aggregator_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Play bots against each other."""
import pyspiel
def evaluate_bots(state, bots, rng):
"""Plays bots against each other, returns terminal utility for each bot."""
for bot in bots:
bot.restart_at(state)
while not state.is_terminal():
if state.is_chance_node():
outcomes, probs = zip(*state.chance_outcomes())
action = rng.choice(outcomes, p=probs)
for bot in bots:
bot.inform_action(state, pyspiel.PlayerId.CHANCE, action)
state.apply_action(action)
elif state.is_simultaneous_node():
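      # Players with no legal actions this turn submit INVALID_ACTION.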
joint_actions = [
bot.step(state)
if state.legal_actions(player_id) else pyspiel.INVALID_ACTION
for player_id, bot in enumerate(bots)
]
state.apply_actions(joint_actions)
else:
current_player = state.current_player()
action = bots[current_player].step(state)
for i, bot in enumerate(bots):
if i != current_player:
bot.inform_action(state, current_player, action)
state.apply_action(action)
return state.returns()
| open_spiel-master | open_spiel/python/algorithms/evaluate_bots.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.mmd_dilated.py."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.algorithms import mmd_dilated
import pyspiel
_DATA = [
{
'game':
pyspiel.load_game('kuhn_poker'),
'inverse_alpha':
10,
'gambit_qre_sol': [
np.array([
1., 0.75364232, 0.64695966, 0.10668266, 0.24635768, 0.70309809,
0.25609184, 0.44700625, 0.29690191, 0.47546799, 0.01290797,
0.46256001, 0.52453201
]),
np.array([
1., 0.63415944, 0.36584056, 0.41154828, 0.58845172, 0.28438486,
0.71561514, 0.0620185, 0.9379815, 0.65005434, 0.34994566,
0.79722767, 0.20277233
])
]
},
{
'game':
pyspiel.load_game('dark_hex(board_size=2,gameversion=adh)'),
'inverse_alpha':
2,
'gambit_qre_sol': [
np.array([
1., 0.1997415, 0.0630504, 0.0320848, 0.0309656, 0.0320848,
0.0309656, 0.0696913, 0.0669998, 0.0334999, 0.0334999,
0.0334999, 0.0334999, 0.0377519, 0.0252985, 0.0252985,
0.0252985, 0.0347624, 0.0347624, 0.0349289, 0.0349289, 0.0273,
0.0273, 0.0396998, 0.0273, 0.3002587, 0.0832425, 0.0414444,
0.0417981, 0.0414444, 0.0417981, 0.0983483, 0.1186679,
0.0423458, 0.0408967, 0.0423458, 0.0408967, 0.0397914,
0.0397914, 0.0585569, 0.0397914, 0.047948, 0.047948, 0.0707199,
0.047948, 0.3002587, 0.1186679, 0.0707199, 0.047948, 0.047948,
0.047948, 0.0983483, 0.0832425, 0.0408967, 0.0408967, 0.0423458,
0.0585569, 0.0397914, 0.0397914, 0.0397914, 0.0423458,
0.0417981, 0.0417981, 0.0414444, 0.0414444, 0.1997415,
0.0669998, 0.0396998, 0.0273, 0.0273, 0.0273, 0.0696913,
0.0630504, 0.0309656, 0.0309656, 0.0320848, 0.0334999,
0.0334999, 0.0334999, 0.0349289, 0.0349289, 0.0347624,
0.0347624, 0.0320848, 0.0334999, 0.0252985, 0.0252985,
0.0377519, 0.0252985
]),
np.array([
1., 0.22738648, 0.07434555, 0.0790954, 0.03965962, 0.03943577,
0.07394554, 0.03468592, 0.03925961, 0.03965962, 0.03468592,
0.27261352, 0.10172918, 0.06014879, 0.04158039, 0.08865251,
0.08223183, 0.04230736, 0.03992446, 0.04171322, 0.0405186,
0.27261352, 0.08223183, 0.0405186, 0.04171322, 0.08865251,
0.03437272, 0.05427979, 0.10172918, 0.04158039, 0.06014879,
0.22738648, 0.08605167, 0.0346029, 0.05144877, 0.08678769,
0.03319034, 0.05359735, 0.05454711, 0.04462109, 0.0421666,
0.05454711, 0.08678769, 0.0421666, 0.04462109, 0.08605167,
0.04355502, 0.04249665, 0.05083895, 0.11106131, 0.05083895,
0.06022236, 0.11071326, 0.05083895, 0.05987431, 0.03992446,
0.04230736, 0.04249665, 0.04355502, 0.05359735, 0.03319034,
0.05144877, 0.0346029, 0.05427979, 0.03437272, 0.11071326,
0.05987431, 0.05083895, 0.11106131, 0.06022236, 0.05083895,
0.05083895, 0.07394554, 0.0790954, 0.03943577, 0.03965962,
0.07434555, 0.03468592, 0.03965962, 0.03925961, 0.03468592
])
]
},
]
class MMDDilatedTest(parameterized.TestCase):
@parameterized.parameters(*_DATA)
def test_solution_fixed_point(self, game, inverse_alpha, gambit_qre_sol):
# Check if a QRE solution is a fixed point of MMD
mmd = mmd_dilated.MMDDilatedEnt(game, 1. / inverse_alpha)
mmd.sequences = copy.deepcopy(gambit_qre_sol)
mmd.update_sequences()
np.testing.assert_allclose(
mmd.current_sequences()[0], gambit_qre_sol[0], rtol=1e-6)
np.testing.assert_allclose(
mmd.current_sequences()[1], gambit_qre_sol[1], rtol=1e-6)
@parameterized.parameters(*_DATA)
def test_gap(self, game, inverse_alpha, gambit_qre_sol):
mmd = mmd_dilated.MMDDilatedEnt(game, 1. / inverse_alpha)
mmd.sequences = copy.deepcopy(gambit_qre_sol)
np.testing.assert_allclose(mmd.get_gap(), 0., atol=1e-6)
@parameterized.parameters((0.), (0.5), (1.), (1.5))
def test_rps_update(self, alpha):
game = pyspiel.load_game_as_turn_based('matrix_rps')
start_sequences = [
np.array([1, 0.2, 0.2, 0.6]),
np.array([1, 0.5, 0.2, 0.3])
]
mmd = mmd_dilated.MMDDilatedEnt(game, alpha)
mmd.sequences = copy.deepcopy(start_sequences)
mmd.update_sequences()
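    # The dilated-entropy MMD step in closed form is
    #   x' proportional to
    #   (x * exp(-stepsize * grad)) ** (1 / (1 + stepsize * alpha)),
    # which the manual per-player updates below reproduce.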
updated_sequences = copy.deepcopy(start_sequences)
# manually perform update for p1
updated_sequences[0][1:] = updated_sequences[0][1:] * np.exp(
mmd.stepsize * -mmd.payoff_mat[1:, 1:] @ start_sequences[1][1:])
updated_sequences[0][1:] = updated_sequences[0][1:]**(
1. / (1 + mmd.stepsize * alpha))
updated_sequences[0][1:] = updated_sequences[0][1:] / np.sum(
updated_sequences[0][1:])
np.testing.assert_allclose(mmd.current_sequences()[0], updated_sequences[0])
# manually perform update for p2
updated_sequences[1][1:] = updated_sequences[1][1:] * np.exp(
mmd.stepsize * mmd.payoff_mat[1:, 1:].T @ start_sequences[0][1:])
updated_sequences[1][1:] = updated_sequences[1][1:]**(
1. / (1 + mmd.stepsize * alpha))
updated_sequences[1][1:] = updated_sequences[1][1:] / np.sum(
updated_sequences[1][1:])
np.testing.assert_allclose(mmd.current_sequences()[1], updated_sequences[1])
if alpha > 0:
# gap cannot be computed for a value of alpha = 0
# check that uniform random has a gap of zero
mmd.sequences = [
np.array([1, 0.33333333, 0.33333333, 0.33333333]),
np.array([1, 0.33333333, 0.33333333, 0.33333333])
]
np.testing.assert_allclose(mmd.get_gap(), 0.)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/mmd_dilated_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.rpg."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import policy_gradient
from open_spiel.python.algorithms.losses import rl_losses
import pyspiel
class PolicyGradientTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
itertools.product(("rpg", "qpg", "rm", "a2c"),
("kuhn_poker", "leduc_poker")))
def test_run_game(self, loss_str, game_name):
env = rl_environment.Environment(game_name)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with self.session() as sess:
agents = [
policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension
sess,
player_id=player_id,
info_state_size=info_state_size,
num_actions=num_actions,
loss_str=loss_str,
hidden_layers_sizes=[8, 8],
batch_size=16,
entropy_cost=0.001,
critic_learning_rate=0.01,
pi_learning_rate=0.01,
num_critic_before_pi=4) for player_id in [0, 1]
]
sess.run(tf.global_variables_initializer())
for _ in range(2):
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
@absltest.skip("Causing a segmentation fault on wheel tests")
def test_run_hanabi(self):
# Hanabi is an optional game, so check we have it before running the test.
game = "hanabi"
if game not in pyspiel.registered_names():
return
num_players = 3
env_configs = {
"players": num_players,
"max_life_tokens": 1,
"colors": 2,
"ranks": 3,
"hand_size": 2,
"max_information_tokens": 3,
"discount": 0.
}
env = rl_environment.Environment(game, **env_configs)
info_state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with self.session() as sess:
agents = [
policy_gradient.PolicyGradient( # pylint: disable=g-complex-comprehension
sess,
player_id=player_id,
info_state_size=info_state_size,
num_actions=num_actions,
hidden_layers_sizes=[8, 8],
batch_size=16,
entropy_cost=0.001,
critic_learning_rate=0.01,
pi_learning_rate=0.01,
num_critic_before_pi=4) for player_id in range(num_players)
]
sess.run(tf.global_variables_initializer())
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
agent_output = [agent.step(time_step) for agent in agents]
time_step = env.step([agent_output[current_player].action])
for agent in agents:
agent.step(time_step)
def test_loss_modes(self):
loss_dict = {
"qpg": rl_losses.BatchQPGLoss,
"rpg": rl_losses.BatchRPGLoss,
"rm": rl_losses.BatchRMLoss,
"a2c": rl_losses.BatchA2CLoss,
}
with self.session() as sess:
for loss_str, loss_class in loss_dict.items():
agent_by_str = policy_gradient.PolicyGradient(
sess,
player_id=0,
info_state_size=32,
num_actions=2,
loss_str=loss_str,
loss_class=None)
agent_by_class = policy_gradient.PolicyGradient(
sess,
player_id=0,
info_state_size=32,
num_actions=2,
loss_str=None,
loss_class=loss_class)
self.assertEqual(agent_by_str._pi_loss.shape,
agent_by_class._pi_loss.shape)
self.assertEqual(agent_by_str._pi_loss.dtype,
agent_by_class._pi_loss.dtype)
self.assertEqual(agent_by_str._pi_loss.op.type,
agent_by_class._pi_loss.op.type)
if __name__ == "__main__":
tf.test.main()
| open_spiel-master | open_spiel/python/algorithms/policy_gradient_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy aggregator.
Turns a weighted sum of N policies into a realization-equivalent single
policy by sweeping over the state space.
"""
import copy
import itertools
from open_spiel.python import policy
class PolicyFunction(policy.Policy):
"""A callable policy class."""
def __init__(self, pids, policies, game):
"""Construct a policy function.
Arguments:
pids: spiel player id of players these policies belong to.
policies: a list of dictionaries of keys (stringified binary observations)
to a list of probabilities for each move uid (between 0 and max_moves -
1).
game: OpenSpiel game.
"""
super().__init__(game, pids)
self._policies = policies
self._game_type = game.get_type()
def _state_key(self, state, player_id=None):
"""Returns the key to use to look up this (state, player_id) pair."""
if self._game_type.provides_information_state_string:
if player_id is None:
return state.information_state_string()
else:
return state.information_state_string(player_id)
elif self._game_type.provides_observation_tensor:
if player_id is None:
return state.observation_tensor()
else:
return state.observation_tensor(player_id)
else:
return str(state)
@property
def policy(self):
return self._policies
def action_probabilities(self, state, player_id=None):
"""Returns the policy for a player in a state.
Args:
state: A `pyspiel.State` object.
player_id: Optional, the player id for whom we want an action. Optional
unless this is a simultaneous state at which multiple players can act.
Returns:
A `dict` of `{action: probability}` for the specified player in the
supplied state.
"""
state_key = self._state_key(state, player_id=player_id)
if state.is_simultaneous_node():
# for simultaneous node, assume player id must be provided
assert player_id >= 0
return self._policies[player_id][state_key]
if player_id is None:
player_id = state.current_player()
return self._policies[player_id][state_key]
class PolicyPool(object):
"""Transforms a list of list of policies (One list per player) to callable."""
def __init__(self, policies):
"""Transforms a list of list of policies (One list per player) to callable.
Args:
policies: List of list of policies.
"""
self._policies = policies
def __call__(self, state, player):
return [
a.action_probabilities(state, player_id=player)
for a in self._policies[player]
]
class PolicyAggregator(object):
"""Main aggregator object."""
def __init__(self, game, epsilon=1e-40):
self._game = game
self._game_type = game.get_type()
self._num_players = self._game.num_players()
self._policy_pool = None
self._weights = None
self._policy = {}
self._epsilon = epsilon
def _state_key(self, state, player_id=None):
"""Returns the key to use to look up this (state, player) pair."""
# TODO(somidshafiei): fuse this with the identical PolicyFunction._state_key
if self._game_type.provides_information_state_string:
if player_id is None:
return state.information_state_string()
else:
return state.information_state_string(player_id)
elif self._game_type.provides_observation_string:
if player_id is None:
return state.observation_string()
else:
return state.observation_string(player_id)
else:
return str(state)
def aggregate(self, pids, policies, weights):
"""Aggregate the list of policies for each player.
Arguments:
pids: the spiel player ids of the players the strategies belong to.
policies: List of list of policies (One list per player)
weights: the list of weights to attach to each policy.
Returns:
A PolicyFunction, a callable object representing the policy.
"""
aggr_policies = []
for pid in pids:
aggr_policies.append(self._sub_aggregate(pid, policies, weights))
return PolicyFunction(pids, aggr_policies, self._game)
def _sub_aggregate(self, pid, policies, weights):
"""Aggregate the list of policies for one player.
Arguments:
pid: the spiel player id of the player the strategies belong to.
policies: List of list of policies (One list per player)
weights: the list of weights to attach to each policy.
Returns:
      A dict of {state_key: {action: probability}} for player `pid`, obtained
      by mixing the input policies with the given weights.
"""
self._policy_pool = PolicyPool(policies)
assert self._policy_pool is not None
self._weights = weights
# string of state -> probs list
self._policy = {}
state = self._game.new_initial_state()
my_reaches = weights[:]
self._rec_aggregate(pid, state, my_reaches)
# Now normalize
for key in self._policy:
actions, probabilities = zip(*self._policy[key].items())
      # Add a small amount of probability mass to avoid division by zero, which
      # happens in games with low reach probabilities for certain states (keys).
new_probs = [prob + self._epsilon for prob in probabilities]
denom = sum(new_probs)
for i in range(len(actions)):
self._policy[key][actions[i]] = new_probs[i] / denom
return self._policy
def _rec_aggregate(self, pid, state, my_reaches):
"""Recursively traverse game tree to compute aggregate policy."""
if state.is_terminal():
return
elif state.is_simultaneous_node():
policies = self._policy_pool(state, pid)
state_key = self._state_key(state, pid)
self._policy[state_key] = {}
used_moves = state.legal_actions(pid)
for uid in used_moves:
new_reaches = copy.deepcopy(my_reaches)
for i in range(len(policies)):
# compute the new reach for each policy for this action
new_reaches[pid][i] *= policies[i].get(uid, 0)
# add reach * prob(a) for this policy to the computed policy
if uid in self._policy[state_key].keys():
self._policy[state_key][uid] += new_reaches[pid][i]
else:
self._policy[state_key][uid] = new_reaches[pid][i]
num_players = self._game.num_players()
all_other_used_moves = []
for player in range(num_players):
if player != pid:
all_other_used_moves.append(state.legal_actions(player))
other_joint_actions = itertools.product(*all_other_used_moves)
# enumerate every possible other-agent actions for next-state
for other_joint_action in other_joint_actions:
for uid in used_moves:
new_reaches = copy.deepcopy(my_reaches)
for i in range(len(policies)):
# compute the new reach for each policy for this action
new_reaches[pid][i] *= policies[i].get(uid, 0)
joint_action = list(
other_joint_action[:pid] + (uid,) + other_joint_action[pid:]
)
new_state = state.clone()
new_state.apply_actions(joint_action)
self._rec_aggregate(pid, new_state, new_reaches)
return
elif state.is_chance_node():
# do not factor in opponent reaches
outcomes, _ = zip(*state.chance_outcomes())
for i in range(0, len(outcomes)):
outcome = outcomes[i]
new_state = state.clone()
new_state.apply_action(outcome)
self._rec_aggregate(pid, new_state, my_reaches)
return
else:
turn_player = state.current_player()
state_key = self._state_key(state, turn_player)
legal_policies = self._policy_pool(state, turn_player)
if pid == turn_player:
# update the current node
# will need the observation to query the policies
if state_key not in self._policy:
self._policy[state_key] = {}
used_moves = state.legal_actions(turn_player)
for uid in used_moves:
new_reaches = copy.deepcopy(my_reaches)
if pid == turn_player:
for i in range(len(legal_policies)):
# compute the new reach for each policy for this action
new_reaches[turn_player][i] *= legal_policies[i].get(uid, 0)
# add reach * prob(a) for this policy to the computed policy
if uid in self._policy[state_key].keys():
self._policy[state_key][uid] += new_reaches[turn_player][i]
else:
self._policy[state_key][uid] = new_reaches[turn_player][i]
# recurse
new_state = state.clone()
new_state.apply_action(uid)
self._rec_aggregate(pid, new_state, new_reaches)
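# Illustrative usage sketch (not part of the original module; the identifiers
# below other than PolicyAggregator come from pyspiel and open_spiel.python):
# aggregate two tabular policies per player for Kuhn poker, weighting the two
# policies equally for every player.
#
#   import pyspiel
#   from open_spiel.python import policy as policy_lib
#   game = pyspiel.load_game("kuhn_poker")
#   per_player_policies = [
#       [policy_lib.TabularPolicy(game), policy_lib.UniformRandomPolicy(game)]
#       for _ in range(game.num_players())]
#   weights = [[0.5, 0.5] for _ in range(game.num_players())]
#   aggregator = PolicyAggregator(game)
#   aggregated = aggregator.aggregate([0, 1], per_player_policies, weights)
#   # `aggregated` is a PolicyFunction usable like any other OpenSpiel policy.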
| open_spiel-master | open_spiel/python/algorithms/policy_aggregator.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Implementations of classical fictitious play.
See https://en.wikipedia.org/wiki/Fictitious_play.
"""
import itertools
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import exploitability
def _uniform_policy(state):
legal_actions = state.legal_actions()
return [(action, 1.0 / len(legal_actions)) for action in legal_actions]
def _callable_tabular_policy(tabular_policy):
"""Turns a tabular policy into a callable.
Args:
tabular_policy: A dictionary mapping information state key to a dictionary
of action probabilities (action -> prob).
Returns:
A function `state` -> list of (action, prob)
"""
def wrap(state):
infostate_key = state.information_state_string(state.current_player())
assert infostate_key in tabular_policy
ap_list = []
for action in state.legal_actions():
assert action in tabular_policy[infostate_key]
ap_list.append((action, tabular_policy[infostate_key][action]))
return ap_list
return wrap
class JointPolicy(policy.Policy):
"""A policy for all players in the game."""
def __init__(self, game, policies):
"""Initializes a joint policy from a table of callables.
Args:
game: The game being played.
policies: A dictionary mapping player number to a function `state` ->
list of (action, prob).
"""
super().__init__(game, list(range(game.num_players())))
self.policies = policies
  def action_probabilities(self, state, player_id=None):
    if player_id is None:
      player_id = state.current_player()
    return dict(self.policies[player_id](state))
def _full_best_response_policy(br_infoset_dict):
"""Turns a dictionary of best response action selections into a full policy.
Args:
br_infoset_dict: A dictionary mapping information state to a best response
action.
Returns:
A function `state` -> list of (action, prob)
"""
def wrap(state):
infostate_key = state.information_state_string(state.current_player())
br_action = br_infoset_dict[infostate_key]
ap_list = []
for action in state.legal_actions():
ap_list.append((action, 1.0 if action == br_action else 0.0))
return ap_list
return wrap
def _policy_dict_at_state(callable_policy, state):
"""Turns a policy function into a dictionary at a specific state.
Args:
    callable_policy: A function from `state` -> list of (action, prob).
state: the specific state to extract the policy from.
Returns:
A dictionary of action -> prob at this state.
"""
infostate_policy_list = callable_policy(state)
infostate_policy = {}
for ap in infostate_policy_list:
infostate_policy[ap[0]] = ap[1]
return infostate_policy
class XFPSolver(object):
"""An implementation of extensive-form fictitious play (XFP).
XFP is Algorithm 1 in (Heinrich, Lanctot, and Silver, 2015, "Fictitious
Self-Play in Extensive-Form Games"). Refer to the paper for details:
http://mlanctot.info/files/papers/icml15-fsp.pdf.
"""
def __init__(self, game, save_oracles=False):
"""Initialize the XFP solver.
Arguments:
game: the open_spiel game object.
save_oracles: a boolean, indicating whether or not to save all the BR
policies along the way (including the initial uniform policy). This
could take up some space, and is only used when generating the meta-game
for analysis.
"""
self._game = game
self._num_players = self._game.num_players()
    # If save_oracles is true, a list (one per player) of every best-response
    # callable computed so far, including the initial uniform policy.
    self._oracles = [] if save_oracles else None
# A set of callables that take in a state and return a list of
# (action, probability) tuples.
self._policies = []
for _ in range(self._num_players):
self._policies.append(_uniform_policy)
if save_oracles:
self._oracles.append([_uniform_policy])
self._best_responses = [None] * self._num_players
self._iterations = 0
self._delta_tolerance = 1e-10
self._average_policy_tables = []
def average_policy_tables(self):
"""Returns a dictionary of information state -> dict of action -> prob.
This is a joint policy (policy for all players).
"""
return self._average_policy_tables
def average_policy(self):
"""Returns the current average joint policy (policy for all players)."""
return JointPolicy(self._game, self._policies)
def iteration(self):
self._iterations += 1
self.compute_best_responses()
self.update_average_policies()
def compute_best_responses(self):
"""Updates self._oracles to hold best responses for each player."""
for i in range(self._num_players):
# Compute a best response policy to pi_{-i}.
# First, construct pi_{-i}.
joint_policy = self.average_policy()
br_info = exploitability.best_response(self._game,
joint_policy.to_tabular(), i)
full_br_policy = _full_best_response_policy(
br_info["best_response_action"])
self._best_responses[i] = full_br_policy
if self._oracles is not None:
self._oracles[i].append(full_br_policy)
def update_average_policies(self):
"""Update the average policies given the newly computed best response."""
br_reach_probs = np.ones(self._num_players)
avg_reach_probs = np.ones(self._num_players)
self._average_policy_tables = [{} for _ in range(self._num_players)]
self._recursively_update_average_policies(self._game.new_initial_state(),
avg_reach_probs, br_reach_probs)
for i in range(self._num_players):
self._policies[i] = _callable_tabular_policy(
self._average_policy_tables[i])
def _recursively_update_average_policies(self, state, avg_reach_probs,
br_reach_probs):
"""Recursive implementation of the average strategy update."""
if state.is_terminal():
return
elif state.is_chance_node():
for action, _ in state.chance_outcomes():
new_state = state.clone()
new_state.apply_action(action)
self._recursively_update_average_policies(new_state, avg_reach_probs,
br_reach_probs)
else:
player = state.current_player()
avg_policy = _policy_dict_at_state(self._policies[player], state)
br_policy = _policy_dict_at_state(self._best_responses[player], state)
legal_actions = state.legal_actions()
infostate_key = state.information_state_string(player)
# First traverse the subtrees.
for action in legal_actions:
assert action in br_policy
assert action in avg_policy
new_state = state.clone()
new_state.apply_action(action)
new_avg_reach = np.copy(avg_reach_probs)
new_avg_reach[player] *= avg_policy[action]
new_br_reach = np.copy(br_reach_probs)
new_br_reach[player] *= br_policy[action]
self._recursively_update_average_policies(new_state, new_avg_reach,
new_br_reach)
# Now, do the updates.
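      # The new average policy at this infoset is a realization-weighted mix of
      # the current average policy and the best response:
      #   pi_avg(a) <- pi_avg(a)
      #       + alpha * x_br / ((1 - alpha) * x_avg + alpha * x_br)
      #       * (pi_br(a) - pi_avg(a)),
      # where x_avg and x_br are this player's reach probabilities under the
      # average and best-response policies and alpha = 1 / (iterations + 1).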
if infostate_key not in self._average_policy_tables[player]:
alpha = 1 / (self._iterations + 1)
self._average_policy_tables[player][infostate_key] = {}
pr_sum = 0.0
for action in legal_actions:
pr = (
avg_policy[action] + (alpha * br_reach_probs[player] *
(br_policy[action] - avg_policy[action])) /
((1.0 - alpha) * avg_reach_probs[player] +
alpha * br_reach_probs[player]))
self._average_policy_tables[player][infostate_key][action] = pr
pr_sum += pr
assert (1.0 - self._delta_tolerance <= pr_sum <=
1.0 + self._delta_tolerance)
def sample_episode(self, state, policies):
"""Samples an episode according to the policies, starting from state.
Args:
state: Pyspiel state representing the current state.
policies: List of policy representing the policy executed by each player.
Returns:
The result of the call to returns() of the final state in the episode.
Meant to be a win/loss integer.
"""
if state.is_terminal():
return np.array(state.returns(), dtype=np.float32)
elif state.is_chance_node():
outcomes = []
probs = []
for action, prob in state.chance_outcomes():
outcomes.append(action)
probs.append(prob)
outcome = np.random.choice(outcomes, p=probs)
state.apply_action(outcome)
return self.sample_episode(state, policies)
else:
player = state.current_player()
state_policy = _policy_dict_at_state(policies[player], state)
actions = []
probs = []
for action in state_policy:
actions.append(action)
probs.append(state_policy[action])
action = np.random.choice(actions, p=probs)
state.apply_action(action)
return self.sample_episode(state, policies)
def sample_episodes(self, policies, num):
"""Samples episodes and averages their returns.
Args:
policies: A list of policies representing the policies executed by each
player.
num: Number of episodes to execute to estimate average return of policies.
Returns:
Average episode return over num episodes.
"""
totals = np.zeros(self._num_players)
for _ in range(num):
totals += self.sample_episode(self._game.new_initial_state(), policies)
return totals / num
def get_empirical_metagame(self, sims_per_entry, seed=None):
"""Gets a meta-game tensor of utilities from episode samples.
The tensor is a cross-table of all the saved oracles and initial uniform
policy.
Args:
sims_per_entry: number of simulations (episodes) to perform per entry in
the tables, i.e. each is a crude Monte Carlo estimate
seed: the seed to set for random sampling, for reproducibility
Returns:
the K^n (KxKx...K, with dimension n) meta-game tensor where n is the
number of players and K is the number of strategies (one more than the
number of iterations of fictitious play since the initial uniform
policy is included).
"""
if seed is not None:
np.random.seed(seed=seed)
assert self._oracles is not None
num_strategies = len(self._oracles[0])
# Each metagame will be (num_strategies)^self._num_players.
# There are self._num_player metagames, one per player.
meta_games = []
for _ in range(self._num_players):
shape = [num_strategies] * self._num_players
meta_game = np.ndarray(shape=shape, dtype=np.float32)
meta_games.append(meta_game)
for coord in itertools.product(
range(num_strategies), repeat=self._num_players):
policies = []
for i in range(self._num_players):
iteration = coord[i]
policies.append(self._oracles[i][iteration])
utility_estimates = self.sample_episodes(policies, sims_per_entry)
for i in range(self._num_players):
meta_games[i][coord] = utility_estimates[i]
return meta_games
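# Illustrative usage sketch (not part of the original module): run a few XFP
# iterations on Kuhn poker and measure the exploitability of the resulting
# average policy using the exploitability module imported above.
#
#   import pyspiel
#   game = pyspiel.load_game("kuhn_poker")
#   xfp_solver = XFPSolver(game)
#   for _ in range(100):
#     xfp_solver.iteration()
#   conv = exploitability.exploitability(game, xfp_solver.average_policy())
#   # `conv` should decrease towards 0 as the number of iterations grows.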
| open_spiel-master | open_spiel/python/algorithms/fictitious_play.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.noisy_policy."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import games # pylint:disable=unused-import
from open_spiel.python import policy as openspiel_policy
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms import noisy_policy
import pyspiel
class NoisyPolicyTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(["kuhn_poker", "leduc_poker"])
def test_cpp_and_python_implementations_are_identical(self, game_name):
game = pyspiel.load_game(game_name)
policy = openspiel_policy.UniformRandomPolicy(game)
all_states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False,
to_string=lambda s: s.information_state_string())
for current_player in range(game.num_players()):
noise = noisy_policy.NoisyPolicy(
policy, player_id=current_player, alpha=0.5, beta=10.)
for state in all_states.values():
if state.current_player() < 0:
continue
if state.current_player() != current_player:
self.assertEqual(
policy.action_probabilities(state),
noise.action_probabilities(state))
else:
self.assertNotEqual(
policy.action_probabilities(state),
noise.action_probabilities(state))
@parameterized.parameters(["python_iterated_prisoners_dilemma"])
def test_simultaneous_game_noisy_policy(self, game_name):
game = pyspiel.load_game(game_name)
policy = openspiel_policy.UniformRandomPolicy(game)
all_states = get_all_states.get_all_states(
game,
depth_limit=10,
include_terminals=False,
include_chance_states=False,
to_string=lambda s: s.history_str())
for current_player in range(game.num_players()):
noise = noisy_policy.NoisyPolicy(
policy, player_id=current_player, alpha=0.5, beta=10.)
for state in all_states.values():
if state.current_player() == pyspiel.PlayerId.SIMULTANEOUS:
for player_id in range(game.num_players()):
if player_id != current_player:
self.assertEqual(
policy.action_probabilities(state, player_id),
noise.action_probabilities(state, player_id))
else:
self.assertNotEqual(
policy.action_probabilities(state, player_id),
noise.action_probabilities(state, player_id))
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/noisy_policy_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.deep_cfr."""
from absl.testing import parameterized
import tensorflow as tf
from open_spiel.python import policy
from open_spiel.python.algorithms import deep_cfr_tf2
from open_spiel.python.algorithms import exploitability
import pyspiel
class DeepCFRTest(parameterized.TestCase):
@parameterized.parameters('leduc_poker', 'kuhn_poker', 'liars_dice')
def test_deep_cfr_runs(self, game_name):
game = pyspiel.load_game(game_name)
deep_cfr_solver = deep_cfr_tf2.DeepCFRSolver(
game,
policy_network_layers=(8, 4),
advantage_network_layers=(4, 2),
num_iterations=2,
num_traversals=2,
learning_rate=1e-3,
batch_size_advantage=8,
batch_size_strategy=8,
memory_capacity=1e7)
deep_cfr_solver.solve()
def test_matching_pennies_3p(self):
# We don't expect Deep CFR to necessarily converge on 3-player games but
# it's nonetheless interesting to see this result.
game = pyspiel.load_game_as_turn_based('matching_pennies_3p')
deep_cfr_solver = deep_cfr_tf2.DeepCFRSolver(
game,
policy_network_layers=(16, 8),
advantage_network_layers=(32, 16),
num_iterations=2,
num_traversals=2,
learning_rate=1e-3,
batch_size_advantage=8,
batch_size_strategy=8,
memory_capacity=1e7)
deep_cfr_solver.solve()
conv = exploitability.nash_conv(
game,
policy.tabular_policy_from_callable(
game, deep_cfr_solver.action_probabilities))
print('Deep CFR in Matching Pennies 3p. NashConv: {}'.format(conv))
if __name__ == '__main__':
tf.test.main()
| open_spiel-master | open_spiel/python/algorithms/deep_cfr_tf2_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Approximate the limiting logit equilbrium (Nash) of a large normal-form game.
This is a python implementation of the Nash solver for normal-form games,
Average Deviation Incentive Descent with Adaptive Sampling (ADIDAS), from
"Sample-based Approximation of Nash in Large Many-player Games via Gradient
Descent" [Gemp et al, AAMAS 2022].
Link to paper: https://arxiv.org/abs/2106.01285.
The limiting logit equilibrium (LLE) was originally defined in "Quantal Response
Equilibria for Normal Form Games" [McKelvey & Palfrey, Games and Economic
Behavior 1995]. The LLE is a Nash equilibrium that is uniquely defined for
*almost* all games.
"""
import itertools
import time
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import misc
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability as nonsym_exp
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import game_runner as nonsym_game_runner
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import exploitability as sym_exp
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import game_runner as sym_game_runner
class ADIDAS(object):
"""Average Deviation Incentive Descent with Adaptive Sampling.
Approximate the limiting logit equilibrium of a normal-form game.
Attributes:
experiment_seed: int, seed for random number generator
random: numpy.random.RandomState object
results: dictionary of results populated upon completion of solver
"""
def __init__(self, seed=0):
self.experiment_seed = seed
self.random = np.random.RandomState(self.experiment_seed)
self.results = None
def estimate_exploitability_sym(self, dist, num_eval_samples, num_ckpts,
num_players, game, policies):
"""Estimate exploitability via monte carlo.
Args:
dist: 1-d np.array, estimate of nash distribution
num_eval_samples: int, number of samples to estimate exploitability
num_ckpts: int, number of checkpoints (actions, policies, ...)
num_players: int, number of players
game: game with minimal functionality (see games/small.py)
policies: list mapping checkpoints to policies
Returns:
list of exploitabilities computed using [index] monte carlo samples
"""
pg_mean = np.zeros_like(dist)
exps_estimated = []
for s in range(num_eval_samples):
base_profile = tuple([
self.random.choice(num_ckpts, p=dist) for _ in range(num_players)
])
game_queries = sym_game_runner.construct_game_queries_for_exp(
base_profile, num_ckpts)
game_results = sym_game_runner.run_games_and_record_payoffs(
game_queries, game.get_payoffs_for_strategies, policies)
pg_s = np.zeros_like(dist)
for query, payoffs in game_results.items():
pg_s[query[0]] = payoffs[0]
pg_mean = (pg_mean * float(s) + pg_s) / float(s + 1)
exps_estimated.append(pg_mean.max() - pg_mean.dot(dist))
return exps_estimated
def estimate_exploitability_nonsym(self, dist, num_eval_samples, num_ckpts,
num_players, game, policies):
"""Estimate exploitability via monte carlo.
Args:
dist: list of 1-d np.arrays, estimate of nash distribution
num_eval_samples: int, number of samples to estimate exploitability
num_ckpts: int, number of checkpoints (actions, policies, ...)
num_players: int, number of players
game: game with minimal functionality (see games/small.py)
policies: list mapping checkpoints to policies
Returns:
list of exploitabilities computed using [index] monte carlo samples
"""
pg_mean = [np.zeros_like(dist_i) for dist_i in dist]
exps_estimated = []
for s in range(num_eval_samples):
base_profile = tuple([
self.random.choice(num_ckpts[i], p=dist[i])
for i in range(num_players)
])
game_queries = nonsym_game_runner.construct_game_queries_for_exp(
base_profile, num_ckpts)
game_results = nonsym_game_runner.run_games_and_record_payoffs(
game_queries, game.get_payoffs_for_strategies, policies)
for pi_query, payoffs in game_results.items():
pi, query = pi_query
ai = query[pi]
pg_mean[pi][ai] += (payoffs[pi] - pg_mean[pi][ai]) / float(s + 1)
exp_is = []
for i in range(num_players):
exp_is.append(pg_mean[i].max() - pg_mean[i].dot(dist[i]))
exps_estimated.append(np.mean(exp_is))
return exps_estimated
def update_payoff_matrices(self, payoff_matrices, payoff_matrices_new, s):
"""Update mean of payoff matrices.
Args:
      payoff_matrices: the current running mean; a dictionary with keys as
        tuples of agents (i, j) and values of (2 x A x A) np.arrays, payoffs
        for each joint action. Keys are sorted and arrays should be indexed in
        the same order.
      payoff_matrices_new: a new sample with the same structure as
        payoff_matrices.
      s: int, sample number
Returns:
payoff_matrices with updated means
"""
if payoff_matrices:
for key in payoff_matrices_new:
new = payoff_matrices_new[key]
old = payoff_matrices[key]
payoff_matrices[key] += (new - old) / float(s + 1)
else:
payoff_matrices = payoff_matrices_new
return payoff_matrices
def construct_payoff_matrices_from_samples_sym(
self, game, dist, num_samples, policies, num_players, num_ckpts):
"""Construct payoff matrices (approx. sym. polymatrix game) from samples.
Args:
game: game with minimal functionality (see games/small.py)
dist: 1-d np.array, estimate of nash distribution
num_samples: int, `minibatch' size for stochastic gradient
policies: list mapping checkpoints to policies
num_players: int, number of players
num_ckpts: int, number of checkpoints (actions, policies, ...)
Returns:
payoff_matrices (2 x num_ckpts x num_ckpts array) to compute adidas grad
"""
payoff_matrices = np.zeros((2, num_ckpts, num_ckpts))
for _ in range(num_samples):
base_profile = tuple([
self.random.choice(num_ckpts, p=dist) for _ in range(num_players)
])
game_queries = sym_game_runner.construct_game_queries(
base_profile, num_ckpts)
game_results = sym_game_runner.run_games_and_record_payoffs(
game_queries, game.get_payoffs_for_strategies, policies)
payoff_matrices += sym_game_runner.form_payoff_matrices(
game_results, num_ckpts) / float(num_samples)
return payoff_matrices
def construct_payoff_matrices_exactly_sym(
self, game, dist, num_players):
"""Construct payoff matrices exactly (expected sym. polymatrix game).
Args:
game: game with minimal functionality (see games/small.py)
dist: 1-d np.array, estimate of nash distribution
num_players: int, number of players
Returns:
payoff_matrices (2 x A x A array) to compute adidas gradient
"""
sym_nash = [dist for _ in range(num_players)]
pt = game.payoff_tensor()
payoff_matrix_exp_0 = misc.pt_reduce(pt[0], sym_nash, [0, 1])
payoff_matrix_exp_1 = misc.pt_reduce(pt[1], sym_nash, [0, 1])
payoff_matrices = np.stack((payoff_matrix_exp_0, payoff_matrix_exp_1))
return payoff_matrices
def construct_payoff_matrices_from_samples_nonsym(
self, game, dist, num_samples, policies, num_players, num_ckpts):
"""Construct payoff matrices (approx. nonsym. polymatrix) from samples.
Args:
game: game with minimal functionality (see games/small.py)
dist: list of 1-d np.arrays, estimate of nash distribution
num_samples: int, `minibatch' size for stochastic gradient
policies: list mapping checkpoints to policies
num_players: int, number of players
num_ckpts: int, number of checkpoints (actions, policies, ...)
Returns:
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
"""
payoff_matrices = None
for s in range(num_samples):
base_profile = tuple([
self.random.choice(num_ckpts[i], p=dist[i])
for i in range(num_players)
])
game_queries = nonsym_game_runner.construct_game_queries(
base_profile, num_ckpts)
game_results = nonsym_game_runner.run_games_and_record_payoffs(
game_queries, game.get_payoffs_for_strategies, policies)
payoff_matrices_new = nonsym_game_runner.form_payoff_matrices(
game_results, num_ckpts)
payoff_matrices = self.update_payoff_matrices(payoff_matrices,
payoff_matrices_new,
s)
return payoff_matrices
def construct_payoff_matrices_exactly_nonsym(
self, game, dist, num_players):
"""Construct payoff matrices exactly (expected nonsym. polymatrix game).
Args:
game: game with minimal functionality (see games/small.py)
dist: list of 1-d np.arrays, estimate of nash distribution
num_players: int, number of players
Returns:
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
"""
pt = game.payoff_tensor()
payoff_matrices = {}
for pi, pj in itertools.combinations(range(num_players), 2):
key = (pi, pj)
pt_i = misc.pt_reduce(pt[pi], dist, [pi, pj])
pt_j = misc.pt_reduce(pt[pj], dist, [pi, pj])
payoff_matrices[key] = np.stack((pt_i, pt_j), axis=0)
return payoff_matrices
def approximate_nash(self, game, solver, sym,
num_iterations=10000, num_samples=1,
num_eval_samples=int(10e4), approx_eval=False,
exact_eval=False, avg_trajectory=False,
return_trajectory=False):
"""Runs solver on game.
Args:
game: game with minimal functionality (see games/small.py)
solver: gradient solver (see utils/updates.py)
sym: bool, true if the game is symmetric across players
num_iterations: int, number of incremental updates
num_samples: int, `minibatch' size for stochastic gradient
num_eval_samples: int, number of samples to estimate exploitability
default = # of samples for P[|sample_payoff-true| > C/100] < ~5e-7%
where C = pt.max() - pt.min();
P[|pt_grad|_inf <= C/100] > (1-5e-7)^num_actions
approx_eval: bool, whether to evaluate exploitability during
descent with stochastic samples
exact_eval: bool, whether to evaluate exploitability during
descent with exact expectation (req. full payoff tensor)
avg_trajectory: bool, whether to evaluate w.r.t. the average distribution
up to time t instead of the distribution at time t
return_trajectory: bool, whether to record all parameters (e.g., dist)
during learning and return them -- see solver code for details
Returns:
None -- dict of results stored in `results` attribute upon completion
(key=name of metric, value=[m_0, ..., m_{last_iter}])
"""
num_players = game.num_players()
num_strats = game.num_strategies()
if sym:
if len(set(num_strats)) != 1:
raise ValueError('Each player should have the same number of actions.')
num_strats = num_strats[0]
params = solver.init_vars(num_strats, num_players) # dist = params[0]
if sym:
dist_avg = np.zeros_like(params[0])
policies = list(range(num_strats))
num_ckpts = len(policies)
form_payoffs_appx = self.construct_payoff_matrices_from_samples_sym
form_payoffs_exact = self.construct_payoff_matrices_exactly_sym
exp = sym_exp
estimate_exploitability = self.estimate_exploitability_sym
else:
dist_avg = [np.zeros_like(dist_i) for dist_i in params[0]]
policies = [list(range(num_strats_i)) for num_strats_i in num_strats]
num_ckpts = [len(policy_i) for policy_i in policies]
form_payoffs_appx = self.construct_payoff_matrices_from_samples_nonsym
form_payoffs_exact = self.construct_payoff_matrices_exactly_nonsym
exp = nonsym_exp
estimate_exploitability = self.estimate_exploitability_nonsym
exps_exact = []
exps_solver_exact = []
exps_approx = []
exps_solver_approx = []
grad_norms = []
if return_trajectory:
params_traj = []
has_temp = False
if hasattr(solver, 'temperature') or hasattr(solver, 'p'):
has_temp = True
temperatures = []
if hasattr(solver, 'temperature'):
temp_attr = 'temperature'
else:
temp_attr = 'p'
early_exit = False
start = time.time()
# search for nash (sgd)
for t in range(num_iterations + 1):
dist = params[0]
if return_trajectory:
params_traj.append(params)
if has_temp:
temperatures.append(getattr(solver, temp_attr))
if num_samples < np.inf:
payoff_matrices = form_payoffs_appx(game, dist, num_samples,
policies, num_players, num_ckpts)
else:
payoff_matrices = form_payoffs_exact(game, dist, num_players)
grads, exp_sto, exp_solver_sto = solver.compute_gradients(params,
payoff_matrices)
if sym:
grads_dist = grads[0]
grad_norms.append(simplex.grad_norm(dist, grads_dist))
else:
grad_norm = 0.
grads_dist = grads[0]
for dist_i, grads_i in zip(dist, grads_dist[0]):
grad_norm += simplex.grad_norm(dist_i, grads_i)**2.
grad_norm = np.sqrt(grad_norm)
grad_norms.append(grad_norm)
if solver.has_aux:
solver.record_aux_errors(grads)
if sym:
dist_avg += (dist - dist_avg) / float(t + 1)
else:
for i, dist_i in enumerate(dist):
dist_avg[i] += (dist_i - dist_avg[i]) / float(t + 1)
if avg_trajectory:
dist_eval = dist_avg
else:
dist_eval = dist
if approx_eval:
exps_approx.append(exp_sto)
exps_solver_approx.append(exp_solver_sto)
if exact_eval:
pt = game.payoff_tensor()
exps_exact.append(exp.unreg_exploitability(dist_eval, pt))
exps_solver_exact.append(solver.exploitability(dist_eval, pt))
      # Skip the last update so we do not have to compute the matching
      # exploitability and gradient-norm information outside the loop.
if t < num_iterations:
params = solver.update(params, grads, t)
if misc.isnan(params):
print('Warning: NaN detected in params post-update. Exiting loop.')
early_exit = True
break
end = time.time()
solve_runtime = end - start
start = end
# evaluating exploitability (monte-carlo)
exp_estimated = estimate_exploitability(dist_eval, num_eval_samples,
num_ckpts, num_players,
game, policies)
eval_runtime = time.time() - start
results = {'exps_approx': exps_approx,
'exps_solver_approx': exps_solver_approx,
'exps_exact': exps_exact,
'exps_solver_exact': exps_solver_exact,
'exp_estimated': exp_estimated,
'grad_norms': grad_norms,
'dist': dist,
'dist_avg': dist_avg,
'solve_runtime': solve_runtime,
'eval_runtime': eval_runtime,
'early_exit': early_exit}
if solver.has_aux:
results.update({'aux_errors': solver.aux_errors})
if return_trajectory:
results.update({'params_trajectory': params_traj})
if has_temp:
results.update({'temperatures': temperatures})
self.results = results
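# Illustrative usage sketch (not part of the original module; `game` and
# `solver` are placeholders -- see the adidas_utils subpackage for concrete
# games and gradient solvers):
#
#   game = ...    # exposes num_players(), num_strategies(), payoff_tensor()
#   solver = ...  # gradient solver implementing init_vars / compute_gradients
#                 # / update (see adidas_utils solvers)
#   lle = ADIDAS(seed=0)
#   lle.approximate_nash(game, solver, sym=True, num_iterations=1000,
#                        num_samples=np.inf, exact_eval=True)
#   nash_estimate = lle.results['dist']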
| open_spiel-master | open_spiel/python/algorithms/adidas.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.stackelberg_lp."""
from absl.testing import absltest
from absl.testing import parameterized
import nashpy as nash
import numpy as np
from open_spiel.python.algorithms.stackelberg_lp import solve_stackelberg
from open_spiel.python.egt.utils import game_payoffs_array
import pyspiel
# game instances based on Conitzer & Sandholm'06 paper
game0 = pyspiel.create_matrix_game([[2, 4], [1, 3]], [[1, 0], [0, 1]])
commit_strategy0 = np.array([0.5, 0.5])
commit_value0 = 3.5
game1 = pyspiel.create_matrix_game([[2, 0, 0], [1, 0, 0]],
[[0, 2, 5], [0, -1, -4]])
commit_strategy1 = np.array([1 / 3, 2 / 3])
commit_value1 = 4 / 3
class StackelbergLPTest(parameterized.TestCase):
@parameterized.named_parameters(
("game0", game0, commit_strategy0, commit_value0),
("game1", game1, commit_strategy1, commit_value1),
)
def test_simple_games(self, game, commit_strategy, commit_value):
leader_eq_strategy, _, leader_eq_value, _ = solve_stackelberg(game)
with self.subTest("optimal commitment"):
np.testing.assert_array_almost_equal(commit_strategy, leader_eq_strategy)
self.assertAlmostEqual(commit_value, leader_eq_value)
with self.subTest("Leader-payoff in SSE no less than in NE"):
p_mat = game_payoffs_array(game)
nashpy_game = nash.Game(p_mat[0], p_mat[1])
for eq in nashpy_game.support_enumeration():
leader_nash_value = eq[0].reshape(1,
-1).dot(p_mat[0]).dot(eq[1].reshape(
-1, 1))
self.assertGreaterEqual(leader_eq_value, leader_nash_value)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/stackelberg_lp_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.exploitability."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import policy_utils
from open_spiel.python.games import data
import pyspiel
class ExploitabilityTest(parameterized.TestCase):
def test_exploitability_on_kuhn_poker_uniform_random(self):
# NashConv of uniform random test_policy from (found on Google books):
# https://link.springer.com/chapter/10.1007/978-3-319-75931-9_5
game = pyspiel.load_game("kuhn_poker")
test_policy = policy.UniformRandomPolicy(game)
expected_nash_conv = 11 / 12
self.assertAlmostEqual(
exploitability.exploitability(game, test_policy),
expected_nash_conv / 2)
def test_kuhn_poker_uniform_random_best_response_pid0(self):
game = pyspiel.load_game("kuhn_poker")
test_policy = policy.UniformRandomPolicy(game)
results = exploitability.best_response(game, test_policy, player_id=0)
self.assertEqual(
results["best_response_action"],
{
"0": 1, # Bet in case opponent folds when winning
"1": 1, # Bet in case opponent folds when winning
"2": 0, # Both equally good (we return the lowest action)
# Some of these will never happen under the best-response policy,
# but we have computed best-response actions anyway.
"0pb": 0, # Fold - we're losing
"1pb": 1, # Call - we're 50-50
"2pb": 1, # Call - we've won
})
self.assertGreater(results["nash_conv"], 0.1)
def test_kuhn_poker_uniform_random_best_response_pid1(self):
game = pyspiel.load_game("kuhn_poker")
test_policy = policy.UniformRandomPolicy(game)
results = exploitability.best_response(game, test_policy, player_id=1)
self.assertEqual(
results["best_response_action"],
{
# Bet is always best
"0p": 1,
"1p": 1,
"2p": 1,
# Call unless we know we're beaten
"0b": 0,
"1b": 1,
"2b": 1,
})
self.assertGreater(results["nash_conv"], 0.1)
def test_kuhn_poker_uniform_random(self):
# NashConv of uniform random test_policy from (found on Google books):
# https://link.springer.com/chapter/10.1007/978-3-319-75931-9_5
game = pyspiel.load_game("kuhn_poker")
test_policy = policy.UniformRandomPolicy(game)
self.assertAlmostEqual(exploitability.nash_conv(game, test_policy), 11 / 12)
def test_kuhn_poker_always_fold(self):
game = pyspiel.load_game("kuhn_poker")
test_policy = policy.FirstActionPolicy(game)
self.assertAlmostEqual(exploitability.nash_conv(game, test_policy), 2)
def test_kuhn_poker_optimal(self):
game = pyspiel.load_game("kuhn_poker")
test_policy = data.kuhn_nash_equilibrium(alpha=0.2)
self.assertAlmostEqual(exploitability.nash_conv(game, test_policy), 0)
def test_leduc_poker_uniform_random(self):
# NashConv taken from independent implementations
game = pyspiel.load_game("leduc_poker")
test_policy = policy.UniformRandomPolicy(game)
self.assertAlmostEqual(
exploitability.nash_conv(game, test_policy), 4.747222222222222)
def test_leduc_poker_always_fold(self):
game = pyspiel.load_game("leduc_poker")
test_policy = policy.FirstActionPolicy(game)
self.assertAlmostEqual(exploitability.nash_conv(game, test_policy), 2)
# Values for uniform policies taken from
# https://link.springer.com/chapter/10.1007/978-3-319-75931-9_5
# (including multiplayer games below). However, the value for Leduc against
# the uniform test_policy is wrong in the paper. This has been independently
# verified in a number of independent code bases. The 4.7472 value is correct.
  # The value for AlwaysFold is trivial: if you always fold, you win 0 chips,
  # but if you switch to AlwaysBet, you win 1 chip every time when playing
  # against a player who always folds.
@parameterized.parameters(
("kuhn_poker", policy.UniformRandomPolicy, 0.9166666666666666),
("kuhn_poker", policy.FirstActionPolicy, 2.),
("kuhn_poker", lambda _: data.kuhn_nash_equilibrium(alpha=0.2), 0.),
("leduc_poker", policy.FirstActionPolicy, 2.),
("leduc_poker", policy.UniformRandomPolicy, 4.7472222222222),
)
def test_2p_nash_conv(self, game_name, policy_func, expected):
game = pyspiel.load_game(game_name)
self.assertAlmostEqual(
exploitability.nash_conv(game, policy_func(game)), expected)
@parameterized.parameters(3, 4)
def test_kuhn_poker_uniform_random_nash_conv(self, num_players):
game = pyspiel.load_game("kuhn_poker", {"players": num_players})
test_policy = policy.UniformRandomPolicy(game)
self.assertGreater(exploitability.nash_conv(game, test_policy), 2)
@parameterized.parameters(("kuhn_poker", 2), ("kuhn_poker", 3),
("kuhn_poker", 4))
def test_python_same_as_cpp_for_multiplayer_uniform_random_nash_conv(
self, game_name, num_players):
game = pyspiel.load_game(game_name, {"players": num_players})
# TabularPolicy defaults to being a uniform random policy.
test_policy = policy.TabularPolicy(game)
python_nash_conv = exploitability.nash_conv(game, test_policy)
cpp_nash_conv = pyspiel.nash_conv(
game, policy_utils.policy_to_dict(test_policy, game))
self.assertAlmostEqual(python_nash_conv, cpp_nash_conv)
def test_cpp_python_cfr_kuhn(self):
game = pyspiel.load_game("kuhn_poker")
solver = pyspiel.CFRSolver(game)
for _ in range(100):
solver.evaluate_and_update_policy()
pyspiel_average_policy = solver.tabular_average_policy()
cpp_nash_conv = pyspiel.nash_conv(game, pyspiel_average_policy)
python_policy = policy.pyspiel_policy_to_python_policy(
game, pyspiel_average_policy)
python_nash_conv = exploitability.nash_conv(game, python_policy)
self.assertAlmostEqual(python_nash_conv, cpp_nash_conv)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/exploitability_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various masked_softmax implementations, both in numpy and tensorflow."""
import numpy as np
import tensorflow.compat.v1 as tf
# Temporarily disable TF2 behavior until the code is updated.
tf.disable_v2_behavior()
def tf_masked_softmax(logits, legal_actions_mask):
"""Returns the softmax over the valid actions defined by `legal_actions_mask`.
Args:
logits: A tensor [..., num_actions] (e.g. [num_actions] or [B, num_actions])
representing the logits to mask.
legal_actions_mask: The legal action mask, same shape as logits. 1 means
      it's a legal action, 0 means it's illegal. It can be a tensorflow or numpy
tensor.
"""
# This will raise a warning as we are taking the log of 0, which sets the 0
# values to -inf. However, this is fine, as we then apply tf.exp, which sets
# tf.exp(-inf) to 0. e.g. if we have logits [5, 3, 1], with legal_mask
# [0, 1, 1], then masked_logits == [-inf, 3, 1], so we subtract the max to
# get [-inf, 0, -2], and apply tf.exp to get [0, 1, e^-2].
legal_actions_mask = tf.cast(legal_actions_mask, dtype=logits.dtype)
masked_logits = logits + tf.log(legal_actions_mask)
max_logit = tf.reduce_max(masked_logits, axis=-1, keepdims=True)
exp_logit = tf.exp(masked_logits - max_logit)
return exp_logit / tf.reduce_sum(exp_logit, axis=-1, keepdims=True)
def np_masked_softmax(logits, legal_actions_mask):
"""Returns the softmax over the valid actions defined by `legal_actions_mask`.
Args:
logits: A tensor [..., num_actions] (e.g. [num_actions] or [B, num_actions])
representing the logits to mask.
legal_actions_mask: The legal action mask, same shape as logits. 1 means
it's a legal action, 0 means it's illegal.
"""
masked_logits = logits + np.log(legal_actions_mask)
max_logit = np.amax(masked_logits, axis=-1, keepdims=True)
exp_logit = np.exp(masked_logits - max_logit)
return exp_logit / np.sum(exp_logit, axis=-1, keepdims=True)
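# Worked example (not part of the original module): with logits [5., 3., 1.]
# and legal-action mask [0., 1., 1.], the masked logits are [-inf, 3., 1.],
# so the first action gets zero probability and the rest renormalize:
#
#   np_masked_softmax(np.array([5., 3., 1.]), np.array([0., 1., 1.]))
#   # -> approximately [0., 0.881, 0.119]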
| open_spiel-master | open_spiel/python/algorithms/masked_softmax.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.sequence_form_utils.py."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import sequence_form_utils
from open_spiel.python.algorithms.expected_game_score import policy_value
import pyspiel
_KUHN_GAME = pyspiel.load_game('kuhn_poker')
_LEDUC_GAME = pyspiel.load_game('leduc_poker')
class SequenceFormTest(parameterized.TestCase):
@parameterized.parameters(
{
'game': _KUHN_GAME,
'cfr_iters': 100
},
{
'game': _LEDUC_GAME,
'cfr_iters': 10
},
)
def test_sequence_to_policy(self, game, cfr_iters):
cfr_solver = cfr.CFRSolver(game)
for _ in range(cfr_iters):
cfr_solver.evaluate_and_update_policy()
(_, infoset_actions_to_seq, infoset_action_maps, _, _,
_) = sequence_form_utils.construct_vars(game)
policies = cfr_solver.average_policy()
sequences = sequence_form_utils.policy_to_sequence(game, policies,
infoset_actions_to_seq)
converted_policies = sequence_form_utils.sequence_to_policy(
sequences, game, infoset_actions_to_seq, infoset_action_maps)
np.testing.assert_allclose(
policies.action_probability_array,
converted_policies.action_probability_array,
rtol=1e-10)
@parameterized.parameters(
{
'game': _KUHN_GAME,
'cfr_iters': 100
},
{
'game': _LEDUC_GAME,
'cfr_iters': 10
},
)
def test_sequence_payoff(self, game, cfr_iters):
(_, infoset_actions_to_seq, _, _, payoff_mat,
_) = sequence_form_utils.construct_vars(game)
uniform_policies = policy.TabularPolicy(game)
uniform_value = policy_value(game.new_initial_state(),
[uniform_policies, uniform_policies])
sequences = sequence_form_utils.policy_to_sequence(game, uniform_policies,
infoset_actions_to_seq)
np.testing.assert_allclose(
uniform_value[0],
-sequences[0].T @ payoff_mat @ sequences[1],
rtol=1e-10)
# use cfr iterations to construct new policy
cfr_solver = cfr.CFRSolver(game)
for _ in range(cfr_iters):
cfr_solver.evaluate_and_update_policy()
policies = cfr_solver.average_policy()
cfr_value = policy_value(game.new_initial_state(), [policies, policies])
sequences = sequence_form_utils.policy_to_sequence(game, policies,
infoset_actions_to_seq)
np.testing.assert_allclose(
cfr_value[0], -sequences[0].T @ payoff_mat @ sequences[1], rtol=1e-10)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/sequence_form_utils_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for LP solvers."""
from absl.testing import absltest
from open_spiel.python.algorithms import sequence_form_lp
import pyspiel
class SFLPTest(absltest.TestCase):
def test_rock_paper_scissors(self):
game = pyspiel.load_game_as_turn_based("matrix_rps")
val1, val2, _, _ = sequence_form_lp.solve_zero_sum_game(game)
self.assertAlmostEqual(val1, 0)
self.assertAlmostEqual(val2, 0)
def test_kuhn_poker(self):
game = pyspiel.load_game("kuhn_poker")
val1, val2, _, _ = sequence_form_lp.solve_zero_sum_game(game)
# value from Kuhn 1950 or https://en.wikipedia.org/wiki/Kuhn_poker
self.assertAlmostEqual(val1, -1 / 18)
self.assertAlmostEqual(val2, +1 / 18)
def test_kuhn_poker_efg(self):
game = pyspiel.load_efg_game(pyspiel.get_kuhn_poker_efg_data())
val1, val2, _, _ = sequence_form_lp.solve_zero_sum_game(game)
# value from Kuhn 1950 or https://en.wikipedia.org/wiki/Kuhn_poker
self.assertAlmostEqual(val1, -1 / 18)
self.assertAlmostEqual(val2, +1 / 18)
def test_leduc_poker(self):
game = pyspiel.load_game("leduc_poker")
val1, val2, _, _ = sequence_form_lp.solve_zero_sum_game(game)
# values obtained from Appendix E.2 of Lanctot et al. 2017, A Unified
# Game-Theoretic Approach to Multiagent Reinforcement Learning.
# https://arxiv.org/abs/1711.00832
self.assertAlmostEqual(val1, -0.085606424078, places=6)
self.assertAlmostEqual(val2, 0.085606424078, places=6)
def test_iigoofspiel4(self):
game = pyspiel.load_game_as_turn_based("goofspiel", {
"imp_info": True,
"num_cards": 4,
"points_order": "descending",
})
val1, val2, _, _ = sequence_form_lp.solve_zero_sum_game(game)
# symmetric game, should be 0
self.assertAlmostEqual(val1, 0)
self.assertAlmostEqual(val2, 0)
# TODO(author5): currently does not work because TTT's information state is
# not perfect recall. Enable this test when fixed.
# def test_tictactoe(self):
# game = pyspiel.load_game("tic_tac_toe")
# val1, val2 = sequence_form_lp.solve_zero_sum_game(game)
# self.assertAlmostEqual(val1, 0)
# self.assertAlmostEqual(val2, 0)
# This test takes too long for non-glpk solvers, and glpk solver is not
# supported within google's internal cvxopt import. When solving via glpk,
# (locally, outside of google's testing framework), the test takes >300
# seconds, so it is disabled by default, but still left here for reference.
# Note, value is taken from an independent implementation but also found in
# Neller & Lanctot 2013, An Introduction to Counterfactual Regret Minimization
# http://modelai.gettysburg.edu/2013/cfr/cfr.pdf
#
# def test_liars_dice(self):
# game = pyspiel.load_game("liars_dice")
# val1, val2 = sequence_form_lp.solve_zero_sum_game(game, solver="glpk")
# self.assertAlmostEqual(val1, -0.027131782945736)
# self.assertAlmostEqual(val2, 0.027131782945736)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/sequence_form_lp_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Export game trees in gambit format.
An exporter for the .efg format used by Gambit:
http://www.gambit-project.org/gambit14/formats.html
See `examples/gambit_example.py` for an example of usage.
"""
import collections
import functools
def quote(x):
return f"\"{x}\""
def export_gambit(game):
"""Builds gambit representation of the game tree.
Args:
game: A `pyspiel.Game` object.
Returns:
string: Gambit tree
"""
players = " ".join([f"\"Pl{i}\"" for i in range(game.num_players())])
ret = f"EFG 2 R {quote(game)} {{ {players} }} \n"
terminal_idx = 1
chance_idx = 1
# We will keep separate infoset idx per each player.
# Note that gambit infosets start at 1, but we start them here at 0 because
# they get incremented when accessed from infoset_tables below.
infoset_idx = [0] * game.num_players()
def infoset_next_id(player):
nonlocal infoset_idx
infoset_idx[player] += 1
return infoset_idx[player]
infoset_tables = [
collections.defaultdict(functools.partial(infoset_next_id, player))
for player in range(game.num_players())
]
def build_tree(state, depth):
nonlocal ret, terminal_idx, chance_idx, infoset_tables
ret += " " * depth # add nice spacing
state_str = str(state)
if len(state_str) > 10:
state_str = ""
if state.is_terminal():
utils = " ".join(map(str, state.returns()))
ret += f"t {quote(state_str)} {terminal_idx} \"\" {{ {utils} }}\n"
terminal_idx += 1
return
if state.is_chance_node():
ret += f"c {quote(state_str)} {chance_idx} \"\" {{ "
for action, prob in state.chance_outcomes():
action_str = state.action_to_string(state.current_player(), action)
ret += f"{quote(action_str)} {prob:.16f} "
ret += " } 0\n"
chance_idx += 1
else: # player node
player = state.current_player()
gambit_player = player + 1 # cannot be indexed from 0
infoset = state.information_state_string()
infoset_idx = infoset_tables[player][infoset]
ret += f"p {quote(state_str)} {gambit_player} {infoset_idx} \"\" {{ "
for action in state.legal_actions():
action_str = state.action_to_string(state.current_player(), action)
ret += f"{quote(action_str)} "
ret += " } 0\n"
for action in state.legal_actions():
child = state.child(action)
build_tree(child, depth + 1)
build_tree(game.new_initial_state(), 0)
return ret
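# Illustrative usage sketch (not part of the original module; the output path
# is an assumption): export Kuhn poker to a .efg file readable by Gambit.
#
#   import pyspiel
#   efg_text = export_gambit(pyspiel.load_game("kuhn_poker"))
#   with open("/tmp/kuhn_poker.efg", "w") as f:
#     f.write(efg_text)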
| open_spiel-master | open_spiel/python/algorithms/gambit.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Projected Replicator Dynamics Algorithm.
This is an N-player implementation of the Projected Replicator Dynamics
algorithm described in Lanctot et al., 2017: https://arxiv.org/abs/1711.00832.
"""
import numpy as np
from open_spiel.python.algorithms import nfg_utils
def _partial_multi_dot(player_payoff_tensor, strategies, index_avoided):
"""Computes a generalized dot product avoiding one dimension.
This is used to directly get the expected return of a given action, given
other players' strategies, for the player indexed by index_avoided.
Note that the numpy.dot function is used to compute this product, as it ended
  up being (slightly) faster in performance tests than np.tensordot. Using the
reduce function proved slower for both np.dot and np.tensordot.
Args:
player_payoff_tensor: payoff tensor for player[index_avoided], of dimension
(dim(vector[0]), dim(vector[1]), ..., dim(vector[-1])).
strategies: Meta strategy probabilities for each player.
index_avoided: Player for which we do not compute the dot product.
Returns:
Vector of expected returns for each action of player [the player indexed by
index_avoided].
"""
new_axis_order = [index_avoided] + [
i for i in range(len(strategies)) if (i != index_avoided)
]
accumulator = np.transpose(player_payoff_tensor, new_axis_order)
for i in range(len(strategies) - 1, -1, -1):
if i != index_avoided:
accumulator = np.dot(accumulator, strategies[i])
return accumulator
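# Worked example (not part of the original module): in a two-player game where
# player 0's payoff tensor A has shape (num_actions_0, num_actions_1) and
# strategies = [x, y], _partial_multi_dot(A, strategies, 0) returns A.dot(y),
# the expected return of each of player 0's actions against strategy y.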
def _project_distribution(updated_strategy, gamma):
"""Projects the distribution in updated_x to have minimal probabilities.
Minimal probabilities are set as gamma, and the probabilities are then
renormalized to sum to 1.
Args:
updated_strategy: New distribution value after being updated by update rule.
gamma: minimal probability value when divided by number of actions.
Returns:
Projected distribution.
"""
# Epsilon approximation of L2-norm projection onto the Delta_gamma space.
updated_strategy[updated_strategy < gamma] = gamma
updated_strategy = updated_strategy / np.sum(updated_strategy)
return updated_strategy
def _approx_simplex_projection(updated_strategy, gamma=0.0):
"""Approximately projects the distribution in updated_x to have minimal probabilities.
Minimal probabilities are set as gamma, and the probabilities are then
renormalized to sum to 1.
Args:
updated_strategy: New distribution value after being updated by update rule.
gamma: minimal probability value when divided by number of actions.
Returns:
Projected distribution.
"""
# Epsilon approximation of L2-norm projection onto the Delta_gamma space.
updated_strategy[updated_strategy < gamma] = gamma
updated_strategy = updated_strategy / np.sum(updated_strategy)
return updated_strategy
def _simplex_projection(updated_strategy, gamma=0.0):
"""Project updated_strategy on the closest point in L2-norm on gamma-simplex.
Based on: https://eng.ucmerced.edu/people/wwang5/papers/SimplexProj.pdf
Args:
updated_strategy: New distribution value after being updated by update rule.
gamma: minimal probability value when divided by number of actions.
Returns:
Projected distribution
Algorithm description:
  It aims to find a scalar lam to be subtracted from each dimension of v,
  with the restriction that the resulting quantities lie in [gamma, 1],
  such that the resulting vector sums to 1.
Example: [0.4, 0.7, 0.6], 0.2 -- > find lam=0.25
--> [max(0.4-0.25, 0.2), max(0.7-0.25, 0.2), max(0.6-0.25, 0.2)]
--> [0.2, 0.45, 0.35]
"""
n = len(updated_strategy)
idx = np.arange(1, n + 1)
u = np.sort(updated_strategy)[::-1]
u_tmp = (1 - np.cumsum(u) - (n - idx) * gamma) / idx
rho = np.searchsorted(u + u_tmp <= gamma, True)
return np.maximum(updated_strategy + u_tmp[rho - 1], gamma)
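# Editor's sketch (not part of the original file): the docstring example above,
# checked numerically. For v = [0.4, 0.7, 0.6] with gamma = 0.2 the projection
# subtracts lam = 0.25 and clips at gamma, giving [0.2, 0.45, 0.35], which sums
# to 1 and respects the per-action floor of gamma.
def _example_simplex_projection():
  """Illustration only."""
  v = np.array([0.4, 0.7, 0.6])
  projected = _simplex_projection(v, gamma=0.2)
  np.testing.assert_allclose(projected, [0.2, 0.45, 0.35])
  return projected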
def _projected_replicator_dynamics_step(payoff_tensors, strategies, dt, gamma,
use_approx=False):
"""Does one step of the projected replicator dynamics algorithm.
Args:
payoff_tensors: List of payoff tensors for each player.
strategies: List of the strategies used by each player.
dt: Update amplitude term.
gamma: Minimum exploratory probability term.
use_approx: use approximate simplex projection.
Returns:
A list of updated strategies for each player.
"""
# TODO(author4): Investigate whether this update could be fully vectorized.
new_strategies = []
for player in range(len(payoff_tensors)):
current_payoff_tensor = payoff_tensors[player]
current_strategy = strategies[player]
values_per_strategy = _partial_multi_dot(current_payoff_tensor, strategies,
player)
average_return = np.dot(values_per_strategy, current_strategy)
delta = current_strategy * (values_per_strategy - average_return)
updated_strategy = current_strategy + dt * delta
updated_strategy = (
_approx_simplex_projection(updated_strategy, gamma) if use_approx
else _simplex_projection(updated_strategy, gamma))
new_strategies.append(updated_strategy)
return new_strategies
def projected_replicator_dynamics(payoff_tensors,
prd_initial_strategies=None,
prd_iterations=int(1e5),
prd_dt=1e-3,
prd_gamma=1e-6,
average_over_last_n_strategies=None,
use_approx=False,
**unused_kwargs):
"""The Projected Replicator Dynamics algorithm.
Args:
payoff_tensors: List of payoff tensors for each player.
prd_initial_strategies: Initial list of the strategies used by each player,
if any. Could be used to speed up the search by providing a good initial
solution.
prd_iterations: Number of algorithmic steps to take before returning an
answer.
prd_dt: Update amplitude term.
prd_gamma: Minimum exploratory probability term.
average_over_last_n_strategies: Running average window size for average
policy computation. If None, use the whole trajectory.
use_approx: use the approximate simplex projection.
**unused_kwargs: Convenient way of exposing an API compatible with other
methods with possibly different arguments.
Returns:
PRD-computed strategies.
"""
number_players = len(payoff_tensors)
# Number of actions available to each player.
action_space_shapes = payoff_tensors[0].shape
# If no initial starting position is given, start with uniform probabilities.
new_strategies = prd_initial_strategies or [
np.ones(action_space_shapes[k]) / action_space_shapes[k]
for k in range(number_players)
]
averager = nfg_utils.StrategyAverager(number_players, action_space_shapes,
average_over_last_n_strategies)
averager.append(new_strategies)
for _ in range(prd_iterations):
new_strategies = _projected_replicator_dynamics_step(
payoff_tensors, new_strategies, prd_dt, prd_gamma, use_approx)
averager.append(new_strategies)
return averager.average_strategies()
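# Editor's sketch (not part of the original file): hypothetical end-to-end usage
# on two-player rock-paper-scissors. The time-averaged PRD strategies should be
# close to the uniform mixture for both players.
def _example_projected_replicator_dynamics():
  """Illustration only; the payoffs below are the standard RPS matrix."""
  rps = np.array([[0.0, -1.0, 1.0],
                  [1.0, 0.0, -1.0],
                  [-1.0, 1.0, 0.0]])
  # Zero-sum: player 1's payoff tensor is the negation of player 0's.
  strategies = projected_replicator_dynamics([rps, -rps])
  return strategies  # each entry is expected to be near [1/3, 1/3, 1/3]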
| open_spiel-master | open_spiel/python/algorithms/projected_replicator_dynamics.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for LP solvers."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import lp_solver
import pyspiel
class LPSolversTest(absltest.TestCase):
def test_rock_paper_scissors(self):
p0_sol, p1_sol, p0_sol_val, p1_sol_val = (
lp_solver.solve_zero_sum_matrix_game(
pyspiel.create_matrix_game(
[[0.0, -1.0, 1.0], [1.0, 0.0, -1.0], [-1.0, 1.0, 0.0]],
[[0.0, 1.0, -1.0], [-1.0, 0.0, 1.0], [1.0, -1.0, 0.0]])))
self.assertLen(p0_sol, 3)
self.assertLen(p1_sol, 3)
for i in range(3):
self.assertAlmostEqual(p0_sol[i], 1.0 / 3.0)
self.assertAlmostEqual(p1_sol[i], 1.0 / 3.0)
self.assertAlmostEqual(p0_sol_val, 0.0)
self.assertAlmostEqual(p1_sol_val, 0.0)
def test_biased_rock_paper_scissors(self):
# See sec 6.2 of Bosansky et al. 2016. Algorithms for Computing Strategies
# in Two-Player Simultaneous Move Games
# http://mlanctot.info/files/papers/aij-2psimmove.pdf
p0_sol, p1_sol, p0_sol_val, p1_sol_val = (
lp_solver.solve_zero_sum_matrix_game(
pyspiel.create_matrix_game(
[[0.0, -0.25, 0.5], [0.25, 0.0, -0.05], [-0.5, 0.05, 0.0]],
[[0.0, 0.25, -0.5], [-0.25, 0.0, 0.05], [0.5, -0.05, 0.0]])))
self.assertLen(p0_sol, 3)
self.assertLen(p1_sol, 3)
self.assertAlmostEqual(p0_sol[0], 1.0 / 16.0, places=4)
self.assertAlmostEqual(p1_sol[0], 1.0 / 16.0, places=4)
self.assertAlmostEqual(p0_sol[1], 10.0 / 16.0, places=4)
self.assertAlmostEqual(p1_sol[1], 10.0 / 16.0, places=4)
self.assertAlmostEqual(p0_sol[2], 5.0 / 16.0, places=4)
self.assertAlmostEqual(p1_sol[2], 5.0 / 16.0, places=4)
self.assertAlmostEqual(p0_sol_val, 0.0)
self.assertAlmostEqual(p1_sol_val, 0.0)
def test_asymmetric_pure_nonzero_val(self):
# c0 c1 c2
# r0 | 2, -2 | 1, -1 | 5, -5
# r1 |-3, 3 | -4, 4 | -2, 2
#
# Pure eq (r0,c1) for a value of (1, -1)
# 2nd row is dominated, and then second player chooses 2nd col.
p0_sol, p1_sol, p0_sol_val, p1_sol_val = (
lp_solver.solve_zero_sum_matrix_game(
pyspiel.create_matrix_game([[2.0, 1.0, 5.0], [-3.0, -4.0, -2.0]],
[[-2.0, -1.0, -5.0], [3.0, 4.0, 2.0]])))
self.assertLen(p0_sol, 2)
self.assertLen(p1_sol, 3)
self.assertAlmostEqual(p0_sol[0], 1.0)
self.assertAlmostEqual(p0_sol[1], 0.0)
self.assertAlmostEqual(p1_sol[0], 0.0)
self.assertAlmostEqual(p1_sol[1], 1.0)
self.assertAlmostEqual(p0_sol_val, 1.0)
self.assertAlmostEqual(p1_sol_val, -1.0)
def test_solve_blotto(self):
blotto_matrix_game = pyspiel.load_matrix_game("blotto")
p0_sol, p1_sol, p0_sol_val, p1_sol_val = (
lp_solver.solve_zero_sum_matrix_game(blotto_matrix_game))
self.assertLen(p0_sol, blotto_matrix_game.num_rows())
self.assertLen(p1_sol, blotto_matrix_game.num_cols())
# Symmetric game, must be zero
self.assertAlmostEqual(p0_sol_val, 0.0)
self.assertAlmostEqual(p1_sol_val, 0.0)
def _assert_dominated(self, *args, **kwargs):
self.assertTrue(lp_solver.is_dominated(*args, **kwargs))
def _assert_undominated(self, *args, **kwargs):
self.assertFalse(lp_solver.is_dominated(*args, **kwargs))
def test_dominance(self):
self._assert_undominated(0, [[1., 1.], [2., 0.], [0., 2.]], 0,
lp_solver.DOMINANCE_STRICT)
self._assert_undominated(0, [[1., 1.], [2., 0.], [0., 2.]], 0,
lp_solver.DOMINANCE_WEAK)
self._assert_dominated(0, [[1., 1.], [2.1, 0.], [0., 2.]], 0,
lp_solver.DOMINANCE_STRICT)
self._assert_undominated(0, [[1., 1., 1.], [2., 0., 1.], [0., 2., 2.]], 0,
lp_solver.DOMINANCE_STRICT)
self._assert_dominated(0, [[1., 1., 1.], [2., 0., 1.], [0., 2., 2.]], 0,
lp_solver.DOMINANCE_WEAK)
self._assert_dominated(0, [[1., 1., 1.], [2., 0., 1.], [0., 2., 2.]], 0,
lp_solver.DOMINANCE_VERY_WEAK)
self._assert_dominated(0, [[1., 1., 1.], [2.1, 0., 1.], [0., 2., 2.]], 0,
lp_solver.DOMINANCE_STRICT)
self._assert_dominated(0, [[1., 1., 1.], [2.1, 0., 1.], [0., 2., 2.]], 0,
lp_solver.DOMINANCE_WEAK)
self._assert_dominated(0, [[1., 1., 1.], [2.1, 0., 1.], [0., 2., 2.]], 0,
lp_solver.DOMINANCE_VERY_WEAK)
self._assert_undominated(0, [[1., 1., 1.], [2., 0., 2.], [0., 2., 0.]], 0,
lp_solver.DOMINANCE_STRICT)
self._assert_undominated(0, [[1., 1., 1.], [2., 0., 2.], [0., 2., 0.]], 0,
lp_solver.DOMINANCE_WEAK)
self._assert_dominated(0, [[1., 1., 1.], [2., 0., 2.], [0., 2., 0.]], 0,
lp_solver.DOMINANCE_VERY_WEAK)
self._assert_undominated(0, [[1., 1.1, 1.], [2., 0., 2.], [0., 2., 0.]], 0,
lp_solver.DOMINANCE_STRICT)
self._assert_undominated(0, [[1., 1.1, 1.], [2., 0., 2.], [0., 2., 0.]], 0,
lp_solver.DOMINANCE_WEAK)
self._assert_undominated(0, [[1., 1.1, 1.], [2., 0., 2.], [0., 2., 0.]], 0,
lp_solver.DOMINANCE_VERY_WEAK)
def test_dominance_3player(self):
self._assert_undominated(0,
[[[1., 1., 1.], [2., 0., 1.], [0., 2., 2.]]] * 3,
1, lp_solver.DOMINANCE_STRICT)
self._assert_dominated(0, [[[1., 1., 1.], [2., 0., 1.], [0., 2., 2.]]] * 3,
1, lp_solver.DOMINANCE_WEAK)
self._assert_dominated(0, [[[1., 1., 1.], [2., 0., 1.], [0., 2., 2.]]] * 3,
1, lp_solver.DOMINANCE_VERY_WEAK)
def test_dominance_prisoners_dilemma(self):
self._assert_dominated(0, pyspiel.load_matrix_game("matrix_pd"), 1,
lp_solver.DOMINANCE_STRICT)
self._assert_undominated(1, pyspiel.load_matrix_game("matrix_pd"), 1,
lp_solver.DOMINANCE_VERY_WEAK)
def test_dominance_mixture(self):
mixture = lp_solver.is_dominated(
0, [[1., 1., 1.], [2., 0., 1.], [0., 2., 2.]],
0,
lp_solver.DOMINANCE_WEAK,
return_mixture=True)
self.assertAlmostEqual(mixture[0], 0)
self.assertAlmostEqual(mixture[1], 0.5)
self.assertAlmostEqual(mixture[2], 0.5)
def _checked_iterated_dominance(self, *args, **kwargs):
reduced_game, live_actions = lp_solver.iterated_dominance(*args, **kwargs)
if isinstance(reduced_game, pyspiel.MatrixGame):
payoffs_shape = [2, reduced_game.num_rows(), reduced_game.num_cols()]
else:
payoffs_shape = list(reduced_game.shape)
self.assertLen(live_actions, payoffs_shape[0])
self.assertListEqual(payoffs_shape[1:], [
np.sum(live_actions_for_player)
for live_actions_for_player in live_actions
])
return reduced_game, live_actions
def test_iterated_dominance_prisoners_dilemma(self):
# find the strictly dominant (D, D) strategy
pd = pyspiel.load_matrix_game("matrix_pd")
pd_dom, pd_live = self._checked_iterated_dominance(
pd, lp_solver.DOMINANCE_STRICT)
self.assertEqual(pd_dom.num_rows(), 1)
self.assertEqual(pd_dom.num_cols(), 1)
self.assertEqual(pd_dom.row_action_name(0), "Defect")
self.assertEqual(pd_dom.col_action_name(0), "Defect")
self.assertListEqual(pd_live[0].tolist(), [False, True])
self.assertListEqual(pd_live[1].tolist(), [False, True])
def test_iterated_dominance_auction(self):
# find a strategy through iterated dominance that's not strictly dominant
auction = pyspiel.extensive_to_matrix_game(
pyspiel.load_game("first_sealed_auction(max_value=3)"))
auction_dom, auction_live = self._checked_iterated_dominance(
auction, lp_solver.DOMINANCE_STRICT)
# there's just one non-dominated action
self.assertEqual(auction_dom.num_rows(), 1)
self.assertEqual(auction_dom.num_cols(), 1)
best_action = [
auction.row_action_name(row) for row in range(auction.num_rows())
].index(auction_dom.row_action_name(0))
self.assertTrue(auction_live[0][best_action])
# other actions are all weakly but not all strictly dominated
self.assertNotIn(False, [
lp_solver.is_dominated(action, auction, 0, lp_solver.DOMINANCE_WEAK)
for action in range(6)
if action != best_action
])
self.assertIn(False, [
lp_solver.is_dominated(action, auction, 0, lp_solver.DOMINANCE_STRICT)
for action in range(6)
if action != best_action
])
def test_iterated_dominance_ordering(self):
for _ in range(100):
game = np.random.randint(5, size=(2, 3, 3))
unused_reduced_strict, live_strict = self._checked_iterated_dominance(
game, lp_solver.DOMINANCE_STRICT)
unused_reduced_weak, live_weak = self._checked_iterated_dominance(
game, lp_solver.DOMINANCE_WEAK)
unused_reduced_vweak, live_vweak = self._checked_iterated_dominance(
game, lp_solver.DOMINANCE_VERY_WEAK)
for player in range(2):
self.assertTrue((live_strict[player] >= live_weak[player]).all())
self.assertTrue((live_strict[player] >= live_vweak[player]).all())
self.assertIn(True, live_vweak[player])
def test_iterated_dominance_strict_invariance(self):
for _ in range(100):
game = np.random.randint(5, size=(3, 2, 2, 3))
unused_reduced, live = self._checked_iterated_dominance(
game, lp_solver.DOMINANCE_STRICT)
perms = [np.random.permutation(size) for size in game.shape]
game_perm = game[tuple(np.meshgrid(
*perms, indexing="ij"))].transpose([0] + list(1 + perms[0]))
unused_reduced_perm, live_perm = self._checked_iterated_dominance(
game_perm, lp_solver.DOMINANCE_STRICT)
for player in range(3):
perm_player = perms[0][player]
self.assertListEqual(live_perm[player].tolist(),
live[perm_player][perms[1 + perm_player]].tolist())
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/lp_solver_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.get_all_states."""
from absl.testing import absltest
from open_spiel.python.algorithms import value_iteration
import pyspiel
class ValueIterationTest(absltest.TestCase):
def test_solve_tic_tac_toe(self):
game = pyspiel.load_game("tic_tac_toe")
values = value_iteration.value_iteration(
game, depth_limit=-1, threshold=0.01)
initial_state = "...\n...\n..."
cross_win_state = "...\n...\n.ox"
naught_win_state = "x..\noo.\nxx."
self.assertEqual(values[initial_state], 0)
self.assertEqual(values[cross_win_state], 1)
self.assertEqual(values[naught_win_state], -1)
def test_solve_small_goofspiel(self):
# TODO(author5): This test fails with num_cards = 4 with a new version of
# LAPACK (3.10.0), which is used by cvxopt. Might be a bug or bad assumption
# about the handling of numerical error. Look into this.
game = pyspiel.load_game("goofspiel", {"num_cards": 3})
values = value_iteration.value_iteration(
game, depth_limit=-1, threshold=1e-6)
initial_state = game.new_initial_state()
assert initial_state.is_chance_node()
root_value = 0
for action, action_prob in initial_state.chance_outcomes():
next_state = initial_state.child(action)
root_value += action_prob * values[str(next_state)]
# Symmetric game: value is 0
self.assertAlmostEqual(root_value, 0)
def test_solve_small_oshi_zumo(self):
# Oshi-Zumo(5, 2, 0)
game = pyspiel.load_game("oshi_zumo", {"coins": 5, "size": 2})
values = value_iteration.value_iteration(
game, depth_limit=-1, threshold=1e-6, cyclic_game=True)
initial_state = game.new_initial_state()
# Symmetric game: value is 0
self.assertAlmostEqual(values[str(initial_state)], 0)
# Oshi-Zumo(5, 2, 1)
game = pyspiel.load_game("oshi_zumo", {"coins": 5, "size": 2, "min_bid": 1})
values = value_iteration.value_iteration(
game, depth_limit=-1, threshold=1e-6, cyclic_game=False)
initial_state = game.new_initial_state()
# Symmetric game: value is 0
self.assertAlmostEqual(values[str(initial_state)], 0)
def test_solve_small_pig(self):
game = pyspiel.load_game("pig", {"winscore": 20})
values = value_iteration.value_iteration(
game, depth_limit=-1, threshold=1e-6, cyclic_game=True)
initial_state = game.new_initial_state()
print("Value of Pig(20): ", values[str(initial_state)])
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/value_iteration_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute the value of action given a policy vs a best responder."""
import collections
from open_spiel.python import policy
from open_spiel.python.algorithms import action_value
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms import policy_utils
import pyspiel
def _transitions(state, policies):
"""Returns a list of (action, prob) pairs from the specified state."""
if state.is_chance_node():
return state.chance_outcomes()
else:
pl = state.current_player()
return list(policies[pl].action_probabilities(state).items())
def _tuples_from_policy(policy_vector):
return [
(action, probability) for action, probability in enumerate(policy_vector)
]
_CalculatorReturn = collections.namedtuple(
"_CalculatorReturn",
[
# The exploitability of the opponent strategy, i.e. the value of the
# best-responder player BR.
"exploitability",
# An array of shape `[len(info_states), game.num_distinct_actions()]`
# giving the value of each action vs the best response.
# Will be zero for invalid actions.
"values_vs_br",
# The player's counterfactual reach probability of this infostate when
# playing against the BR, as a list of shape [num_info_states].
"counterfactual_reach_probs_vs_br",
# The reach probability of the current player at the infostates when
# playing against the BR, as list shape [num_info_states].
# This is the product of the current player probs along *one* trajectory
# leading to this info-state (this number should be the same along
# any trajectory leading to this info-state because of perfect recall).
"player_reach_probs_vs_br",
])
class Calculator(object):
"""Class to orchestrate the calculation."""
def __init__(self, game):
if game.num_players() != 2:
raise ValueError("Only supports 2-player games.")
self.game = game
self._num_players = game.num_players()
self._num_actions = game.num_distinct_actions()
self._action_value_calculator = action_value.TreeWalkCalculator(game)
# best_responder[i] is a best response to the provided policy for player i.
# It is therefore a policy for player (1-i).
self._best_responder = {0: None, 1: None}
self._all_states = None
def __call__(self, player, player_policy, info_states):
"""Computes action values per state for the player.
Args:
player: The id of the player (0 <= player < game.num_players()). This
player will play `player_policy`, while the opponent will play a best
response.
player_policy: A `policy.Policy` object.
info_states: A list of info state strings.
Returns:
      A `_CalculatorReturn` namedtuple. See its docstring for the documentation.
"""
self.player = player
opponent = 1 - player
def best_response_policy(state):
infostate = state.information_state_string(opponent)
action = best_response_actions[infostate]
return [(action, 1.0)]
# If the policy is a TabularPolicy, we can directly copy the infostate
# strings & values from the class. This is significantly faster than having
# to create the infostate strings.
if isinstance(player_policy, policy.TabularPolicy):
tabular_policy = {
key: _tuples_from_policy(player_policy.policy_for_key(key))
for key in player_policy.state_lookup
}
    # Otherwise, we have to calculate all the infostate strings every time. This
# is ~2x slower.
else:
# We cache these as they are expensive to compute & do not change.
if self._all_states is None:
self._all_states = get_all_states.get_all_states(
self.game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False)
self._state_to_information_state = {
state: self._all_states[state].information_state_string()
for state in self._all_states
}
tabular_policy = policy_utils.policy_to_dict(
player_policy, self.game, self._all_states,
self._state_to_information_state)
# When constructed, TabularBestResponse does a lot of work; we can save that
# work by caching it.
if self._best_responder[player] is None:
self._best_responder[player] = pyspiel.TabularBestResponse(
self.game, opponent, tabular_policy)
else:
self._best_responder[player].set_policy(tabular_policy)
# Computing the value at the root calculates best responses everywhere.
best_response_value = self._best_responder[player].value_from_state(
self.game.new_initial_state())
best_response_actions = self._best_responder[
player].get_best_response_actions()
# Compute action values
self._action_value_calculator.compute_all_states_action_values({
player:
player_policy,
opponent:
policy.tabular_policy_from_callable(
self.game, best_response_policy, [opponent]),
})
obj = self._action_value_calculator._get_tabular_statistics( # pylint: disable=protected-access
((player, s) for s in info_states))
# Return values
return _CalculatorReturn(
exploitability=best_response_value,
values_vs_br=obj.action_values,
counterfactual_reach_probs_vs_br=obj.counterfactual_reach_probs,
player_reach_probs_vs_br=obj.player_reach_probs)
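# Editor's sketch (not part of the original file): hypothetical usage on Kuhn
# poker. The info-state strings below ("0", "1", "2" for player 0's first
# decision after each possible card) are an assumption about Kuhn's info-state
# format, not something verified in this file.
def _example_calculator_usage():
  """Illustration only."""
  game = pyspiel.load_game("kuhn_poker")
  calc = Calculator(game)
  uniform = policy.TabularPolicy(game)  # uniform random policy by default
  result = calc(player=0, player_policy=uniform, info_states=["0", "1", "2"])
  # result.exploitability is the best responder's value against `uniform`;
  # result.values_vs_br has shape [3, game.num_distinct_actions()].
  return result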
| open_spiel-master | open_spiel/python/algorithms/action_value_vs_best_response.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Double Oracle algorithm.
Solves two-player zero-sum games, for more information see:
McMahan et al. (2003). Planning in the presence of cost functions controlled by
an adversary. In Proceedings of the 20th International Conference on Machine
Learning (ICML-03) (pp. 536-543).
"""
import numpy as np
from open_spiel.python.algorithms import lp_solver
from open_spiel.python.egt import utils
import pyspiel
def lens(lists):
"""Returns the sizes of lists in a list."""
return list(map(len, lists))
def solve_subgame(subgame_payoffs):
"""Solves the subgame using OpenSpiel's LP solver."""
p0_sol, p1_sol, _, _ = lp_solver.solve_zero_sum_matrix_game(
pyspiel.create_matrix_game(*subgame_payoffs))
p0_sol, p1_sol = np.asarray(p0_sol), np.asarray(p1_sol)
return [p0_sol / p0_sol.sum(), p1_sol / p1_sol.sum()]
class DoubleOracleSolver(object):
"""Double Oracle solver."""
def __init__(self, game, enforce_symmetry=False):
"""Initializes the Double Oracle solver.
Args:
game: pyspiel.MatrixGame (zero-sum).
enforce_symmetry: If True, enforces symmetry in the strategies appended by
each player, by using the first player's best response for the second
player as well; also asserts the game is symmetric and that players are
seeded with identical initial_strategies, default: False.
"""
assert isinstance(game, pyspiel.MatrixGame)
assert game.get_type().utility == pyspiel.GameType.Utility.ZERO_SUM
# convert matrix game to numpy.ndarray of shape [2,rows,columns]
self.payoffs = utils.game_payoffs_array(game)
self.subgame_strategies = [[], []]
self.enforce_symmetry = enforce_symmetry
if self.enforce_symmetry:
assert utils.is_symmetric_matrix_game(self.payoffs), (
"enforce_symmetry is True, but payoffs are asymmetric!")
def subgame_payoffs(self):
# Select payoffs from the full game according to the subgame strategies.
assert all(lens(self.subgame_strategies)), "Need > 0 strategies per player."
subgame_payoffs = np.copy(self.payoffs)
for player, indices in enumerate(self.subgame_strategies):
subgame_payoffs = np.take(subgame_payoffs, indices, axis=player + 1)
return subgame_payoffs
def oracle(self, subgame_solution):
"""Computes the best responses.
Args:
subgame_solution: List of subgame solution policies.
Returns:
best_response: For both players from the original set of pure strategies.
best_response_utility: Corresponding utility for both players.
"""
assert lens(subgame_solution) == lens(self.subgame_strategies), (
f"{lens(subgame_solution)} != {lens(self.subgame_strategies)}")
best_response = [None, None]
best_response_utility = [None, None]
n_best_responders = 1 if self.enforce_symmetry else 2
for player in range(n_best_responders):
opponent = 1 - player
# collect relevant payoff entries
payoffs = np.take(
self.payoffs[player],
self.subgame_strategies[opponent],
axis=opponent)
# transpose to move player to leading dimension
payoffs = np.transpose(payoffs, [player, opponent])
avg_payoffs = (payoffs @ subgame_solution[opponent]).squeeze()
best_response[player] = np.argmax(avg_payoffs)
best_response_utility[player] = avg_payoffs[best_response[player]]
if self.enforce_symmetry:
best_response[1] = best_response[0]
best_response_utility[1] = best_response_utility[0]
return best_response, best_response_utility
def step(self):
"""Performs one iteration."""
subgame_payoffs = self.subgame_payoffs()
subgame_solution = solve_subgame(subgame_payoffs)
best_response, best_response_utility = self.oracle(subgame_solution)
# Add best responses to the subgame strategies (if not included yet).
self.subgame_strategies = [
sorted(set(strategies + [br]))
for strategies, br in zip(self.subgame_strategies, best_response)
]
return best_response, best_response_utility
def solve_yield(self,
initial_strategies,
max_steps,
tolerance,
verbose,
yield_subgame=False):
"""Solves game using Double Oracle, yielding intermediate results.
Args:
initial_strategies: List of pure strategies for both players, optional.
max_steps: Maximum number of iterations, default: 20.
tolerance: Stop if the estimated value of the game is below the tolerance.
verbose: If False, no warning is shown, default: True.
yield_subgame: If True, yields the subgame on each iteration. Otherwise,
yields the final results only, default: False.
Yields:
solution: Policies for both players.
iteration: The number of iterations performed.
value: Estimated value of the game.
"""
if self.enforce_symmetry and initial_strategies:
assert np.array_equal(initial_strategies[0], initial_strategies[1]), (
f"Players must use same initial_strategies as symmetry is enforced."
f"\ninitial_strategies[0]: {initial_strategies[0]}, "
f"\ninitial_strategies[1]: {initial_strategies[1]}")
self.subgame_strategies = (initial_strategies if initial_strategies
else [[0], [0]])
iteration = 0
while iteration < max_steps:
if yield_subgame:
yield None, iteration, None, self.subgame_payoffs()
iteration += 1
last_subgame_size = lens(self.subgame_strategies)
_, best_response_utility = self.step()
value = sum(best_response_utility)
if abs(value) < tolerance:
if verbose:
print("Last iteration={}; value below tolerance {} < {}."
.format(iteration, value, tolerance))
break
if lens(self.subgame_strategies) == last_subgame_size:
if verbose:
print(
"Last iteration={}; no strategies added, increase tolerance={} or check subgame solver."
.format(iteration, tolerance))
break
# Compute subgame solution and return solution in original strategy space.
subgame_solution = solve_subgame(self.subgame_payoffs())
solution = [np.zeros(k) for k in self.payoffs.shape[1:]]
for p in range(2):
solution[p][self.subgame_strategies[p]] = subgame_solution[p].squeeze()
yield solution, iteration, value, self.subgame_payoffs()
def solve(self,
initial_strategies=None,
max_steps=20,
tolerance=5e-5,
verbose=True):
"""Solves the game using Double Oracle, returning the final solution."""
solution, iteration, value = None, None, None
generator = self.solve_yield(initial_strategies, max_steps, tolerance,
verbose, yield_subgame=False)
for solution, iteration, value, _ in generator:
pass
return solution, iteration, value
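# Editor's sketch (not part of the original file): hypothetical usage on
# rock-paper-scissors. Double Oracle should recover the uniform equilibrium
# with a game value of approximately zero.
def _example_double_oracle():
  """Illustration only; assumes create_matrix_game infers the zero-sum type."""
  game = pyspiel.create_matrix_game(
      [[0.0, -1.0, 1.0], [1.0, 0.0, -1.0], [-1.0, 1.0, 0.0]],
      [[0.0, 1.0, -1.0], [-1.0, 0.0, 1.0], [1.0, -1.0, 0.0]])
  solver = DoubleOracleSolver(game)
  solution, iteration, value = solver.solve()
  # `solution` contains mixtures over the original pure strategies; for RPS
  # both should be close to [1/3, 1/3, 1/3].
  return solution, iteration, value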
| open_spiel-master | open_spiel/python/algorithms/double_oracle.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.deep_cfr."""
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python.algorithms import deep_cfr
from open_spiel.python.algorithms import exploitability
import pyspiel
# Temporarily disable TF2 behavior until we update the code.
tf.disable_v2_behavior()
class DeepCFRTest(parameterized.TestCase):
@parameterized.parameters('leduc_poker', 'kuhn_poker', 'liars_dice')
def test_deep_cfr_runs(self, game_name):
game = pyspiel.load_game(game_name)
with tf.Session() as sess:
deep_cfr_solver = deep_cfr.DeepCFRSolver(
sess,
game,
policy_network_layers=(8, 4),
advantage_network_layers=(4, 2),
num_iterations=2,
num_traversals=2,
learning_rate=1e-3,
batch_size_advantage=None,
batch_size_strategy=None,
memory_capacity=1e7)
sess.run(tf.global_variables_initializer())
deep_cfr_solver.solve()
def test_matching_pennies_3p(self):
# We don't expect Deep CFR to necessarily converge on 3-player games but
# it's nonetheless interesting to see this result.
game = pyspiel.load_game_as_turn_based('matching_pennies_3p')
with tf.Session() as sess:
deep_cfr_solver = deep_cfr.DeepCFRSolver(
sess,
game,
policy_network_layers=(16, 8),
advantage_network_layers=(32, 16),
num_iterations=2,
num_traversals=2,
learning_rate=1e-3,
batch_size_advantage=None,
batch_size_strategy=None,
memory_capacity=1e7)
sess.run(tf.global_variables_initializer())
deep_cfr_solver.solve()
conv = exploitability.nash_conv(
game,
policy.tabular_policy_from_callable(
game, deep_cfr_solver.action_probabilities))
print('Deep CFR in Matching Pennies 3p. NashConv: {}'.format(conv))
if __name__ == '__main__':
tf.test.main()
| open_spiel-master | open_spiel/python/algorithms/deep_cfr_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Value iteration algorithm for solving a game."""
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms import lp_solver
import pyspiel
def _get_future_states(possibilities, state, reach=1.0):
"""Does a lookahead over chance nodes to all next states after (s,a).
Also works if there are no chance nodes (i.e. base case).
Arguments:
possibilities: an empty list, that will be filled with (str(next_state),
transition probability) pairs for all possible next states
state: the state following some s.apply_action(a), can be a chance node
reach: chance reach probability of getting to this point from (s,a)
Returns: nothing.
"""
if not state.is_chance_node() or state.is_terminal():
# Base case
possibilities.append((str(state), reach))
else:
assert state.is_chance_node()
for outcome, prob in state.chance_outcomes():
next_state = state.child(outcome)
_get_future_states(possibilities, next_state, reach * prob)
def _add_transition(transitions, key, state):
"""Adds action transitions from given state."""
if state.is_simultaneous_node():
for p0action in state.legal_actions(0):
for p1action in state.legal_actions(1):
next_state = state.clone()
next_state.apply_actions([p0action, p1action])
possibilities = []
_get_future_states(possibilities, next_state)
transitions[(key, p0action, p1action)] = possibilities
else:
for action in state.legal_actions():
next_state = state.child(action)
possibilities = []
_get_future_states(possibilities, next_state)
transitions[(key, action)] = possibilities
def _initialize_maps(states, values, transitions):
"""Initialize the value and transition maps."""
for key, state in states.items():
if state.is_terminal():
values[key] = state.player_return(0)
else:
values[key] = 0
_add_transition(transitions, key, state)
def value_iteration(game, depth_limit, threshold, cyclic_game=False):
"""Solves for the optimal value function of a game.
For small games only! Solves the game using value iteration,
with the maximum error for the value function less than threshold.
This algorithm works for sequential 1-player games or 2-player zero-sum
games, with or without chance nodes.
Arguments:
game: The game to analyze, as returned by `load_game`.
depth_limit: How deeply to analyze the game tree. Negative means no limit, 0
means root-only, etc.
    threshold: Maximum error for state values.
cyclic_game: set to True if the game has cycles (from state A we can get to
state B, and from state B we can get back to state A).
Returns:
A `dict` with string keys and float values, mapping string encoding of
states to the values of those states.
"""
assert game.num_players() in (1,
2), ("Game must be a 1-player or 2-player game")
if game.num_players() == 2:
assert game.get_type().utility == pyspiel.GameType.Utility.ZERO_SUM, (
"2-player games must be zero sum games")
# Must be perfect information or one-shot (not imperfect information).
assert (game.get_type().information == pyspiel.GameType.Information.ONE_SHOT
or game.get_type().information ==
pyspiel.GameType.Information.PERFECT_INFORMATION)
# We expect Value Iteration to be used with perfect information games, in
# which `str` is assumed to display the state of the game.
states = get_all_states.get_all_states(
game,
depth_limit,
True,
False,
to_string=str,
stop_if_encountered=cyclic_game)
values = {}
transitions = {}
_initialize_maps(states, values, transitions)
error = threshold + 1 # A value larger than threshold
min_utility = game.min_utility()
while error > threshold:
error = 0
for key, state in states.items():
if state.is_terminal():
continue
elif state.is_simultaneous_node():
        # Simultaneous node. Assemble a matrix game from the child utilities
        # and solve it using a matrix game solver.
p0_utils = [] # row player
p1_utils = [] # col player
row = 0
for p0action in state.legal_actions(0):
# new row
p0_utils.append([])
p1_utils.append([])
for p1action in state.legal_actions(1):
# loop from left-to-right of columns
next_states = transitions[(key, p0action, p1action)]
joint_q_value = sum(
p * values[next_state] for next_state, p in next_states)
p0_utils[row].append(joint_q_value)
p1_utils[row].append(-joint_q_value)
row += 1
stage_game = pyspiel.create_matrix_game(p0_utils, p1_utils)
solution = lp_solver.solve_zero_sum_matrix_game(stage_game)
value = solution[2]
else:
# Regular decision node
player = state.current_player()
value = min_utility if player == 0 else -min_utility
for action in state.legal_actions():
next_states = transitions[(key, action)]
q_value = sum(p * values[next_state] for next_state, p in next_states)
if player == 0:
value = max(value, q_value)
else:
value = min(value, q_value)
error = max(abs(values[key] - value), error)
values[key] = value
return values
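# Editor's sketch (not part of the original file): hypothetical usage on
# tic-tac-toe. Optimal play is a draw, so the root value for player 0 is 0
# (value_iteration_test.py checks exactly this).
def _example_value_iteration():
  """Illustration only."""
  game = pyspiel.load_game("tic_tac_toe")
  values = value_iteration(game, depth_limit=-1, threshold=0.01)
  return values[str(game.new_initial_state())]  # expected to be 0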
| open_spiel-master | open_spiel/python/algorithms/value_iteration.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.discounted_cfr."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import discounted_cfr
from open_spiel.python.algorithms import expected_game_score
import pyspiel
class DiscountedCfrTest(absltest.TestCase):
def test_discounted_cfr_on_kuhn(self):
game = pyspiel.load_game("kuhn_poker")
solver = discounted_cfr.DCFRSolver(game)
for _ in range(300):
solver.evaluate_and_update_policy()
average_policy = solver.average_policy()
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [average_policy] * 2)
# 1/18 is the Nash value. See https://en.wikipedia.org/wiki/Kuhn_poker
np.testing.assert_allclose(
average_policy_values, [-1 / 18, 1 / 18], atol=1e-3)
def test_discounted_cfr_runs_against_leduc(self):
game = pyspiel.load_game("leduc_poker")
solver = discounted_cfr.DCFRSolver(game)
for _ in range(10):
solver.evaluate_and_update_policy()
solver.average_policy()
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/discounted_cfr_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example algorithm to get all states from a game.
The algorithm does not support mean field games where the game evolution depends
on the mean field distribution.
"""
import itertools
from open_spiel.python import games # pylint:disable=unused-import
import pyspiel
def _get_subgames_states(state, all_states, depth_limit, depth,
include_terminals, include_chance_states,
include_mean_field_states, to_string,
stop_if_encountered):
"""Extract non-chance states for a subgame into the all_states dict."""
if state.is_terminal():
if include_terminals:
# Include if not already present and then terminate recursion.
state_str = to_string(state)
if state_str not in all_states:
all_states[state_str] = state.clone()
return
if depth > depth_limit >= 0:
return
is_mean_field = state.current_player() == pyspiel.PlayerId.MEAN_FIELD
if (state.is_chance_node() and
include_chance_states) or (is_mean_field and
include_mean_field_states) or not (
state.is_chance_node() or is_mean_field):
# Add only if not already present
state_str = to_string(state)
if state_str not in all_states:
all_states[state_str] = state.clone()
else:
# We already saw this one. Stop the recursion if the flag is set
if stop_if_encountered:
return
if is_mean_field:
support = state.distribution_support()
state_for_search = state.clone()
support_length = len(support)
# update with a dummy distribution
state_for_search.update_distribution(
[1.0 / support_length for _ in range(support_length)])
_get_subgames_states(state_for_search, all_states, depth_limit, depth + 1,
include_terminals, include_chance_states,
include_mean_field_states, to_string,
stop_if_encountered)
elif state.is_simultaneous_node():
joint_legal_actions = [
state.legal_actions(player)
for player in range(state.get_game().num_players())
]
for joint_actions in itertools.product(*joint_legal_actions):
state_for_search = state.clone()
state_for_search.apply_actions(list(joint_actions))
_get_subgames_states(state_for_search, all_states, depth_limit, depth + 1,
include_terminals, include_chance_states,
include_mean_field_states, to_string,
stop_if_encountered)
else:
for action in state.legal_actions():
state_for_search = state.child(action)
_get_subgames_states(state_for_search, all_states, depth_limit, depth + 1,
include_terminals, include_chance_states,
include_mean_field_states, to_string,
stop_if_encountered)
def get_all_states(game,
depth_limit=-1,
include_terminals=True,
include_chance_states=False,
include_mean_field_states=False,
to_string=lambda s: s.history_str(),
stop_if_encountered=True):
"""Gets all states in the game, indexed by their string representation.
For small games only! Useful for methods that solve the games explicitly,
i.e. value iteration. Use this default implementation with caution as it does
a recursive tree walk of the game and could easily fill up memory for larger
games or games with long horizons.
Currently only works for sequential games.
Arguments:
game: The game to analyze, as returned by `load_game`.
depth_limit: How deeply to analyze the game tree. Negative means no limit, 0
means root-only, etc.
include_terminals: If True, include terminal states.
include_chance_states: If True, include chance node states.
include_mean_field_states: If True, include mean field node states.
to_string: The serialization function. We expect this to be
      `lambda s: s.history_str()`, as this enforces perfect recall; for
      historical reasons `str` is also supported, but the goal is to remove
      this argument.
stop_if_encountered: if this is set, do not keep recursively adding states
if this state is already in the list. This allows support for games that
have cycles.
Returns:
A `dict` with `to_string(state)` keys and `pyspiel.State` values containing
all states encountered traversing the game tree up to the specified depth.
"""
root_states = game.new_initial_states()
all_states = dict()
for root in root_states:
# Then, do a recursive tree walk to fill up the map.
_get_subgames_states(
state=root,
all_states=all_states,
depth_limit=depth_limit,
depth=0,
include_terminals=include_terminals,
include_chance_states=include_chance_states,
include_mean_field_states=include_mean_field_states,
to_string=to_string,
stop_if_encountered=stop_if_encountered)
if not all_states:
raise ValueError("GetSubgameStates returned 0 states!")
return all_states
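# Editor's sketch (not part of the original file): hypothetical usage on Kuhn
# poker, collecting only the decision nodes. Keys are history strings
# (state.history_str()) by default and values are cloned pyspiel.State objects.
def _example_get_all_states():
  """Illustration only."""
  game = pyspiel.load_game("kuhn_poker")
  all_states = get_all_states(
      game,
      depth_limit=-1,
      include_terminals=False,
      include_chance_states=False)
  return len(all_states)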
| open_spiel-master | open_spiel/python/algorithms/get_all_states.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the MCTS Agent."""
from absl.testing import absltest
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import mcts
from open_spiel.python.algorithms import mcts_agent
class MCTSAgentTest(absltest.TestCase):
def test_tic_tac_toe_episode(self):
env = rl_environment.Environment("tic_tac_toe", include_full_state=True)
num_players = env.num_players
num_actions = env.action_spec()["num_actions"]
# Create the MCTS bot. Both agents can share the same bot in this case since
# there is no state kept between searches. See mcts.py for more info about
# the arguments.
mcts_bot = mcts.MCTSBot(env.game, 1.5, 100, mcts.RandomRolloutEvaluator())
agents = [
mcts_agent.MCTSAgent(player_id=idx, num_actions=num_actions,
mcts_bot=mcts_bot)
for idx in range(num_players)
]
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/mcts_agent_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regret-Matching Algorithm.
This is an N-player implementation of the regret-matching algorithm described in
Hart & Mas-Colell 2000:
https://onlinelibrary.wiley.com/doi/abs/10.1111/1468-0262.00153
"""
import numpy as np
from open_spiel.python.algorithms import nfg_utils
# Start with initial regrets of 1 / denom
INITIAL_REGRET_DENOM = 1e6
def _partial_multi_dot(player_payoff_tensor, strategies, index_avoided):
"""Computes a generalized dot product avoiding one dimension.
This is used to directly get the expected return of a given action, given
other players' strategies, for the player indexed by index_avoided.
Note that the numpy.dot function is used to compute this product, as it ended
  up being (slightly) faster in performance tests than np.tensordot. Using the
reduce function proved slower for both np.dot and np.tensordot.
Args:
player_payoff_tensor: payoff tensor for player[index_avoided], of dimension
(dim(vector[0]), dim(vector[1]), ..., dim(vector[-1])).
strategies: Meta strategy probabilities for each player.
index_avoided: Player for which we do not compute the dot product.
Returns:
Vector of expected returns for each action of player [the player indexed by
index_avoided].
"""
new_axis_order = [index_avoided] + [
i for i in range(len(strategies)) if (i != index_avoided)
]
accumulator = np.transpose(player_payoff_tensor, new_axis_order)
for i in range(len(strategies) - 1, -1, -1):
if i != index_avoided:
accumulator = np.dot(accumulator, strategies[i])
return accumulator
def _regret_matching_step(payoff_tensors, strategies, regrets, gamma):
"""Does one step of the projected replicator dynamics algorithm.
Args:
payoff_tensors: List of payoff tensors for each player.
strategies: List of the strategies used by each player.
regrets: List of cumulative regrets used by each player.
gamma: Minimum exploratory probability term.
Returns:
A list of updated strategies for each player.
"""
# TODO(author4): Investigate whether this update could be fully vectorized.
new_strategies = []
for player in range(len(payoff_tensors)):
current_payoff_tensor = payoff_tensors[player]
current_strategy = strategies[player]
values_per_strategy = _partial_multi_dot(current_payoff_tensor, strategies,
player)
average_return = np.dot(values_per_strategy, current_strategy)
regrets[player] += values_per_strategy - average_return
updated_strategy = regrets[player].copy()
updated_strategy[updated_strategy < 0] = 0.0
sum_regret = updated_strategy.sum()
uniform_strategy = np.ones(len(updated_strategy)) / len(updated_strategy)
if sum_regret > 0:
updated_strategy /= sum_regret
updated_strategy = gamma * uniform_strategy + (1 -
gamma) * updated_strategy
else:
updated_strategy = uniform_strategy
new_strategies.append(updated_strategy)
return new_strategies
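# Editor's sketch (not part of the original file): the normalization used in
# the step above, on made-up numbers. Positive cumulative regrets are
# renormalized into a strategy, e.g. regrets [2, -1, 3] give [0.4, 0.0, 0.6]
# before the gamma-uniform exploration term is mixed in.
def _example_positive_regret_normalization(regret_vector, gamma=1e-6):
  """Illustration only; mirrors the normalization in _regret_matching_step."""
  positive = np.maximum(regret_vector, 0.0)
  total = positive.sum()
  uniform = np.ones(len(regret_vector)) / len(regret_vector)
  if total > 0:
    return gamma * uniform + (1 - gamma) * positive / total
  return uniform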
def regret_matching(payoff_tensors,
initial_strategies=None,
iterations=int(1e5),
gamma=1e-6,
average_over_last_n_strategies=None,
**unused_kwargs):
"""Runs regret-matching for the stated number of iterations.
Args:
payoff_tensors: List of payoff tensors for each player.
initial_strategies: Initial list of the strategies used by each player, if
any. Could be used to speed up the search by providing a good initial
solution.
iterations: Number of algorithmic steps to take before returning an answer.
gamma: Minimum exploratory probability term.
average_over_last_n_strategies: Running average window size for average
policy computation. If None, use the whole trajectory.
**unused_kwargs: Convenient way of exposing an API compatible with other
methods with possibly different arguments.
Returns:
RM-computed strategies.
"""
number_players = len(payoff_tensors)
# Number of actions available to each player.
action_space_shapes = payoff_tensors[0].shape
# If no initial starting position is given, start with uniform probabilities.
new_strategies = initial_strategies or [
np.ones(action_space_shapes[k]) / action_space_shapes[k]
for k in range(number_players)
]
regrets = [
np.ones(action_space_shapes[k]) / INITIAL_REGRET_DENOM
for k in range(number_players)
]
averager = nfg_utils.StrategyAverager(number_players, action_space_shapes,
average_over_last_n_strategies)
averager.append(new_strategies)
for _ in range(iterations):
new_strategies = _regret_matching_step(payoff_tensors, new_strategies,
regrets, gamma)
averager.append(new_strategies)
return averager.average_strategies()
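# Editor's sketch (not part of the original file): hypothetical usage on
# two-player matching pennies. The averaged regret-matching strategies should
# approach the uniform [0.5, 0.5] equilibrium for both players.
def _example_regret_matching():
  """Illustration only; the payoffs are the standard matching-pennies matrix."""
  mp = np.array([[1.0, -1.0],
                 [-1.0, 1.0]])
  strategies = regret_matching([mp, -mp], iterations=10000)
  return strategies  # each entry is expected to be near [0.5, 0.5]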
| open_spiel-master | open_spiel/python/algorithms/regret_matching.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.mip_nash."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms.mip_nash import mip_nash
import pyspiel
class MIPNash(absltest.TestCase):
def test_simple_games(self):
# prisoners' dilemma
pd_game = pyspiel.create_matrix_game(
[[-2.0, -10.0], [0.0, -5.0]], [[-2.0, 0.0], [-10.0, -5.0]]
)
pd_eq = (np.array([0, 1]), np.array([0, 1]))
computed_eq = mip_nash(pd_game, objective="MAX_SOCIAL_WELFARE")
with self.subTest("pd"):
np.testing.assert_array_almost_equal(computed_eq[0], pd_eq[0])
np.testing.assert_array_almost_equal(computed_eq[1], pd_eq[1])
# stag hunt
sh_game = pyspiel.create_matrix_game(
[[10.0, 1.0], [8.0, 5.0]], [[10.0, 8.0], [1.0, 5.0]]
)
sh_eq = (np.array([1, 0]), np.array([1, 0]))
computed_eq = mip_nash(sh_game, objective="MAX_SOCIAL_WELFARE")
with self.subTest("sh"):
np.testing.assert_array_almost_equal(computed_eq[0], sh_eq[0])
np.testing.assert_array_almost_equal(computed_eq[1], sh_eq[1])
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/mip_nash_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import nfg_utils
class NfgUtilsTest(absltest.TestCase):
def test_strategy_averager_len_smaller_than_window(self):
averager = nfg_utils.StrategyAverager(2, [2, 2], window_size=50)
averager.append([np.array([1.0, 0.0]), np.array([0.0, 1.0])])
averager.append([np.array([0.0, 1.0]), np.array([1.0, 0.0])])
avg_strategies = averager.average_strategies()
self.assertLen(avg_strategies, 2)
self.assertAlmostEqual(avg_strategies[0][0], 0.5)
self.assertAlmostEqual(avg_strategies[0][1], 0.5)
self.assertAlmostEqual(avg_strategies[1][0], 0.5)
self.assertAlmostEqual(avg_strategies[1][1], 0.5)
def test_strategy_averager(self):
first_action_strat = np.array([1.0, 0.0])
second_action_strat = np.array([0.0, 1.0])
averager_full = nfg_utils.StrategyAverager(2, [2, 2])
averager_window5 = nfg_utils.StrategyAverager(2, [2, 2], window_size=5)
averager_window6 = nfg_utils.StrategyAverager(2, [2, 2], window_size=6)
for _ in range(5):
averager_full.append([first_action_strat, first_action_strat])
averager_window5.append([first_action_strat, first_action_strat])
averager_window6.append([first_action_strat, first_action_strat])
for _ in range(5):
averager_full.append([second_action_strat, second_action_strat])
averager_window5.append([second_action_strat, second_action_strat])
averager_window6.append([second_action_strat, second_action_strat])
avg_full = averager_full.average_strategies()
avg_window5 = averager_window5.average_strategies()
avg_window6 = averager_window6.average_strategies()
self.assertAlmostEqual(avg_full[0][1], 0.5)
self.assertAlmostEqual(avg_window5[0][1], 5.0 / 5.0)
self.assertAlmostEqual(avg_window6[0][1], 5.0 / 6.0)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/nfg_utils_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.tabular_multiagent_qlearner."""
from absl.testing import absltest
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python.algorithms.tabular_qlearner import QLearner
from open_spiel.python.algorithms.wolf_phc import WoLFPHC
SEED = 18763511
class WoLFTest(absltest.TestCase):
def test_simple_pathfinding_run(self):
env = rl_environment.Environment(
"pathfinding", grid="B.A\n...\na.b", players=2, step_reward=-1.)
with self.subTest("wolf_phc"):
qlearner = QLearner(0, env.game.num_distinct_actions())
wolflearner = WoLFPHC(1, env.game.num_distinct_actions())
time_step = env.reset()
step_cnt = 0
while not time_step.last():
actions = [
qlearner.step(time_step).action,
wolflearner.step(time_step).action
]
time_step = env.step(actions)
step_cnt += 1
self.assertLess(step_cnt, 500)
def test_rps_run(self):
env = rl_environment.Environment("matrix_rps")
wolf0 = WoLFPHC(0, env.game.num_distinct_actions())
wolf1 = WoLFPHC(1, env.game.num_distinct_actions())
for _ in range(1000):
time_step = env.reset()
actions = [wolf0.step(time_step).action, wolf1.step(time_step).action]
time_step = env.step(actions)
wolf0.step(time_step)
wolf1.step(time_step)
with self.subTest("correct_rps_strategy"):
time_step = env.reset()
learner0_strategy, learner1_strategy = wolf0.step(
time_step).probs, wolf1.step(time_step).probs
np.testing.assert_array_almost_equal(
np.asarray([1 / 3, 1 / 3, 1 / 3]),
learner0_strategy.reshape(-1),
decimal=4)
np.testing.assert_array_almost_equal(
np.asarray([1 / 3, 1 / 3, 1 / 3]),
learner1_strategy.reshape(-1),
decimal=4)
if __name__ == "__main__":
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/wolf_phc_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.exploitability_descent."""
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import exploitability_descent
import pyspiel
# Temporary disabling of v2 behavior until code is updated.
tf.disable_v2_behavior()
class ExploitabilityDescentTest(tf.test.TestCase):
def test_solver_kuhn_poker_first_10_steps_convergence(self):
solver = exploitability_descent.Solver(pyspiel.load_game("kuhn_poker"))
with self.session() as session:
session.run(tf.global_variables_initializer())
nash_conv = [solver.step(session, learning_rate=1.0) for _ in range(11)]
# Nash conv is 2x exploitability. Values taken from test run, not
# independently verified.
np.testing.assert_allclose(nash_conv, [
0.91666666666666652, 0.67893004801213452, 0.48109148836354743,
0.40061420923255808, 0.36617242161468722, 0.33676996443499557,
0.30925081512398128, 0.28827843035940964, 0.26830042206858751,
0.24418597846799289, 0.22168699344791482
])
def test_solver_leduc_poker_first_10_steps_convergence(self):
solver = exploitability_descent.Solver(pyspiel.load_game("leduc_poker"))
with self.session() as session:
session.run(tf.global_variables_initializer())
nash_conv = [solver.step(session, learning_rate=1.0) for _ in range(11)]
# Nash conv is 2x exploitability. Values taken from test run, not
# independently verified.
np.testing.assert_allclose(nash_conv, [
4.7472224, 4.3147216, 3.9900389, 3.7576618, 3.5771275, 3.4414644,
3.3272073, 3.1898201, 3.1089299, 3.0108435, 2.8992782
])
if __name__ == "__main__":
tf.test.main()
| open_spiel-master | open_spiel/python/algorithms/exploitability_descent_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.sample_some_states."""
from absl.testing import absltest
from open_spiel.python.algorithms import sample_some_states
import pyspiel
class SampleSomeStatesTest(absltest.TestCase):
def test_sampling_in_simple_games(self):
matrix_mp_num_states = 1 + 2 + 4
game = pyspiel.load_game_as_turn_based("matrix_mp")
for n in range(1, matrix_mp_num_states + 1):
states = sample_some_states.sample_some_states(game, max_states=n)
self.assertLen(states, n)
states = sample_some_states.sample_some_states(game, max_states=1)
self.assertLen(states, 1)
states = sample_some_states.sample_some_states(
game, max_states=matrix_mp_num_states + 1)
self.assertLen(states, matrix_mp_num_states)
coordinated_mp_num_states = 1 + 2 + 4 + 8
game = pyspiel.load_game_as_turn_based("coordinated_mp")
for n in range(1, coordinated_mp_num_states + 1):
states = sample_some_states.sample_some_states(game, max_states=n)
self.assertLen(states, n)
states = sample_some_states.sample_some_states(
game, max_states=coordinated_mp_num_states + 1)
self.assertLen(states, coordinated_mp_num_states)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/sample_some_states_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Functions to manipulate game playthoughs.
Used by examples/playthrough.py and tests/playthrough_test.py.
Note that not all states are fully represented in the playthrough.
See the logic in ShouldDisplayStateTracker for details.
"""
import collections
import os
import re
from typing import Optional
from absl import flags
import numpy as np
from open_spiel.python import games # pylint: disable=unused-import
from open_spiel.python.mfg import games as mfgs # pylint: disable=unused-import
from open_spiel.python.observation import make_observation
import pyspiel
_USE_ACTION_IDS = flags.DEFINE_bool(
"playthough_use_action_ids", default=True,
help="Whether to use action names or ids when regenerating playthroughs")
# Precision can differ depending on the system and context where the playthrough
# is generated versus where it is re-generated for testing purposes. To
# ensure that tests don't fail due to precision, we set the tolerance
# accordingly.
_FLOAT_DECIMAL_PLACES = 6
def _escape(x):
"""Returns a newline-free backslash-escaped version of the given string."""
x = x.replace("\\", R"\\")
x = x.replace("\n", R"\n")
return x
def _format_value(v):
"""Format a single value."""
  if v == 0:
    return "◯"
  elif v == 1:
    return "◉"
  else:
    raise ValueError("Values must all be 0 or 1")
def _format_vec(vec):
"""Returns a readable format for a vector."""
full_fmt = "".join(_format_value(v) for v in vec)
short_fmt = None
max_len = 250
vec2int = lambda vec: int("".join("1" if b else "0" for b in vec), 2)
if len(vec) > max_len:
if all(v == 0 for v in vec):
short_fmt = f"zeros({len(vec)})"
elif all(v in (0, 1) for v in vec):
sz = (len(vec) + 15) // 16
# To reconstruct the original vector:
# binvec = lambda n, x: [int(x) for x in f"{x:0>{n}b}"]
short_fmt = f"binvec({len(vec)}, 0x{vec2int(vec):0>{sz}x})"
if short_fmt and len(short_fmt) < len(full_fmt):
return short_fmt
else:
return full_fmt
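# Illustrative sketch only: the compact "binvec(n, 0x...)" form emitted above
# can be decoded back into a 0/1 vector using the one-liner from the comment in
# `_format_vec`. The helper name below is hypothetical and not used elsewhere.
def _example_decode_binvec(n, x):
  """Decodes the hex payload of a `binvec(n, 0x...)` entry into a bit list."""
  return [int(b) for b in f"{x:0>{n}b}"]
# For instance, _example_decode_binvec(8, 0xa5) == [1, 0, 1, 0, 0, 1, 0, 1].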
def _format_matrix(mat):
return np.char.array([_format_vec(row) for row in mat])
def _format_float(x):
return ("{:." + str(_FLOAT_DECIMAL_PLACES) + "g}").format(x)
def _format_float_vector(v):
return "[" + ", ".join([_format_float(x) for x in v]) + "]"
def _format_chance_outcomes(chance_outcomes):
return "[" + ", ".join(["({},{})".format(outcome, _format_float(prob))
for (outcome, prob) in chance_outcomes]) + "]"
def _format_tensor(tensor, tensor_name, max_cols=120):
"""Formats a tensor in an easy-to-view format as a list of lines."""
if ((not tensor.shape) or (tensor.shape == (0,)) or (len(tensor.shape) > 3) or
not np.logical_or(tensor == 0, tensor == 1).all()):
vec = ", ".join(str(round(v, 5)) for v in tensor.ravel())
return ["{} = [{}]".format(tensor_name, vec)]
elif len(tensor.shape) == 1:
return ["{}: {}".format(tensor_name, _format_vec(tensor))]
elif len(tensor.shape) == 2:
if len(tensor_name) + tensor.shape[1] + 2 < max_cols:
lines = ["{}: {}".format(tensor_name, _format_vec(tensor[0]))]
prefix = " " * (len(tensor_name) + 2)
else:
lines = ["{}:".format(tensor_name), _format_vec(tensor[0])]
prefix = ""
for row in tensor[1:]:
lines.append(prefix + _format_vec(row))
return lines
elif len(tensor.shape) == 3:
lines = ["{}:".format(tensor_name)]
rows = []
for m in tensor:
formatted_matrix = _format_matrix(m)
if (not rows) or (len(rows[-1][0] + formatted_matrix[0]) + 2 > max_cols):
rows.append(formatted_matrix)
else:
rows[-1] = rows[-1] + " " + formatted_matrix
for i, big_row in enumerate(rows):
if i > 0:
lines.append("")
for row in big_row:
lines.append("".join(row))
return lines
def playthrough(game_string,
action_sequence,
alsologtostdout=False,
observation_params_string=None,
seed: Optional[int] = None):
"""Returns a playthrough of the specified game as a single text.
Actions are selected uniformly at random, including chance actions.
Args:
game_string: string, e.g. 'markov_soccer', with possible optional params,
e.g. 'go(komi=4.5,board_size=19)'.
action_sequence: A (possibly partial) list of action choices to make.
alsologtostdout: Whether to also print the trace to stdout. This can be
useful when an error occurs, to still be able to get context information.
observation_params_string: Optional observation parameters for constructing
an observer.
    seed: An optional seed used to initialize the random number generator.
"""
lines = playthrough_lines(game_string, alsologtostdout, action_sequence,
observation_params_string, seed)
return "\n".join(lines) + "\n"
def format_shapes(d):
"""Returns a string representing the shapes of a dict of tensors."""
if len(d) == 1:
return str(list(d[min(d)].shape))
else:
return ", ".join(f"{key}: {list(value.shape)}" for key, value in d.items())
def _format_params(d, as_game=False):
"""Format a collection of params."""
def fmt(val):
if isinstance(val, dict):
return _format_params(val, as_game=True)
else:
return _escape(str(val))
if as_game:
return d["name"] + "(" + ",".join(
"{}={}".format(key, fmt(value))
for key, value in sorted(d.items())
if key != "name") + ")"
else:
return "{" + ",".join(
"{}={}".format(key, fmt(value))
for key, value in sorted(d.items())) + "}"
class ShouldDisplayStateTracker:
"""Determines whether a state is interesting enough to display."""
def __init__(self):
self.states_by_player = collections.defaultdict(int)
def __call__(self, state) -> bool:
"""Returns True if a state is sufficiently interesting to display."""
player = state.current_player()
count = self.states_by_player[player]
self.states_by_player[player] += 1
if count == 0:
# Always display the first state for a player
return True
elif player == -1:
# For chance moves, display the first two only
return count < 2
else:
# For regular player moves, display the first three and selected others
return (count < 3) or (count % 10 == 0)
def playthrough_lines(game_string, alsologtostdout=False, action_sequence=None,
observation_params_string=None,
seed: Optional[int] = None):
"""Returns a playthrough of the specified game as a list of lines.
Actions are selected uniformly at random, including chance actions.
Args:
game_string: string, e.g. 'markov_soccer' or 'kuhn_poker(players=4)'.
alsologtostdout: Whether to also print the trace to stdout. This can be
useful when an error occurs, to still be able to get context information.
action_sequence: A (possibly partial) list of action choices to make.
observation_params_string: Optional observation parameters for constructing
an observer.
    seed: An optional seed used to initialize the random number generator.
"""
should_display_state_fn = ShouldDisplayStateTracker()
lines = []
action_sequence = action_sequence or []
should_display = True
def add_line(v, force=False):
if force or should_display:
if alsologtostdout:
print(v)
lines.append(v)
game = pyspiel.load_game(game_string)
add_line("game: {}".format(game_string))
if observation_params_string:
add_line("observation_params: {}".format(observation_params_string))
if seed is None:
seed = np.random.randint(2**32 - 1)
game_type = game.get_type()
observation_params = (
pyspiel.game_parameters_from_string(observation_params_string)
if observation_params_string
else None
)
default_observation = make_observation(
game,
imperfect_information_observation_type=None,
params=observation_params,
)
infostate_observation = make_observation(
game, pyspiel.IIGObservationType(perfect_recall=True)
)
public_observation = None
private_observation = None
# Instantiate factored observations only for imperfect information games,
  # as it would yield unnecessarily redundant information for perfect info games.
# The default observation is the same as the public observation, while private
# observations are always empty.
if game_type.information == game_type.Information.IMPERFECT_INFORMATION:
public_observation = make_observation(
game,
pyspiel.IIGObservationType(
public_info=True,
perfect_recall=False,
private_info=pyspiel.PrivateInfoType.NONE,
),
)
private_observation = make_observation(
game,
pyspiel.IIGObservationType(
public_info=False,
perfect_recall=False,
private_info=pyspiel.PrivateInfoType.SINGLE_PLAYER,
),
)
add_line("")
add_line("GameType.chance_mode = {}".format(game_type.chance_mode))
add_line("GameType.dynamics = {}".format(game_type.dynamics))
add_line("GameType.information = {}".format(game_type.information))
add_line("GameType.long_name = {}".format('"{}"'.format(game_type.long_name)))
add_line("GameType.max_num_players = {}".format(game_type.max_num_players))
add_line("GameType.min_num_players = {}".format(game_type.min_num_players))
add_line("GameType.parameter_specification = {}".format("[{}]".format(
", ".join('"{}"'.format(param)
for param in sorted(game_type.parameter_specification)))))
add_line("GameType.provides_information_state_string = {}".format(
game_type.provides_information_state_string))
add_line("GameType.provides_information_state_tensor = {}".format(
game_type.provides_information_state_tensor))
add_line("GameType.provides_observation_string = {}".format(
game_type.provides_observation_string))
add_line("GameType.provides_observation_tensor = {}".format(
game_type.provides_observation_tensor))
add_line("GameType.provides_factored_observation_string = {}".format(
game_type.provides_factored_observation_string))
add_line("GameType.reward_model = {}".format(game_type.reward_model))
add_line("GameType.short_name = {}".format('"{}"'.format(
game_type.short_name)))
add_line("GameType.utility = {}".format(game_type.utility))
add_line("")
add_line("NumDistinctActions() = {}".format(game.num_distinct_actions()))
add_line("PolicyTensorShape() = {}".format(game.policy_tensor_shape()))
add_line("MaxChanceOutcomes() = {}".format(game.max_chance_outcomes()))
add_line("GetParameters() = {}".format(_format_params(game.get_parameters())))
add_line("NumPlayers() = {}".format(game.num_players()))
add_line("MinUtility() = {:.5}".format(game.min_utility()))
add_line("MaxUtility() = {:.5}".format(game.max_utility()))
add_line("UtilitySum() = {}".format(game.utility_sum()))
if infostate_observation and infostate_observation.tensor is not None:
add_line("InformationStateTensorShape() = {}".format(
format_shapes(infostate_observation.dict)))
add_line("InformationStateTensorLayout() = {}".format(
game.information_state_tensor_layout()))
add_line("InformationStateTensorSize() = {}".format(
len(infostate_observation.tensor)))
if default_observation and default_observation.tensor is not None:
add_line("ObservationTensorShape() = {}".format(
format_shapes(default_observation.dict)))
add_line("ObservationTensorLayout() = {}".format(
game.observation_tensor_layout()))
add_line("ObservationTensorSize() = {}".format(
len(default_observation.tensor)))
add_line("MaxGameLength() = {}".format(game.max_game_length()))
add_line('ToString() = "{}"'.format(str(game)))
players = list(range(game.num_players()))
# Arbitrarily pick the last possible initial states (for all games
# but multi-population MFGs, there will be a single initial state).
state = game.new_initial_states()[-1]
state_idx = 0
rng = np.random.RandomState(seed)
while True:
should_display = should_display_state_fn(state)
add_line("", force=True)
add_line("# State {}".format(state_idx), force=True)
for line in str(state).splitlines():
add_line("# {}".format(line).rstrip())
add_line("IsTerminal() = {}".format(state.is_terminal()))
add_line("History() = {}".format([int(a) for a in state.history()]))
add_line('HistoryString() = "{}"'.format(state.history_str()))
add_line("IsChanceNode() = {}".format(state.is_chance_node()))
add_line("IsSimultaneousNode() = {}".format(state.is_simultaneous_node()))
add_line("CurrentPlayer() = {}".format(state.current_player()))
if infostate_observation:
for player in players:
s = infostate_observation.string_from(state, player)
if s is not None:
add_line(f'InformationStateString({player}) = "{_escape(s)}"')
if infostate_observation and infostate_observation.tensor is not None:
for player in players:
infostate_observation.set_from(state, player)
for name, tensor in infostate_observation.dict.items():
label = f"InformationStateTensor({player})"
label += f".{name}" if name != "info_state" else ""
for line in _format_tensor(tensor, label):
add_line(line)
if default_observation:
for player in players:
s = default_observation.string_from(state, player)
if s is not None:
add_line(f'ObservationString({player}) = "{_escape(s)}"')
if public_observation:
s = public_observation.string_from(state, 0)
if s is not None:
add_line('PublicObservationString() = "{}"'.format(_escape(s)))
for player in players:
s = private_observation.string_from(state, player)
if s is not None:
add_line(f'PrivateObservationString({player}) = "{_escape(s)}"')
if default_observation and default_observation.tensor is not None:
for player in players:
default_observation.set_from(state, player)
for name, tensor in default_observation.dict.items():
label = f"ObservationTensor({player})"
label += f".{name}" if name != "observation" else ""
for line in _format_tensor(tensor, label):
add_line(line)
if game_type.chance_mode == pyspiel.GameType.ChanceMode.SAMPLED_STOCHASTIC:
add_line('SerializeState() = "{}"'.format(_escape(state.serialize())))
if not state.is_chance_node():
add_line("Rewards() = {}".format(_format_float_vector(state.rewards())))
add_line("Returns() = {}".format(_format_float_vector(state.returns())))
if state.is_terminal():
break
if state.is_chance_node():
add_line("ChanceOutcomes() = {}".format(
_format_chance_outcomes(state.chance_outcomes())))
if state.is_mean_field_node():
add_line("DistributionSupport() = {}".format(
state.distribution_support()))
num_states = len(state.distribution_support())
state.update_distribution(
[1. / num_states] * num_states if num_states else [])
if state_idx < len(action_sequence):
assert action_sequence[state_idx] == "update_distribution", (
f"Unexpected action at MFG node: {action_sequence[state_idx]}, "
f"state: {state}, action_sequence: {action_sequence}")
add_line("")
add_line("# Set mean field distribution to be uniform", force=True)
add_line("action: update_distribution", force=True)
elif state.is_simultaneous_node():
for player in players:
add_line("LegalActions({}) = [{}]".format(
player, ", ".join(str(x) for x in state.legal_actions(player))))
for player in players:
add_line("StringLegalActions({}) = [{}]".format(
player, ", ".join('"{}"'.format(state.action_to_string(player, x))
for x in state.legal_actions(player))))
if state_idx < len(action_sequence):
actions = action_sequence[state_idx]
for i, a in enumerate(actions):
if isinstance(a, str):
actions[i] = state.string_to_action(i, a)
else:
actions = []
for pl in players:
legal_actions = state.legal_actions(pl)
actions.append(0 if not legal_actions else rng.choice(legal_actions))
add_line("")
add_line("# Apply joint action [{}]".format(
format(", ".join(
'"{}"'.format(state.action_to_string(player, action))
for player, action in enumerate(actions)))), force=True)
add_line("actions: [{}]".format(", ".join(
str(action) for action in actions)), force=True)
state.apply_actions(actions)
else:
add_line("LegalActions() = [{}]".format(", ".join(
str(x) for x in state.legal_actions())))
add_line("StringLegalActions() = [{}]".format(", ".join(
'"{}"'.format(state.action_to_string(state.current_player(), x))
for x in state.legal_actions())))
if state_idx < len(action_sequence):
action = action_sequence[state_idx]
if isinstance(action, str):
action = state.string_to_action(state.current_player(), action)
else:
action = rng.choice(state.legal_actions())
add_line("")
add_line('# Apply action "{}"'.format(
state.action_to_string(state.current_player(), action)), force=True)
add_line("action: {}".format(action), force=True)
state.apply_action(action)
state_idx += 1
return lines
def content_lines(lines):
"""Return lines with content."""
return [line for line in lines if line and line[0] == "#"]
def _playthrough_params(lines):
"""Returns the playthrough parameters from a playthrough record.
Args:
lines: The playthrough as a list of lines.
Returns:
A `dict` with entries:
game_string: string, e.g. 'markov_soccer'.
action_sequence: a list of action choices made in the playthrough.
Suitable for passing to playthrough to re-generate the playthrough.
Raises:
ValueError if the playthrough is not valid.
"""
params = {"action_sequence": []}
use_action_ids = _USE_ACTION_IDS.value
for line in lines:
match_game = re.fullmatch(r"game: (.*)", line)
match_observation_params = re.fullmatch(r"observation_params: (.*)", line)
match_update_distribution = (line == "action: update_distribution")
if use_action_ids:
match_action = re.fullmatch(r"action: (.*)", line)
match_actions = re.fullmatch(r"actions: \[(.*)\]", line)
else:
match_action = re.fullmatch(r'# Apply action "(.*)"', line)
match_actions = re.fullmatch(r"# Apply joint action \[(.*)\]", line)
if match_game:
params["game_string"] = match_game.group(1)
elif match_observation_params:
params["observation_params_string"] = match_observation_params.group(1)
elif match_update_distribution:
params["action_sequence"].append("update_distribution")
elif match_action:
matched = match_action.group(1)
if use_action_ids:
params["action_sequence"].append(int(matched))
else:
params["action_sequence"].append(matched)
elif match_actions:
if use_action_ids:
params["action_sequence"].append(
[int(x) for x in match_actions.group(1).split(", ")])
else:
params["action_sequence"].append(
[x[1:-1] for x in match_actions.group(1).split(", ")])
if "game_string" in params:
return params
raise ValueError("Could not find params")
def _read_playthrough(filename):
"""Returns the content and the parsed arguments of a playthrough file."""
with open(filename, "r", encoding="utf-8") as f:
original = f.read()
kwargs = _playthrough_params(original.splitlines())
return original, kwargs
def replay(filename):
"""Re-runs the playthrough in the specified file. Returns (original, new)."""
original, kwargs = _read_playthrough(filename)
return (original, playthrough(**kwargs))
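# Minimal usage sketch: generate a playthrough for a small game and keep only
# its commented content lines. The game string, action prefix and seed are
# illustrative; any registered game works.
def _example_generate_playthrough():
  text = playthrough("tic_tac_toe", action_sequence=[4, 0, 5], seed=1234)
  return "\n".join(content_lines(text.splitlines()))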
def update_path(path, shard_index=0, num_shards=1):
"""Regenerates all playthroughs in the path."""
if os.path.isfile(path):
file_list = [path]
else:
file_list = sorted(os.listdir(path))
for filename in file_list[shard_index::num_shards]:
try:
original, kwargs = _read_playthrough(os.path.join(path, filename))
try:
pyspiel.load_game(kwargs["game_string"])
except pyspiel.SpielError as e:
if "Unknown game" in str(e):
print(f"\x1b[0J[Skipped] Skipping game {filename} as ",
f"{kwargs['game_string']} is not available.")
continue
else:
raise
new = playthrough(**kwargs)
if original == new:
print(f"\x1b[0J {filename}", end="\r")
else:
with open(os.path.join(path, filename), "w") as f:
f.write(new)
print(f"\x1b[0JUpdated {filename}")
except Exception as e: # pylint: disable=broad-except
print(f"\x1b[0J{filename} failed: {e}")
raise
| open_spiel-master | open_spiel/python/algorithms/generate_playthrough.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of sequence-form linear programming.
This is a classic algorithm for solving two-player zero-sum games with imperfect
information. For a general introduction to the concepts, see Sec 5.2.3 of
Shoham & Leyton-Brown '09, Multiagent Systems: Algorithmic, Game-Theoretic, and
Logical Foundations http://www.masfoundations.org/mas.pdf.
In this implementation, we follow closely the construction in Koller, Megiddo,
and von Stengel, Fast Algorithms for Finding Randomized Strategies in Game Trees
http://theory.stanford.edu/~megiddo/pdf/stoc94.pdf. Specifically, we construct
and solve equations (8) and (9) from this paper.
"""
from open_spiel.python import policy
from open_spiel.python.algorithms import lp_solver
import pyspiel
_DELIMITER = " -=- "
_EMPTY_INFOSET_KEYS = ["***EMPTY_INFOSET_P0***", "***EMPTY_INFOSET_P1***"]
_EMPTY_INFOSET_ACTION_KEYS = [
"***EMPTY_INFOSET_ACTION_P0***", "***EMPTY_INFOSET_ACTION_P1***"
]
def _construct_lps(state, infosets, infoset_actions, infoset_action_maps,
chance_reach, lps, parent_is_keys, parent_isa_keys):
"""Build the linear programs recursively from this state.
Args:
state: an open spiel state (root of the game tree)
infosets: a list of dicts, one per player, that maps infostate to an id. The
dicts are filled by this function and should initially only contain root
values.
    infoset_actions: a list of dicts, one per player, that maps an
      (infostate, action) pair string to an id. The dicts are filled by this
      function and should initially only contain the root values.
infoset_action_maps: a list of dicts, one per player, that maps each
info_state to a list of (infostate, action) string
chance_reach: the contribution of chance's reach probability (should start
at 1).
lps: a list of linear programs, one per player. The first one will be
      constructed as in Eq (8) of Koller, Megiddo and von Stengel. The second
      LP is Eq (9). Initially these should contain only the root-level
constraints and variables.
parent_is_keys: a list of parent information state keys for this state
parent_isa_keys: a list of parent (infostate, action) keys
"""
if state.is_terminal():
returns = state.returns()
# Left-most term of: -Ay + E^t p >= 0
lps[0].add_or_reuse_constraint(parent_isa_keys[0], lp_solver.CONS_TYPE_GEQ)
lps[0].add_to_cons_coeff(parent_isa_keys[0], parent_isa_keys[1],
-1.0 * returns[0] * chance_reach)
# Right-most term of: -Ay + E^t p >= 0
lps[0].set_cons_coeff(parent_isa_keys[0], parent_is_keys[0], 1.0)
# Left-most term of: x^t (-A) - q^t F <= 0
lps[1].add_or_reuse_constraint(parent_isa_keys[1], lp_solver.CONS_TYPE_LEQ)
lps[1].add_to_cons_coeff(parent_isa_keys[1], parent_isa_keys[0],
-1.0 * returns[0] * chance_reach)
# Right-most term of: x^t (-A) - q^t F <= 0
lps[1].set_cons_coeff(parent_isa_keys[1], parent_is_keys[1], -1.0)
return
if state.is_chance_node():
for action, prob in state.chance_outcomes():
new_state = state.child(action)
_construct_lps(new_state, infosets, infoset_actions, infoset_action_maps,
prob * chance_reach, lps, parent_is_keys, parent_isa_keys)
return
player = state.current_player()
info_state = state.information_state_string(player)
legal_actions = state.legal_actions(player)
# p and q variables, inequality constraints, and part of equality constraints
if player == 0:
# p
lps[0].add_or_reuse_variable(info_state)
# -Ay + E^t p >= 0
lps[0].add_or_reuse_constraint(parent_isa_keys[0], lp_solver.CONS_TYPE_GEQ)
lps[0].set_cons_coeff(parent_isa_keys[0], parent_is_keys[0], 1.0)
lps[0].set_cons_coeff(parent_isa_keys[0], info_state, -1.0)
# x^t E^t = e^t
lps[1].add_or_reuse_constraint(info_state, lp_solver.CONS_TYPE_EQ)
lps[1].set_cons_coeff(info_state, parent_isa_keys[0], -1.0)
else:
# q
lps[1].add_or_reuse_variable(info_state)
# x^t (-A) - q^t F <= 0
lps[1].add_or_reuse_constraint(parent_isa_keys[1], lp_solver.CONS_TYPE_LEQ)
lps[1].set_cons_coeff(parent_isa_keys[1], parent_is_keys[1], -1.0)
lps[1].set_cons_coeff(parent_isa_keys[1], info_state, 1.0)
# -Fy = -f
lps[0].add_or_reuse_constraint(info_state, lp_solver.CONS_TYPE_EQ)
lps[0].set_cons_coeff(info_state, parent_isa_keys[1], -1.0)
# Add to the infostate maps
if info_state not in infosets[player]:
infosets[player][info_state] = len(infosets[player])
if info_state not in infoset_action_maps[player]:
infoset_action_maps[player][info_state] = []
new_parent_is_keys = parent_is_keys[:]
new_parent_is_keys[player] = info_state
for action in legal_actions:
isa_key = info_state + _DELIMITER + str(action)
if isa_key not in infoset_actions[player]:
infoset_actions[player][isa_key] = len(infoset_actions[player])
if isa_key not in infoset_action_maps[player][info_state]:
infoset_action_maps[player][info_state].append(isa_key)
# x and y variables, and finish equality constraints coeff
if player == 0:
lps[1].add_or_reuse_variable(isa_key, lb=0) # x
lps[1].set_cons_coeff(info_state, isa_key, 1.0) # x^t E^t = e^t
else:
lps[0].add_or_reuse_variable(isa_key, lb=0) # y
lps[0].set_cons_coeff(info_state, isa_key, 1.0) # -Fy = -f
new_parent_isa_keys = parent_isa_keys[:]
new_parent_isa_keys[player] = isa_key
new_state = state.child(action)
_construct_lps(new_state, infosets, infoset_actions, infoset_action_maps,
chance_reach, lps, new_parent_is_keys, new_parent_isa_keys)
def solve_zero_sum_game(game, solver=None):
"""Solve the two-player zero-sum game using sequence-form LPs.
Args:
    game: the OpenSpiel game to solve (must be zero-sum, sequential, and have chance
mode of deterministic or explicit stochastic).
solver: a specific solver to use, sent to cvxopt (i.e. 'lapack', 'blas',
'glpk'). A value of None uses cvxopt's default solver.
Returns:
A 4-tuple containing:
- player 0 value
- player 1 value
- player 0 policy: a policy.TabularPolicy for player 0
- player 1 policy: a policy.TabularPolicy for player 1
"""
assert game.num_players() == 2
assert game.get_type().utility == pyspiel.GameType.Utility.ZERO_SUM
assert game.get_type().dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL
assert (
game.get_type().chance_mode == pyspiel.GameType.ChanceMode.DETERMINISTIC
or game.get_type().chance_mode ==
pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC)
  # There are several important matrices and vectors that form the LPs that
# are built by this function:
#
  # A is the expected payoff to p1 for each ((infoset0,action0), (infoset1,
  #   action1)) pair belonging to p1 and p2 respectively that leads to a
  #   terminal state. It has dimensions (infoset-actions0) x (infoset-actions1)
# E,F are p1 / p2's strategy matrices (infosets) x (infoset-actions)
# e,f are infosets+ x 1 column vector of (1 0 0 ... 0)
# p,q are unconstrained variables each with infosets x 1.
# x,y are realization plans of size infoset-actions
#
  # In each of the computations above there is a special "root infoset" and
  # "root infoset-action", both denoted \emptyset. So the sizes are actually
  # equal to the number of infosets + 1 and infoset-actions + 1.
#
# Equation (8) is min_{y,p} e^T p
#
# s.t. -Ay + E^t p >= 0
# -Fy = -f
# y >= 0
#
# Equation (9) is max_{x,q} -q^T f
#
# s.t. x^t(-A) - q^t F <= 0
# x^t E^t = e^t
# x >= 0
#
# So, the first LP has:
# - |y| + |p| variables (infoset-actions1 + infosets0)
# - infoset-actions0 inequality constraints (other than var lower-bounds)
# - infosets1 equality constraints
#
# And the second LP has:
# - |x| + |q| variables (infoset-actions0 + infosets1)
# - infoset-actions1 inequality constraints (other than var lower-bounds)
# - infosets0 equality constraints
infosets = [{_EMPTY_INFOSET_KEYS[0]: 0}, {_EMPTY_INFOSET_KEYS[1]: 0}]
infoset_actions = [{
_EMPTY_INFOSET_ACTION_KEYS[0]: 0
}, {
_EMPTY_INFOSET_ACTION_KEYS[1]: 0
}]
infoset_action_maps = [{}, {}]
lps = [
lp_solver.LinearProgram(lp_solver.OBJ_MIN), # Eq. (8)
lp_solver.LinearProgram(lp_solver.OBJ_MAX) # Eq. (9)
]
# Root-level variables and constraints.
lps[0].add_or_reuse_variable(_EMPTY_INFOSET_ACTION_KEYS[1], lb=0) # y root
lps[0].add_or_reuse_variable(_EMPTY_INFOSET_KEYS[0]) # p root
lps[1].add_or_reuse_variable(_EMPTY_INFOSET_ACTION_KEYS[0], lb=0) # x root
lps[1].add_or_reuse_variable(_EMPTY_INFOSET_KEYS[1]) # q root
# objective coefficients
lps[0].set_obj_coeff(_EMPTY_INFOSET_KEYS[0], 1.0) # e^t p
lps[1].set_obj_coeff(_EMPTY_INFOSET_KEYS[1], -1.0) # -q^t f
# y_root = 1 (-Fy = -f)
lps[0].add_or_reuse_constraint(_EMPTY_INFOSET_KEYS[1], lp_solver.CONS_TYPE_EQ)
lps[0].set_cons_coeff(_EMPTY_INFOSET_KEYS[1], _EMPTY_INFOSET_ACTION_KEYS[1],
-1.0)
lps[0].set_cons_rhs(_EMPTY_INFOSET_KEYS[1], -1.0)
# x_root = 1 (x^t E^t = e^t)
lps[1].add_or_reuse_constraint(_EMPTY_INFOSET_KEYS[0], lp_solver.CONS_TYPE_EQ)
lps[1].set_cons_coeff(_EMPTY_INFOSET_KEYS[0], _EMPTY_INFOSET_ACTION_KEYS[0],
1.0)
lps[1].set_cons_rhs(_EMPTY_INFOSET_KEYS[0], 1.0)
_construct_lps(game.new_initial_state(), infosets, infoset_actions,
infoset_action_maps, 1.0, lps, _EMPTY_INFOSET_KEYS[:],
_EMPTY_INFOSET_ACTION_KEYS[:])
# Solve the programs.
solutions = [lps[0].solve(solver=solver), lps[1].solve(solver=solver)]
# Extract the policies (convert from realization plan to behavioral form).
policies = [policy.TabularPolicy(game), policy.TabularPolicy(game)]
for i in range(2):
for info_state in infoset_action_maps[i]:
total_weight = 0
num_actions = 0
for isa_key in infoset_action_maps[i][info_state]:
total_weight += solutions[1 - i][lps[1 - i].get_var_id(isa_key)]
num_actions += 1
unif_pr = 1.0 / num_actions
state_policy = policies[i].policy_for_key(info_state)
for isa_key in infoset_action_maps[i][info_state]:
# The 1 - i here is due to Eq (8) yielding a solution for player 1 and
# Eq (9) a solution for player 0.
rel_weight = solutions[1 - i][lps[1 - i].get_var_id(isa_key)]
_, action_str = isa_key.split(_DELIMITER)
action = int(action_str)
pr_action = rel_weight / total_weight if total_weight > 0 else unif_pr
state_policy[action] = pr_action
return (solutions[0][lps[0].get_var_id(_EMPTY_INFOSET_KEYS[0])],
solutions[1][lps[1].get_var_id(_EMPTY_INFOSET_KEYS[1])], policies[0],
policies[1])
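# Minimal usage sketch (lp_solver relies on cvxopt being installed): solve a
# small zero-sum game end to end. Kuhn poker's value is known to be about
# -1/18 for player 0, so the two returned values should be close to
# (-0.0556, 0.0556); treat the exact numbers here as illustrative.
def _example_solve_kuhn_poker():
  game = pyspiel.load_game("kuhn_poker")
  val0, val1, policy0, _ = solve_zero_sum_game(game)
  print("Value to player 0:", val0, "value to player 1:", val1)
  # The returned policies are policy.TabularPolicy objects for players 0 and 1.
  return policy0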
| open_spiel-master | open_spiel/python/algorithms/sequence_form_lp.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements an Ephemeral Value Adjustment Agent.
See https://arxiv.org/abs/1810.08163.
The algorithm queries trajectories from a replay buffer based on similarities
to embedding representations and uses a parametric model to compute values for
counterfactual state-action pairs when integrating across those trajectories.
Finally, a weighted average between the parametric (DQN in this case) and the
non-parametric model is used to compute the policy.
"""
import collections
import copy
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_agent
from open_spiel.python import simple_nets
from open_spiel.python.algorithms import dqn
# Temporarily disable TF2 behavior until we update the code.
tf.disable_v2_behavior()
MEM_KEY_NAME = "embedding"
ValueBufferElement = collections.namedtuple("ValueElement", "embedding value")
ReplayBufferElement = collections.namedtuple(
"ReplayElement", "embedding info_state action reward next_info_state "
"is_final_step legal_actions_mask")
# TODO(author3) Refactor into data structures lib.
class QueryableFixedSizeRingBuffer(dqn.ReplayBuffer):
"""ReplayBuffer of fixed size with a FIFO replacement policy.
Stored transitions can be sampled uniformly. This extends the DQN replay
buffer by allowing the contents to be fetched by L2 proximity to a query
value.
  The underlying data structure is a ring buffer, allowing O(1) adding and
  sampling.
"""
def knn(self, key, key_name, k, trajectory_len=1):
"""Computes top-k neighbours based on L2 distance.
Args:
key: (np.array) key value to query memory.
key_name: (str) attribute name of key in memory elements.
k: (int) number of neighbours to fetch.
trajectory_len: (int) length of trajectory to fetch from replay buffer.
Returns:
      List of tuples (L2 distance, BufferElement) sorted in increasing order
      by the negative L2 distances from the key.
"""
distances = [(np.linalg.norm(getattr(sample, key_name) - key, 2,
axis=0), sample) for sample in self._data]
return sorted(distances, key=lambda v: -v[0])[:k]
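# Minimal sketch of a knn query on the buffer above, using the small
# ValueBufferElement records defined in this module; the capacity, embeddings
# and query point are illustrative only.
def _example_knn_query():
  buffer = QueryableFixedSizeRingBuffer(8)
  for v in range(4):
    buffer.add(ValueBufferElement(embedding=np.array([float(v), 0.0]), value=v))
  # Fetch two (distance, element) pairs for the query embedding [0.5, 0.0].
  return buffer.knn(np.array([0.5, 0.0]), MEM_KEY_NAME, k=2)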
class EVAAgent(object):
"""Implements a solver for Ephemeral VAlue Adjustment.
See https://arxiv.org/abs/1810.08163.
Define all networks and sampling buffers/memories. Derive losses & learning
steps. Initialize the game state and algorithmic variables.
"""
def __init__(self,
session,
game,
player_id,
state_size,
num_actions,
embedding_network_layers=(128,),
embedding_size=16,
dqn_hidden_layers=(128, 128),
batch_size=16,
trajectory_len=10,
num_neighbours=5,
learning_rate=1e-4,
mixing_parameter=0.9,
memory_capacity=int(1e6),
discount_factor=1.0,
update_target_network_every=1000,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e4),
embedding_as_parametric_input=False):
"""Initialize the Ephemeral VAlue Adjustment algorithm.
Args:
session: (tf.Session) TensorFlow session.
game: (rl_environment.Environment) Open Spiel game.
player_id: (int) Player id for this player.
state_size: (int) Size of info state vector.
num_actions: (int) number of actions.
embedding_network_layers: (list[int]) Layer sizes of strategy net MLP.
embedding_size: (int) Size of memory embeddings.
dqn_hidden_layers: (list(int)) MLP layer sizes of DQN network.
batch_size: (int) Size of batches for DQN learning steps.
trajectory_len: (int) Length of trajectories from replay buffer.
num_neighbours: (int) Number of neighbours to fetch from replay buffer.
learning_rate: (float) Learning rate.
mixing_parameter: (float) Value mixing parameter between 0 and 1.
      memory_capacity: Number of samples that can be stored in memory.
discount_factor: (float) Discount factor for Q-Learning.
update_target_network_every: How often to update DQN target network.
epsilon_start: (float) Starting epsilon-greedy value.
epsilon_end: (float) Final epsilon-greedy value.
epsilon_decay_duration: (float) Number of steps over which epsilon decays.
embedding_as_parametric_input: (bool) Whether we use embeddings as input
to the parametric model.
"""
assert (mixing_parameter >= 0 and mixing_parameter <= 1)
self._game = game
self._session = session
self.player_id = player_id
self._env = game
self._num_actions = num_actions
self._info_state_size = state_size
self._embedding_size = embedding_size
self._lambda = mixing_parameter
self._trajectory_len = trajectory_len
self._num_neighbours = num_neighbours
self._discount = discount_factor
self._epsilon_start = epsilon_start
self._epsilon_end = epsilon_end
self._epsilon_decay_duration = epsilon_decay_duration
self._last_time_step = None
self._last_action = None
self._embedding_as_parametric_input = embedding_as_parametric_input
# Create required TensorFlow placeholders to perform the Q-network updates.
self._info_state_ph = tf.placeholder(
shape=[None, self._info_state_size],
dtype=tf.float32,
name="info_state_ph")
self._embedding_network = simple_nets.MLP(self._info_state_size,
list(embedding_network_layers),
embedding_size)
self._embedding = self._embedding_network(self._info_state_ph)
# The DQN agent requires this be an integer.
if not isinstance(memory_capacity, int):
raise ValueError("Memory capacity not an integer.")
# Initialize the parametric & non-parametric Q-networks.
self._agent = dqn.DQN(
session,
player_id,
state_representation_size=self._info_state_size,
num_actions=self._num_actions,
hidden_layers_sizes=list(dqn_hidden_layers),
replay_buffer_capacity=memory_capacity,
replay_buffer_class=QueryableFixedSizeRingBuffer,
batch_size=batch_size,
learning_rate=learning_rate,
update_target_network_every=update_target_network_every,
learn_every=batch_size,
discount_factor=1.0,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e6))
# Initialize Value Buffers - Fetch Replay buffers from agents.
self._value_buffer = QueryableFixedSizeRingBuffer(memory_capacity)
self._replay_buffer = self._agent.replay_buffer
# Initialize non-parametric & EVA Q-values.
self._v_np = collections.defaultdict(float)
self._q_np = collections.defaultdict(lambda: [0] * self._num_actions)
self._q_eva = collections.defaultdict(lambda: [0] * self._num_actions)
@property
def env(self):
return self._env
@property
def loss(self):
return self._agent.loss
def _add_transition_value(self, infostate_embedding, value):
"""Adds the embedding and value to the ValueBuffer.
Args:
      infostate_embedding: (np.array) embedding vector.
      value: (float) Value associated with the state embedding.
"""
transition = ValueBufferElement(embedding=infostate_embedding, value=value)
self._value_buffer.add(transition)
def _add_transition_replay(self, infostate_embedding, time_step):
"""Adds the new transition using `time_step` to the replay buffer.
Adds the transition from `self._prev_timestep` to `time_step` by
`self._prev_action`.
Args:
      infostate_embedding: embedding vector.
time_step: an instance of rl_environment.TimeStep.
"""
prev_timestep = self._last_time_step
assert prev_timestep is not None
legal_actions = (
prev_timestep.observations["legal_actions"][self.player_id])
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
reward = time_step.rewards[self.player_id] if time_step.rewards else 0.0
transition = ReplayBufferElement(
embedding=infostate_embedding,
info_state=(prev_timestep.observations["info_state"][self.player_id]),
action=self._last_action,
reward=reward,
next_info_state=time_step.observations["info_state"][self.player_id],
is_final_step=float(time_step.last()),
legal_actions_mask=legal_actions_mask)
self._replay_buffer.add(transition)
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the value functions.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
# Act step: don't act at terminal info states.
if not time_step.last():
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
epsilon = self._get_epsilon(self._agent.step_counter, is_evaluation)
# Sample an action from EVA via epsilon greedy policy.
action, probs = self._epsilon_greedy(self._q_eva[tuple(info_state)],
legal_actions, epsilon)
# Update Step: Only with transitions and not when evaluating.
if (not is_evaluation and self._last_time_step is not None):
info_state = self._last_time_step.observations["info_state"][
self.player_id]
legal_actions = self._last_time_step.observations["legal_actions"][
self.player_id]
epsilon = self._get_epsilon(self._agent.step_counter, is_evaluation)
# Get embedding.
infostate_embedding = self._session.run(
self._embedding,
feed_dict={self._info_state_ph: np.expand_dims(info_state,
axis=0)})[0]
neighbours_value = self._value_buffer.knn(infostate_embedding,
MEM_KEY_NAME,
self._num_neighbours, 1)
# collect trace values of knn from L (value buffer) .. Q_np(s_k)
neighbours_replay = self._replay_buffer.knn(infostate_embedding,
MEM_KEY_NAME,
self._num_neighbours,
self._trajectory_len)
# Take a step with the parametric model and get q-values. Use embedding as
      # input to the parametric model.
# TODO(author6) Recompute embeddings for buffers on learning steps.
if self._embedding_as_parametric_input:
last_time_step_copy = copy.deepcopy(self._last_time_step)
last_time_step_copy.observations["info_state"][
self.player_id] = infostate_embedding
self._agent.step(last_time_step_copy, add_transition_record=False)
else:
self._agent.step(self._last_time_step, add_transition_record=False)
q_values = self._session.run(
self._agent.q_values,
feed_dict={
self._agent.info_state_ph: np.expand_dims(info_state, axis=0)
})[0]
# Update EVA: Q_eva = lambda q_theta(s_t) + (1-lambda) sum(Q_np(s_k, .))/K
for a in legal_actions:
q_theta = q_values[a]
self._q_eva[tuple(info_state)][a] = (
self._lambda * q_theta + (1 - self._lambda) *
sum([elem[1].value
for elem in neighbours_value]) / self._num_neighbours)
# Append (e,s,a,r,s') to Replay Buffer
self._add_transition_replay(infostate_embedding, time_step)
# update Q_np with Traces using TCP
self._trajectory_centric_planning(neighbours_replay)
# Append Q_np(s, a) to Value Buffer
self._add_transition_value(
infostate_embedding, self._q_np[tuple(info_state)][self._last_action])
# Prepare for the next episode.
if time_step.last():
self._last_time_step = None
self._last_action = None
return
self._last_time_step = time_step
self._last_action = action
return rl_agent.StepOutput(action=action, probs=probs)
def _trajectory_centric_planning(self, trajectories):
"""Performs trajectory centric planning.
Uses trajectories from the replay buffer to update the non-parametric values
while supplying counter-factual values with the parametric model.
Args:
      trajectories: list of (distance, ReplayBufferElement) tuples fetched from
        the replay buffer.
"""
# Calculate non-parametric values over the trajectories.
# Iterate backward through trajectories
for t in range(len(trajectories) - 1, 0, -1):
elem = trajectories[t][1]
s_tp1 = tuple(elem.next_info_state)
s_t = tuple(elem.info_state)
a_t = elem.action
r_t = elem.reward
legal_actions = elem.legal_actions_mask
if t < len(trajectories) - 1:
for action in range(len(legal_actions)):
if not legal_actions[action]:
continue
if action == elem.action:
self._q_np[s_t][a_t] = (r_t + self._discount * self._v_np[s_tp1])
else:
q_values_parametric = self._session.run(
self._agent.q_values,
feed_dict={
self._agent.info_state_ph:
np.expand_dims(elem.info_state, axis=0)
})
            self._q_np[s_t][action] = q_values_parametric[0][action]
# Set V(s_t)
if t == len(trajectories) - 1:
# Sample from the parametric model.
q_values_parametric = self._session.run(
self._agent.q_values,
feed_dict={
self._agent.info_state_ph:
np.expand_dims(elem.info_state, axis=0)
})
self._v_np[s_t] = np.max(q_values_parametric)
else:
self._v_np[s_t] = max(self._q_np[s_t])
def _epsilon_greedy(self, q_values, legal_actions, epsilon):
"""Returns a valid epsilon-greedy action and valid action probs.
Action probabilities are given by a softmax over legal q-values.
Args:
q_values: list of Q-values by action.
legal_actions: list of legal actions at `info_state`.
epsilon: float, probability of taking an exploratory action.
Returns:
A valid epsilon-greedy action and valid action probabilities.
"""
probs = np.zeros(self._num_actions)
q_values = np.array(q_values)
if np.random.rand() < epsilon:
action = np.random.choice(legal_actions)
probs[legal_actions] = 1.0 / len(legal_actions)
else:
legal_q_values = q_values[legal_actions]
action = legal_actions[np.argmax(legal_q_values)]
# Reduce max_q for numerical stability. Result is the same.
max_q = np.max(legal_q_values)
e_x = np.exp(legal_q_values - max_q)
probs[legal_actions] = e_x / e_x.sum(axis=0)
return action, probs
def _get_epsilon(self, step_counter, is_evaluation):
"""Returns the evaluation or decayed epsilon value."""
if is_evaluation:
return 0.0
decay_steps = min(step_counter, self._epsilon_decay_duration)
decayed_epsilon = (
self._epsilon_end + (self._epsilon_start - self._epsilon_end) *
(1 - decay_steps / self._epsilon_decay_duration))
return decayed_epsilon
def action_probabilities(self, state):
"""Returns action probabilites dict for a single batch."""
# TODO(author3, author6): Refactor this to expect pre-normalized form.
if hasattr(state, "information_state_tensor"):
state_rep = tuple(state.information_state_tensor(self.player_id))
elif hasattr(state, "observation_tensor"):
state_rep = tuple(state.observation_tensor(self.player_id))
else:
raise AttributeError("Unable to extract normalized state vector.")
legal_actions = state.legal_actions(self.player_id)
if legal_actions:
_, probs = self._epsilon_greedy(
self._q_eva[state_rep], legal_actions, epsilon=0.0)
return {a: probs[a] for a in range(self._num_actions)}
else:
raise ValueError("Node has no legal actions to take.")
| open_spiel-master | open_spiel/python/algorithms/eva.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes a Best-Response policy.
The goal of this file is to be the main entry-point for BR APIs in Python.
TODO(author2): Also include computation using the more efficient C++
`TabularBestResponse` implementation.
"""
import collections
import itertools
import numpy as np
from open_spiel.python import games # pylint:disable=unused-import
from open_spiel.python import policy as openspiel_policy
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms import noisy_policy
from open_spiel.python.algorithms import policy_utils
import pyspiel
def _memoize_method(key_fn=lambda x: x):
"""Memoize a single-arg instance method using an on-object cache."""
def memoizer(method):
cache_name = "cache_" + method.__name__
def wrap(self, arg):
key = key_fn(arg)
cache = vars(self).setdefault(cache_name, {})
if key not in cache:
cache[key] = method(self, arg)
return cache[key]
return wrap
return memoizer
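# Tiny sketch of how the decorator above behaves; the class and method names
# are hypothetical and exist only to illustrate the per-instance cache.
class _MemoizeExample:
  @_memoize_method()
  def square(self, x):
    print("computing square of", x)
    return x * x
# On a single instance, the first call prints and computes; repeated calls with
# the same argument return the cached value without printing:
#   example = _MemoizeExample()
#   example.square(3)  # prints, returns 9
#   example.square(3)  # silent, returns the cached 9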
def compute_states_and_info_states_if_none(game,
all_states=None,
state_to_information_state=None):
"""Returns all_states and/or state_to_information_state for the game.
To recompute everything, pass in None for both all_states and
state_to_information_state. Otherwise, this function will use the passed in
values to reconstruct either of them.
Args:
game: The open_spiel game.
all_states: The result of calling get_all_states.get_all_states. Cached for
improved performance.
state_to_information_state: A dict mapping state.history_str() to
state.information_state for every state in the game. Cached for improved
performance.
"""
if all_states is None:
all_states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False)
if state_to_information_state is None:
state_to_information_state = {
state: all_states[state].information_state_string()
for state in all_states
}
return all_states, state_to_information_state
class BestResponsePolicy(openspiel_policy.Policy):
"""Computes the best response to a specified strategy."""
def __init__(self,
game,
player_id,
policy,
root_state=None,
cut_threshold=0.0):
"""Initializes the best-response calculation.
Args:
game: The game to analyze.
player_id: The player id of the best-responder.
policy: A `policy.Policy` object.
root_state: The state of the game at which to start analysis. If `None`,
the game root state is used.
cut_threshold: The probability to cut when calculating the value.
Increasing this value will trade off accuracy for speed.
"""
self._num_players = game.num_players()
self._player_id = player_id
self._policy = policy
if root_state is None:
root_state = game.new_initial_state()
self._root_state = root_state
self.infosets = self.info_sets(root_state)
self._cut_threshold = cut_threshold
def info_sets(self, state):
"""Returns a dict of infostatekey to list of (state, cf_probability)."""
infosets = collections.defaultdict(list)
for s, p in self.decision_nodes(state):
infosets[s.information_state_string(self._player_id)].append((s, p))
return dict(infosets)
def decision_nodes(self, parent_state):
"""Yields a (state, cf_prob) pair for each descendant decision node."""
if not parent_state.is_terminal():
if (parent_state.current_player() == self._player_id or
parent_state.is_simultaneous_node()):
yield (parent_state, 1.0)
for action, p_action in self.transitions(parent_state):
for state, p_state in self.decision_nodes(
openspiel_policy.child(parent_state, action)):
yield (state, p_state * p_action)
def joint_action_probabilities_counterfactual(self, state):
"""Get list of action, probability tuples for simultaneous node.
    Counterfactual reach probabilities exclude the best-responder's actions, so
    the sum of the probabilities is equal to the number of actions of the
    player `_player_id`.
Args:
state: the current state of the game.
Returns:
list of action, probability tuples. An action is a tuple of individual
actions for each player of the game.
"""
actions_per_player, probs_per_player = (
openspiel_policy.joint_action_probabilities_aux(state, self._policy))
probs_per_player[self._player_id] = [
1.0 for _ in probs_per_player[self._player_id]
]
return [(list(actions), np.prod(probs)) for actions, probs in zip(
itertools.product(
*actions_per_player), itertools.product(*probs_per_player))]
def transitions(self, state):
"""Returns a list of (action, cf_prob) pairs from the specified state."""
if state.current_player() == self._player_id:
# Counterfactual reach probabilities exclude the best-responder's actions,
# hence return probability 1.0 for every action.
return [(action, 1.0) for action in state.legal_actions()]
elif state.is_chance_node():
return state.chance_outcomes()
elif state.is_simultaneous_node():
return self.joint_action_probabilities_counterfactual(state)
else:
return list(self._policy.action_probabilities(state).items())
@_memoize_method(key_fn=lambda state: state.history_str())
def value(self, state):
"""Returns the value of the specified state to the best-responder."""
if state.is_terminal():
return state.player_return(self._player_id)
elif (state.current_player() == self._player_id or
state.is_simultaneous_node()):
action = self.best_response_action(
state.information_state_string(self._player_id))
return self.q_value(state, action)
else:
return sum(p * self.q_value(state, a)
for a, p in self.transitions(state)
if p > self._cut_threshold)
def q_value(self, state, action):
"""Returns the value of the (state, action) to the best-responder."""
if state.is_simultaneous_node():
def q_value_sim(sim_state, sim_actions):
child = sim_state.clone()
# change action of _player_id
sim_actions[self._player_id] = action
child.apply_actions(sim_actions)
return self.value(child)
actions, probabilities = zip(*self.transitions(state))
return sum(p * q_value_sim(state, a)
for a, p in zip(actions, probabilities / sum(probabilities))
if p > self._cut_threshold)
else:
return self.value(state.child(action))
@_memoize_method()
def best_response_action(self, infostate):
"""Returns the best response for this information state."""
infoset = self.infosets[infostate]
# Get actions from the first (state, cf_prob) pair in the infoset list.
# Return the best action by counterfactual-reach-weighted state-value.
return max(
infoset[0][0].legal_actions(self._player_id),
key=lambda a: sum(cf_p * self.q_value(s, a) for s, cf_p in infoset))
def action_probabilities(self, state, player_id=None):
"""Returns the policy for a player in a state.
Args:
state: A `pyspiel.State` object.
player_id: Optional, the player id for whom we want an action. Optional
unless this is a simultaneous state at which multiple players can act.
Returns:
A `dict` of `{action: probability}` for the specified player in the
supplied state.
"""
if player_id is None:
if state.is_simultaneous_node():
player_id = self._player_id
else:
player_id = state.current_player()
return {
self.best_response_action(state.information_state_string(player_id)): 1
}
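# Minimal usage sketch: best-responding to the uniform random policy in Kuhn
# poker. The game and policy choice are illustrative; `value` on the root state
# returns the best responder's expected return against the fixed opponent.
def _example_best_response_to_uniform():
  game = pyspiel.load_game("kuhn_poker")
  uniform_policy = openspiel_policy.UniformRandomPolicy(game)
  br = BestResponsePolicy(game, player_id=0, policy=uniform_policy)
  root = game.new_initial_state()
  return br.value(root), br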
class CPPBestResponsePolicy(openspiel_policy.Policy):
"""Computes best response action_probabilities using open_spiel's C++ backend.
May have better performance than best_response.py for large games.
"""
def __init__(self,
game,
best_responder_id,
policy,
all_states=None,
state_to_information_state=None,
best_response_processor=None,
cut_threshold=0.0):
"""Constructor.
Args:
game: The game to analyze.
best_responder_id: The player id of the best-responder.
policy: A policy.Policy object representing the joint policy, taking a
state and returning a list of (action, probability) pairs. This could be
aggr_policy, for instance.
all_states: The result of calling get_all_states.get_all_states. Cached
for improved performance.
state_to_information_state: A dict mapping state.history_str to
state.information_state for every state in the game. Cached for improved
performance.
best_response_processor: A TabularBestResponse object, used for processing
the best response actions.
cut_threshold: The probability to cut when calculating the value.
Increasing this value will trade off accuracy for speed.
"""
(self.all_states, self.state_to_information_state) = (
compute_states_and_info_states_if_none(game, all_states,
state_to_information_state))
policy_to_dict = policy_utils.policy_to_dict(
policy, game, self.all_states, self.state_to_information_state)
# pylint: disable=g-complex-comprehension
# Cache TabularBestResponse for players, due to their costly construction
# TODO(b/140426861): Use a single best-responder once the code supports
# multiple player ids.
if not best_response_processor:
best_response_processor = pyspiel.TabularBestResponse(
game, best_responder_id, policy_to_dict)
self._policy = policy
self.game = game
self.best_responder_id = best_responder_id
self.tabular_best_response_map = (
best_response_processor.get_best_response_actions())
self._cut_threshold = cut_threshold
def decision_nodes(self, parent_state):
"""Yields a (state, cf_prob) pair for each descendant decision node."""
if not parent_state.is_terminal():
if parent_state.current_player() == self.best_responder_id:
yield (parent_state, 1.0)
for action, p_action in self.transitions(parent_state):
for state, p_state in self.decision_nodes(parent_state.child(action)):
yield (state, p_state * p_action)
def transitions(self, state):
"""Returns a list of (action, cf_prob) pairs from the specified state."""
if state.current_player() == self.best_responder_id:
# Counterfactual reach probabilities exclude the best-responder's actions,
# hence return probability 1.0 for every action.
return [(action, 1.0) for action in state.legal_actions()]
elif state.is_chance_node():
return state.chance_outcomes()
else:
return list(self._policy.action_probabilities(state).items())
@_memoize_method(key_fn=lambda state: state.history_str())
def value(self, state):
"""Returns the value of the specified state to the best-responder."""
if state.is_terminal():
return state.player_return(self.best_responder_id)
elif state.current_player() == self.best_responder_id:
action = self.best_response_action(
state.information_state_string(self.best_responder_id))
return self.q_value(state, action)
else:
return sum(p * self.q_value(state, a)
for a, p in self.transitions(state)
if p > self._cut_threshold)
def q_value(self, state, action):
"""Returns the value of the (state, action) to the best-responder."""
return self.value(state.child(action))
@_memoize_method()
def best_response_action(self, infostate):
"""Returns the best response for this information state."""
action = self.tabular_best_response_map[infostate]
return action
def action_probabilities(self, state, player_id=None):
"""Returns the policy for a player in a state.
Args:
state: A `pyspiel.State` object.
player_id: Optional, the player id for whom we want an action. Optional
        unless this is a simultaneous state at which multiple players can act.
Returns:
A `dict` of `{action: probability}` for the specified player in the
supplied state.
"""
# Send the best-response probabilities for the best-responder
if state.current_player() == self.best_responder_id:
probs = {action_id: 0. for action_id in state.legal_actions()}
info_state = self.state_to_information_state[state.history_str()]
probs[self.tabular_best_response_map[info_state]] = 1.
return probs
# Send the default probabilities for all other players
return self._policy.action_probabilities(state, player_id)
@property
def policy(self):
return self._policy
def copy_with_noise(self, alpha=0.0, beta=0.0):
"""Copies this policy and adds noise, making it a Noisy Best Response.
    The policy's new probabilities P'(s) on each state s become
      P'(s) = alpha * epsilon + (1 - alpha) * P(s),
    where P is the former policy's probabilities and epsilon ~ Softmax(beta *
    Uniform).
Args:
alpha: First mixture component
beta: Softmax 1/temperature component
Returns:
Noisy copy of best response.
"""
return noisy_policy.NoisyPolicy(self, alpha, beta, self.all_states)
| open_spiel-master | open_spiel/python/algorithms/best_response.py |
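The file above exposes both a pure-Python and a C++-backed best-response policy. As a minimal usage sketch (not taken from the file), assuming Kuhn poker and a uniform tabular policy as the fixed opponent strategy:
```python
# Sketch: best-respond to a fixed (uniform) policy in Kuhn poker.
from open_spiel.python import policy
from open_spiel.python.algorithms import best_response
import pyspiel

game = pyspiel.load_game("kuhn_poker")
fixed_policy = policy.TabularPolicy(game)  # Uniform over legal actions.
root = game.new_initial_state()

# Pure-Python best response for player 0.
br = best_response.BestResponsePolicy(game, 0, fixed_policy)
print("Best-response value for player 0:", br.value(root))

# C++-backed variant; typically faster on larger games.
cpp_br = best_response.CPPBestResponsePolicy(game, 0, fixed_policy)
print("C++ best-response value:", cpp_br.value(root))
```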
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python implementation of the counterfactual regret minimization algorithm.
One iteration of CFR consists of:
1) Compute current strategy from regrets (e.g. using Regret Matching).
2) Compute values using the current strategy
3) Compute regrets from these values
The average policy is what converges to a Nash Equilibrium.
"""
import collections
import attr
import numpy as np
from open_spiel.python import policy
import pyspiel
@attr.s
class _InfoStateNode(object):
"""An object wrapping values associated to an information state."""
# The list of the legal actions.
legal_actions = attr.ib()
index_in_tabular_policy = attr.ib()
# Map from information states string representations and actions to the
# counterfactual regrets, accumulated over the policy iterations
cumulative_regret = attr.ib(factory=lambda: collections.defaultdict(float))
# Same as above for the cumulative of the policy probabilities computed
# during the policy iterations
cumulative_policy = attr.ib(factory=lambda: collections.defaultdict(float))
def _apply_regret_matching_plus_reset(info_state_nodes):
"""Resets negative cumulative regrets to 0.
Regret Matching+ corresponds to the following cumulative regrets update:
cumulative_regrets = max(cumulative_regrets + regrets, 0)
This must be done at the level of the information set, and thus cannot be
done during the tree traversal (which is done on histories). It is thus
performed as an additional step.
This function is a module level function to be reused by both CFRSolver and
CFRBRSolver.
Args:
info_state_nodes: A dictionary {`info_state_str` -> `_InfoStateNode`}.
"""
for info_state_node in info_state_nodes.values():
action_to_cum_regret = info_state_node.cumulative_regret
for action, cumulative_regret in action_to_cum_regret.items():
if cumulative_regret < 0:
action_to_cum_regret[action] = 0
def _update_current_policy(current_policy, info_state_nodes):
"""Updates in place `current_policy` from the cumulative regrets.
This function is a module level function to be reused by both CFRSolver and
CFRBRSolver.
Args:
current_policy: A `policy.TabularPolicy` to be updated in-place.
info_state_nodes: A dictionary {`info_state_str` -> `_InfoStateNode`}.
"""
for info_state, info_state_node in info_state_nodes.items():
state_policy = current_policy.policy_for_key(info_state)
for action, value in _regret_matching(
info_state_node.cumulative_regret,
info_state_node.legal_actions).items():
state_policy[action] = value
def _update_average_policy(average_policy, info_state_nodes):
"""Updates in place `average_policy` to the average of all policies iterated.
This function is a module level function to be reused by both CFRSolver and
CFRBRSolver.
Args:
average_policy: A `policy.TabularPolicy` to be updated in-place.
info_state_nodes: A dictionary {`info_state_str` -> `_InfoStateNode`}.
"""
for info_state, info_state_node in info_state_nodes.items():
info_state_policies_sum = info_state_node.cumulative_policy
state_policy = average_policy.policy_for_key(info_state)
probabilities_sum = sum(info_state_policies_sum.values())
if probabilities_sum == 0:
num_actions = len(info_state_node.legal_actions)
for action in info_state_node.legal_actions:
state_policy[action] = 1 / num_actions
else:
for action, action_prob_sum in info_state_policies_sum.items():
state_policy[action] = action_prob_sum / probabilities_sum
class _CFRSolverBase(object):
r"""A base class for both CFR and CFR-BR.
The main iteration loop is implemented in `evaluate_and_update_policy`:
```python
game = pyspiel.load_game("game_name")
initial_state = game.new_initial_state()
solver = Solver(game)
for i in range(num_iterations):
solver.evaluate_and_update_policy()
solver.current_policy() # Access the current policy
solver.average_policy() # Access the average policy
```
"""
def __init__(self, game, alternating_updates, linear_averaging,
regret_matching_plus):
# pyformat: disable
"""Initializer.
Args:
game: The `pyspiel.Game` to run on.
alternating_updates: If `True`, alternating updates are performed: for
each player, we compute and update the cumulative regrets and policies.
In that case, and when the policy is frozen during tree traversal, the
cache is reset after each update for one player.
Otherwise, the update is simultaneous.
linear_averaging: Whether to use linear averaging, i.e.
cumulative_policy[info_state][action] += (
iteration_number * reach_prob * action_prob)
or not:
cumulative_policy[info_state][action] += reach_prob * action_prob
regret_matching_plus: Whether to use Regret Matching+:
cumulative_regrets = max(cumulative_regrets + regrets, 0)
or simply regret matching:
cumulative_regrets = cumulative_regrets + regrets
"""
# pyformat: enable
assert game.get_type().dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL, (
"CFR requires sequential games. If you're trying to run it " +
"on a simultaneous (or normal-form) game, please first transform it " +
"using turn_based_simultaneous_game.")
self._game = game
self._num_players = game.num_players()
self._root_node = self._game.new_initial_state()
# This is for returning the current policy and average policy to a caller
self._current_policy = policy.TabularPolicy(game)
self._average_policy = self._current_policy.__copy__()
self._info_state_nodes = {}
self._initialize_info_state_nodes(self._root_node)
self._iteration = 0 # For possible linear-averaging.
self._linear_averaging = linear_averaging
self._alternating_updates = alternating_updates
self._regret_matching_plus = regret_matching_plus
def _initialize_info_state_nodes(self, state):
"""Initializes info_state_nodes.
Create one _InfoStateNode per infoset. We could also initialize the node
when we try to access it and it does not exist.
Args:
state: The current state in the tree walk. This should be the root node
when we call this function from a CFR solver.
"""
if state.is_terminal():
return
if state.is_chance_node():
for action, unused_action_prob in state.chance_outcomes():
self._initialize_info_state_nodes(state.child(action))
return
current_player = state.current_player()
info_state = state.information_state_string(current_player)
info_state_node = self._info_state_nodes.get(info_state)
if info_state_node is None:
legal_actions = state.legal_actions(current_player)
info_state_node = _InfoStateNode(
legal_actions=legal_actions,
index_in_tabular_policy=self._current_policy.state_lookup[info_state])
self._info_state_nodes[info_state] = info_state_node
for action in info_state_node.legal_actions:
self._initialize_info_state_nodes(state.child(action))
def current_policy(self):
"""Returns the current policy as a TabularPolicy.
WARNING: The same object, updated in-place will be returned! You can copy
it (or its `action_probability_array` field).
For CFR/CFR+, this policy does not necessarily have to converge. It
converges with high probability for CFR-BR.
"""
return self._current_policy
def average_policy(self):
"""Returns the average of all policies iterated.
WARNING: The same object, updated in-place will be returned! You can copy
it (or its `action_probability_array` field).
This average policy converges to a Nash policy as the number of iterations
increases.
The policy is computed using the accumulated policy probabilities computed
using `evaluate_and_update_policy`.
Returns:
A `policy.TabularPolicy` object (shared between calls) giving the (linear)
time averaged policy (weighted by player reach probabilities) for both
players.
"""
_update_average_policy(self._average_policy, self._info_state_nodes)
return self._average_policy
def _compute_counterfactual_regret_for_player(self, state, policies,
reach_probabilities, player):
"""Increments the cumulative regrets and policy for `player`.
Args:
state: The initial game state to analyze from.
policies: A list of `num_players` callables taking as input an
`info_state_node` and returning a {action: prob} dictionary. For CFR,
this is simply returning the current policy, but this can be used in
the CFR-BR solver, to prevent code duplication. If None,
`_get_infostate_policy` is used.
reach_probabilities: The probability for each player of reaching `state`
as a numpy array [prob for player 0, for player 1,..., for chance].
`player_reach_probabilities[player]` will work in all cases.
player: The 0-indexed player to update the values for. If `None`, the
update for all players will be performed.
Returns:
The utility of `state` for all players, assuming all players follow the
      current policy defined by `self._current_policy`.
"""
if state.is_terminal():
return np.asarray(state.returns())
if state.is_chance_node():
state_value = 0.0
for action, action_prob in state.chance_outcomes():
assert action_prob > 0
new_state = state.child(action)
new_reach_probabilities = reach_probabilities.copy()
new_reach_probabilities[-1] *= action_prob
state_value += action_prob * self._compute_counterfactual_regret_for_player(
new_state, policies, new_reach_probabilities, player)
return state_value
current_player = state.current_player()
info_state = state.information_state_string(current_player)
# No need to continue on this history branch as no update will be performed
# for any player.
# The value we return here is not used in practice. If the conditional
# statement is True, then the last taken action has probability 0 of
# occurring, so the returned value is not impacting the parent node value.
if all(reach_probabilities[:-1] == 0):
return np.zeros(self._num_players)
state_value = np.zeros(self._num_players)
# The utilities of the children states are computed recursively. As the
# regrets are added to the information state regrets for each state in that
# information state, the recursive call can only be made once per child
# state. Therefore, the utilities are cached.
children_utilities = {}
info_state_node = self._info_state_nodes[info_state]
if policies is None:
info_state_policy = self._get_infostate_policy(info_state)
else:
info_state_policy = policies[current_player](info_state)
for action in state.legal_actions():
action_prob = info_state_policy.get(action, 0.)
new_state = state.child(action)
new_reach_probabilities = reach_probabilities.copy()
new_reach_probabilities[current_player] *= action_prob
child_utility = self._compute_counterfactual_regret_for_player(
new_state,
policies=policies,
reach_probabilities=new_reach_probabilities,
player=player)
state_value += action_prob * child_utility
children_utilities[action] = child_utility
# If we are performing alternating updates, and the current player is not
# the current_player, we skip the cumulative values update.
# If we are performing simultaneous updates, we do update the cumulative
# values.
    simultaneous_updates = player is None
    if not simultaneous_updates and current_player != player:
return state_value
reach_prob = reach_probabilities[current_player]
counterfactual_reach_prob = (
np.prod(reach_probabilities[:current_player]) *
np.prod(reach_probabilities[current_player + 1:]))
state_value_for_player = state_value[current_player]
for action, action_prob in info_state_policy.items():
cfr_regret = counterfactual_reach_prob * (
children_utilities[action][current_player] - state_value_for_player)
info_state_node.cumulative_regret[action] += cfr_regret
if self._linear_averaging:
info_state_node.cumulative_policy[
action] += self._iteration * reach_prob * action_prob
else:
info_state_node.cumulative_policy[action] += reach_prob * action_prob
return state_value
def _get_infostate_policy(self, info_state_str):
"""Returns an {action: prob} dictionary for the policy on `info_state`."""
info_state_node = self._info_state_nodes[info_state_str]
prob_vec = self._current_policy.action_probability_array[
info_state_node.index_in_tabular_policy]
return {
action: prob_vec[action] for action in info_state_node.legal_actions
}
def _regret_matching(cumulative_regrets, legal_actions):
"""Returns an info state policy by applying regret-matching.
Args:
cumulative_regrets: A {action: cumulative_regret} dictionary.
legal_actions: the list of legal actions at this state.
Returns:
A dict of action -> prob for all legal actions.
"""
regrets = cumulative_regrets.values()
sum_positive_regrets = sum((regret for regret in regrets if regret > 0))
info_state_policy = {}
if sum_positive_regrets > 0:
for action in legal_actions:
positive_action_regret = max(0.0, cumulative_regrets[action])
info_state_policy[action] = (
positive_action_regret / sum_positive_regrets)
else:
for action in legal_actions:
info_state_policy[action] = 1.0 / len(legal_actions)
return info_state_policy
class _CFRSolver(_CFRSolverBase):
r"""Implements the Counterfactual Regret Minimization (CFR) algorithm.
  The algorithm computes an approximate Nash policy for two-player zero-sum games.
  CFR can be viewed as a policy iteration algorithm. Importantly, the policies
themselves do not converge to a Nash policy, but their average does.
The main iteration loop is implemented in `evaluate_and_update_policy`:
```python
game = pyspiel.load_game("game_name")
initial_state = game.new_initial_state()
cfr_solver = CFRSolver(game)
for i in range(num_iterations):
        cfr_solver.evaluate_and_update_policy()
```
Once the policy has converged, the average policy (which converges to the Nash
policy) can be computed:
```python
      average_policy = cfr_solver.average_policy()
```
# Policy and average policy
policy(0) and average_policy(0) are not technically defined, but these
  methods will arbitrarily return the uniform policy.
Then, we are expected to have:
```
for t in range(1, N):
cfr_solver.evaluate_and_update_policy()
policy(t) = RM or RM+ of cumulative regrets
    avg_policy(t)(s, a) ~ \sum_{k=1}^t player_reach_prob(k)(s) * policy(k)(s, a)
    With Linear Averaging, the avg_policy is proportional to:
    \sum_{k=1}^t k * player_reach_prob(k)(s) * policy(k)(s, a)
```
"""
def evaluate_and_update_policy(self):
"""Performs a single step of policy evaluation and policy improvement."""
self._iteration += 1
if self._alternating_updates:
for player in range(self._game.num_players()):
self._compute_counterfactual_regret_for_player(
self._root_node,
policies=None,
reach_probabilities=np.ones(self._game.num_players() + 1),
player=player)
if self._regret_matching_plus:
_apply_regret_matching_plus_reset(self._info_state_nodes)
_update_current_policy(self._current_policy, self._info_state_nodes)
else:
self._compute_counterfactual_regret_for_player(
self._root_node,
policies=None,
reach_probabilities=np.ones(self._game.num_players() + 1),
player=None)
if self._regret_matching_plus:
_apply_regret_matching_plus_reset(self._info_state_nodes)
_update_current_policy(self._current_policy, self._info_state_nodes)
class CFRPlusSolver(_CFRSolver):
"""CFR+ implementation.
  The algorithm computes an approximate Nash policy for two-player zero-sum games.
More generally, it should approach a no-regret set, which corresponds to the
set of coarse-correlated equilibria. See https://arxiv.org/abs/1305.0034
  CFR can be viewed as a policy iteration algorithm. Importantly, the policies
themselves do not converge to a Nash policy, but their average does.
See https://poker.cs.ualberta.ca/publications/2015-ijcai-cfrplus.pdf
CFR+ is CFR with the following modifications:
- use Regret Matching+ instead of Regret Matching.
- use alternating updates instead of simultaneous updates.
- use linear averaging.
Usage:
```python
game = pyspiel.load_game("game_name")
initial_state = game.new_initial_state()
      cfr_solver = CFRPlusSolver(game)
for i in range(num_iterations):
        cfr_solver.evaluate_and_update_policy()
```
Once the policy has converged, the average policy (which converges to the Nash
policy) can be computed:
```python
      average_policy = cfr_solver.average_policy()
```
"""
def __init__(self, game):
super(CFRPlusSolver, self).__init__(
game,
regret_matching_plus=True,
alternating_updates=True,
linear_averaging=True)
class CFRSolver(_CFRSolver):
"""Implements the Counterfactual Regret Minimization (CFR) algorithm.
See https://poker.cs.ualberta.ca/publications/NIPS07-cfr.pdf
NOTE: We use alternating updates (which was not the case in the original
paper) because it has been proved to be far more efficient.
"""
def __init__(self, game):
super(CFRSolver, self).__init__(
game,
regret_matching_plus=False,
alternating_updates=True,
linear_averaging=False)
| open_spiel-master | open_spiel/python/algorithms/cfr.py |
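As a hedged usage sketch for the solvers above (not part of the file): run CFR+ on Kuhn poker and track the exploitability of the average policy; the iteration count and reporting interval are arbitrary choices for illustration.
```python
# Sketch: CFR+ on Kuhn poker, monitoring exploitability of the average policy.
from open_spiel.python.algorithms import cfr
from open_spiel.python.algorithms import exploitability
import pyspiel

game = pyspiel.load_game("kuhn_poker")
solver = cfr.CFRPlusSolver(game)

for i in range(200):  # Illustrative number of iterations.
  solver.evaluate_and_update_policy()
  if (i + 1) % 50 == 0:
    # The average policy (not the current one) converges to a Nash equilibrium.
    conv = exploitability.exploitability(game, solver.average_policy())
    print("Iteration {}: exploitability = {:.4f}".format(i + 1, conv))
```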
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Solving strong Stackelberg equilibrium based on linear programming.
Based on [1] "Computing the Optimal Strategy to Commit to", Conitzer & Sandholm,
EC'06
"""
import numpy as np
from open_spiel.python.algorithms import lp_solver
from open_spiel.python.algorithms.projected_replicator_dynamics import _simplex_projection
from open_spiel.python.egt.utils import game_payoffs_array
def solve_stackelberg(game, is_first_leader=True):
"""Solves the optimal mixed strategty to commit to for the leader.
Args:
    game: a pyspiel game.
    is_first_leader: if True, player 0 is the leader; otherwise, player 1 is
      the leader.
Returns:
(player0 strategy, player1 strategy, player0 payoff, player1 payoff) at an
SSE.
"""
p_mat = game_payoffs_array(game)
assert len(p_mat) == 2
if is_first_leader:
leader_payoff, follower_payoff = p_mat[0], p_mat[1]
else:
leader_payoff, follower_payoff = p_mat[1].T, p_mat[0].T
num_leader_strategies, num_follower_strategies = leader_payoff.shape
leader_eq_value = -float("inf")
follower_eq_value = None
leader_eq_strategy = None
follower_eq_strategy = None
for t in range(num_follower_strategies):
lp = lp_solver.LinearProgram(objective=lp_solver.OBJ_MAX)
for s in range(num_leader_strategies):
lp.add_or_reuse_variable("s_{}".format(s))
lp.set_obj_coeff("s_{}".format(s), leader_payoff[s, t])
for t_ in range(num_follower_strategies):
if t_ == t:
continue
lp.add_or_reuse_constraint("t_{}".format(t_), lp_solver.CONS_TYPE_GEQ)
for s in range(num_leader_strategies):
lp.set_cons_coeff("t_{}".format(t_), "s_{}".format(s),
follower_payoff[s, t] - follower_payoff[s, t_])
lp.set_cons_rhs("t_{}".format(t_), 0.0)
lp.add_or_reuse_constraint("sum_to_one", lp_solver.CONS_TYPE_EQ)
for s in range(num_leader_strategies):
lp.set_cons_coeff("sum_to_one", "s_{}".format(s), 1.0)
lp.set_cons_rhs("sum_to_one", 1.0)
try:
leader_strategy = np.array(lp.solve())
leader_strategy = _simplex_projection(
leader_strategy.reshape(-1)).reshape(-1, 1)
leader_value = leader_strategy.T.dot(leader_payoff)[0, t]
if leader_value > leader_eq_value:
leader_eq_strategy = leader_strategy
follower_eq_strategy = t
leader_eq_value = leader_value
follower_eq_value = leader_strategy.T.dot(follower_payoff)[0, t]
except: # pylint: disable=bare-except
continue
if is_first_leader:
return (leader_eq_strategy.reshape(-1), np.identity(
num_follower_strategies)[follower_eq_strategy],
leader_eq_value, follower_eq_value)
else:
return (np.identity(num_follower_strategies)[follower_eq_strategy],
leader_eq_strategy.reshape(-1), follower_eq_value, leader_eq_value)
| open_spiel-master | open_spiel/python/algorithms/stackelberg_lp.py |
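To illustrate `solve_stackelberg` (a sketch, not from the file), the payoff matrices below are made up for the example; any two-player matrix game created with `pyspiel.create_matrix_game` should work the same way.
```python
# Sketch: optimal leader commitment in a small, made-up bimatrix game.
import numpy as np
import pyspiel
from open_spiel.python.algorithms import stackelberg_lp

leader_payoffs = np.array([[2.0, 4.0], [1.0, 3.0]])    # Player 0 (leader).
follower_payoffs = np.array([[1.0, 0.0], [0.0, 2.0]])  # Player 1 (follower).
game = pyspiel.create_matrix_game(leader_payoffs, follower_payoffs)

p0_strategy, p1_strategy, p0_value, p1_value = stackelberg_lp.solve_stackelberg(
    game, is_first_leader=True)
print("Leader commitment:", p0_strategy)
print("Follower response (pure strategy, one-hot):", p1_strategy)
print("Leader / follower values:", p0_value, p1_value)
```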
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.dqn."""
from absl.testing import absltest
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import dqn
import pyspiel
# Temporarily disable TF2 behavior until code is updated.
tf.disable_v2_behavior()
# A simple two-action game encoded as an EFG game. Going left gets -1, going
# right gets a +1.
SIMPLE_EFG_DATA = """
EFG 2 R "Simple single-agent problem" { "Player 1" } ""
p "ROOT" 1 1 "ROOT" { "L" "R" } 0
t "L" 1 "Outcome L" { -1.0 }
t "R" 2 "Outcome R" { 1.0 }
"""
class DQNTest(tf.test.TestCase):
def test_simple_game(self):
game = pyspiel.load_efg_game(SIMPLE_EFG_DATA)
env = rl_environment.Environment(game=game)
with self.session() as sess:
agent = dqn.DQN(sess, 0,
state_representation_size=
game.information_state_tensor_shape()[0],
num_actions=game.num_distinct_actions(),
hidden_layers_sizes=[16],
replay_buffer_capacity=100,
batch_size=5,
epsilon_start=0.02,
epsilon_end=0.01)
total_reward = 0
sess.run(tf.global_variables_initializer())
for _ in range(100):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
time_step = env.step([agent_output.action])
total_reward += time_step.rewards[0]
agent.step(time_step)
self.assertGreaterEqual(total_reward, 75)
def test_run_tic_tac_toe(self):
env = rl_environment.Environment("tic_tac_toe")
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with self.session() as sess:
agents = [
dqn.DQN( # pylint: disable=g-complex-comprehension
sess,
player_id,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=[16],
replay_buffer_capacity=10,
batch_size=5) for player_id in [0, 1]
]
sess.run(tf.global_variables_initializer())
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
@absltest.skip("Causing a segmentation fault on wheel tests")
def test_run_hanabi(self):
# Hanabi is an optional game, so check we have it before running the test.
game = "hanabi"
if game not in pyspiel.registered_names():
return
num_players = 3
env_configs = {
"players": num_players,
"max_life_tokens": 1,
"colors": 2,
"ranks": 3,
"hand_size": 2,
"max_information_tokens": 3,
"discount": 0.
}
env = rl_environment.Environment(game, **env_configs)
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with self.session() as sess:
agents = [
dqn.DQN( # pylint: disable=g-complex-comprehension
sess,
player_id,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=[16],
replay_buffer_capacity=10,
batch_size=5) for player_id in range(num_players)
]
sess.run(tf.global_variables_initializer())
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
agent_output = [agent.step(time_step) for agent in agents]
time_step = env.step([agent_output[current_player].action])
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
tf.test.main()
| open_spiel-master | open_spiel/python/algorithms/dqn_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.boltzmann_tabular_qlearner."""
from absl.testing import absltest
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import boltzmann_tabular_qlearner
import pyspiel
# Fixed seed to make test non stochastic.
SEED = 10000
# A simple two-action game encoded as an EFG game. Going left gets -1, going
# right gets a +1.
SIMPLE_EFG_DATA = """
EFG 2 R "Simple single-agent problem" { "Player 1" } ""
p "ROOT" 1 1 "ROOT" { "L" "R" } 0
t "L" 1 "Outcome L" { -1.0 }
t "R" 2 "Outcome R" { 1.0 }
"""
class BoltzmannQlearnerTest(absltest.TestCase):
def test_simple_game(self):
game = pyspiel.load_efg_game(SIMPLE_EFG_DATA)
env = rl_environment.Environment(game=game)
agent = boltzmann_tabular_qlearner.BoltzmannQLearner(
0, game.num_distinct_actions())
total_reward = 0
for _ in range(100):
total_eval_reward = 0
for _ in range(1000):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
time_step = env.step([agent_output.action])
total_reward += time_step.rewards[0]
agent.step(time_step)
self.assertGreaterEqual(total_reward, 75)
for _ in range(1000):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
total_eval_reward += time_step.rewards[0]
self.assertGreaterEqual(total_eval_reward, 250)
if __name__ == "__main__":
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/boltzmann_tabular_qlearner_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tabular Q-learning agent."""
import collections
import numpy as np
from open_spiel.python import rl_agent
from open_spiel.python import rl_tools
def valuedict():
# The default factory is called without arguments to produce a new value when
# a key is not present, in __getitem__ only. This value is added to the dict,
# so modifying it will modify the dict.
return collections.defaultdict(float)
class QLearner(rl_agent.AbstractAgent):
"""Tabular Q-Learning agent.
  See open_spiel/python/examples/tic_tac_toe_qlearner.py for a usage example.
"""
def __init__(self,
player_id,
num_actions,
step_size=0.1,
epsilon_schedule=rl_tools.ConstantSchedule(0.2),
discount_factor=1.0,
centralized=False):
"""Initialize the Q-Learning agent."""
self._player_id = player_id
self._num_actions = num_actions
self._step_size = step_size
self._epsilon_schedule = epsilon_schedule
self._epsilon = epsilon_schedule.value
self._discount_factor = discount_factor
self._centralized = centralized
self._q_values = collections.defaultdict(valuedict)
self._prev_info_state = None
self._last_loss_value = None
def _epsilon_greedy(self, info_state, legal_actions, epsilon):
"""Returns a valid epsilon-greedy action and valid action probs.
If the agent has not been to `info_state`, a valid random action is chosen.
Args:
info_state: hashable representation of the information state.
legal_actions: list of actions at `info_state`.
epsilon: float, prob of taking an exploratory action.
Returns:
A valid epsilon-greedy action and valid action probabilities.
"""
probs = np.zeros(self._num_actions)
greedy_q = max([self._q_values[info_state][a] for a in legal_actions])
greedy_actions = [
a for a in legal_actions if self._q_values[info_state][a] == greedy_q
]
probs[legal_actions] = epsilon / len(legal_actions)
probs[greedy_actions] += (1 - epsilon) / len(greedy_actions)
action = np.random.choice(range(self._num_actions), p=probs)
return action, probs
def _get_action_probs(self, info_state, legal_actions, epsilon):
"""Returns a selected action and the probabilities of legal actions.
To be overwritten by subclasses that implement other action selection
methods.
Args:
info_state: hashable representation of the information state.
legal_actions: list of actions at `info_state`.
      epsilon: float, the current value of the epsilon schedule, or 0 during
        evaluation. QLearner uses it as the exploration parameter in
        epsilon-greedy, but subclasses are free to interpret it in different
        ways (e.g. as a temperature in softmax).
"""
return self._epsilon_greedy(info_state, legal_actions, epsilon)
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the Q-values if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
if self._centralized:
info_state = str(time_step.observations["info_state"])
else:
info_state = str(time_step.observations["info_state"][self._player_id])
legal_actions = time_step.observations["legal_actions"][self._player_id]
# Prevent undefined errors if this agent never plays until terminal step
action, probs = None, None
# Act step: don't act at terminal states.
if not time_step.last():
epsilon = 0.0 if is_evaluation else self._epsilon
action, probs = self._get_action_probs(info_state, legal_actions, epsilon)
# Learn step: don't learn during evaluation or at first agent steps.
if self._prev_info_state and not is_evaluation:
target = time_step.rewards[self._player_id]
if not time_step.last(): # Q values are zero for terminal.
target += self._discount_factor * max(
[self._q_values[info_state][a] for a in legal_actions])
prev_q_value = self._q_values[self._prev_info_state][self._prev_action]
self._last_loss_value = target - prev_q_value
self._q_values[self._prev_info_state][self._prev_action] += (
self._step_size * self._last_loss_value)
# Decay epsilon, if necessary.
self._epsilon = self._epsilon_schedule.step()
if time_step.last(): # prepare for the next episode.
self._prev_info_state = None
return
# Don't mess up with the state during evaluation.
if not is_evaluation:
self._prev_info_state = info_state
self._prev_action = action
return rl_agent.StepOutput(action=action, probs=probs)
@property
def loss(self):
return self._last_loss_value
| open_spiel-master | open_spiel/python/algorithms/tabular_qlearner.py |
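A compact self-play training loop for the tabular `QLearner` above, in the spirit of the referenced tic_tac_toe example; the environment name and episode count are illustrative, not prescribed by the file.
```python
# Sketch: two tabular Q-learners training against each other on tic-tac-toe.
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import tabular_qlearner

env = rl_environment.Environment("tic_tac_toe")
num_actions = env.action_spec()["num_actions"]
agents = [
    tabular_qlearner.QLearner(player_id=idx, num_actions=num_actions)
    for idx in range(env.num_players)
]

for _ in range(1000):  # Illustrative number of episodes.
  time_step = env.reset()
  while not time_step.last():
    player = time_step.observations["current_player"]
    agent_output = agents[player].step(time_step)
    time_step = env.step([agent_output.action])
  # Step all agents with the terminal state so they perform their final update.
  for agent in agents:
    agent.step(time_step)
```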
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.action_value.py."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import action_value
import pyspiel
class ActionValuesTest(parameterized.TestCase):
@parameterized.parameters([["kuhn_poker", 2], ["kuhn_poker", 3],
["leduc_poker", 2]])
def test_runs_with_uniform_policies(self, game_name, num_players):
game = pyspiel.load_game(game_name, {"players": num_players})
calc = action_value.TreeWalkCalculator(game)
uniform_policy = policy.TabularPolicy(game)
calc.compute_all_states_action_values([uniform_policy] * num_players)
def test_kuhn_poker_always_pass_p0(self):
game = pyspiel.load_game("kuhn_poker")
calc = action_value.TreeWalkCalculator(game)
uniform_policy = policy.TabularPolicy(game)
always_pass_policy = policy.FirstActionPolicy(game).to_tabular()
returned_values = calc([always_pass_policy, uniform_policy],
always_pass_policy)
root_node_values = calc.get_root_node_values(
[always_pass_policy, uniform_policy])
self.assertTrue(
np.allclose(root_node_values, returned_values.root_node_values))
# Action 0 == Pass. Action 1 == Bet
# Some values are 0 because the states are not reached, thus the expected
# value of that node is undefined.
np.testing.assert_array_almost_equal(
np.asarray([
# Player 0 states
[-1.0, -0.5], # '0'
[-1.0, -2.0], # '0pb'
[-0.5, 0.5], # '1'
[-1.0, 0.0], # '1pb'
[0.0, 1.5], # '2'
[-1.0, 2.0], # '2pb'
# Player 1 states
[0.0, 1.0], # '1p'
[0, 0], # Unreachable
[1.0, 1.0], # '2p'
[0, 0], # Unreachable
[-1.0, 1.0], # '0p'
[0, 0], # Unreachable
]), returned_values.action_values)
np.testing.assert_array_almost_equal(
np.asarray([
# Player 0 states
1 / 3, # '0'
1 / 6, # '0pb'
1 / 3, # '1'
1 / 6, # '1pb'
1 / 3, # '2'
1 / 6, # '2pb'
# Player 1 states
1 / 3, # '1p'
0.0, # '1b': zero because player 0 always play pass
1 / 3, # 2p'
0.0, # '2b': zero because player 0 always play pass
1 / 3, # '0p'
0.0, # '0b': zero because player 0 always play pass
]),
returned_values.counterfactual_reach_probs)
# The reach probabilities are always one, even though we have player 0
# who only plays pass, because the unreachable nodes for player 0 are
# terminal nodes: e.g. 'x x b b p' has a player 0 reach of 0, but it is
# a terminal node, thus it does not appear in the tabular policy
# states.
np.testing.assert_array_equal(
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
returned_values.player_reach_probs)
np.testing.assert_array_almost_equal(
np.asarray([
np.array([-1/3, -1/6]),
np.array([-1/6, -1/3]),
np.array([-1/6, 1/6]),
np.array([-1/6, 0.]),
np.array([0., 0.5]),
np.array([-1/6, 1/3]),
np.array([0., 1/3]),
np.array([0., 0.]),
np.array([1/3, 1/3]),
np.array([0., 0.]),
np.array([-1/3, 1/3]),
np.array([0., 0.])
]), returned_values.sum_cfr_reach_by_action_value)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/action_value_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the IS-MCTS Agent."""
from absl.testing import absltest
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import ismcts
from open_spiel.python.algorithms import mcts
from open_spiel.python.algorithms import mcts_agent
class MCTSAgentTest(absltest.TestCase):
def test_tic_tac_toe_episode(self):
env = rl_environment.Environment("kuhn_poker", include_full_state=True)
num_players = env.num_players
num_actions = env.action_spec()["num_actions"]
# Create the MCTS bot. Both agents can share the same bot in this case since
# there is no state kept between searches. See mcts.py for more info about
# the arguments.
ismcts_bot = ismcts.ISMCTSBot(
game=env.game,
uct_c=1.5,
max_simulations=100,
evaluator=mcts.RandomRolloutEvaluator())
agents = [
mcts_agent.MCTSAgent(
player_id=idx, num_actions=num_actions, mcts_bot=ismcts_bot)
for idx in range(num_players)
]
time_step = env.reset()
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = agents[player_id].step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/ismcts_agent_test.py |
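The test above drives IS-MCTS through the RL environment wrapper. As a complementary sketch (an assumption-laden illustration, not part of the test), the same bot can also be matched against a uniform-random bot directly on game states via `evaluate_bots`:
```python
# Sketch: IS-MCTS (player 0) vs. a uniform random bot (player 1) in Kuhn poker.
import numpy as np
import pyspiel
from open_spiel.python.algorithms import evaluate_bots
from open_spiel.python.algorithms import ismcts
from open_spiel.python.algorithms import mcts

game = pyspiel.load_game("kuhn_poker")
ismcts_bot = ismcts.ISMCTSBot(
    game=game,
    uct_c=1.5,
    max_simulations=100,
    evaluator=mcts.RandomRolloutEvaluator())
bots = [ismcts_bot, pyspiel.make_uniform_random_bot(1, 1234)]

returns = evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
print("Episode returns:", returns)
```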
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute the exploitability of a bot / strategy in a 2p sequential game.
This computes the value that a policy achieves against a worst-case opponent.
The policy applies to both player 1 and player 2, and hence we have a 2-player
symmetric zero-sum game, so the game value is zero for both players, and hence
value-vs-best-response is equal to exploitability.
We construct information sets, each consisting of a list of (state, probability)
pairs where probability is a counterfactual reach probability, i.e. the
probability that the state would be reached if the best responder (the current
player) played to reach it. This is the product of the probabilities of the
necessary chance events and opponent action choices required to reach the node.
These probabilities give us the correct weighting for possible states of the
world when considering our best response for a particular information set.
The values we calculate are values of being in the specific state. Unlike in a
CFR algorithm, they are not weighted by reach probabilities. These values
take into account the whole state, so they may depend on information which is
unknown to the best-responding player.
"""
import collections
import numpy as np
from open_spiel.python import policy as policy_lib
from open_spiel.python.algorithms import best_response as pyspiel_best_response
import pyspiel
def _state_values(state, num_players, policy):
"""Value of a state for every player given a policy."""
if state.is_terminal():
return np.array(state.returns())
else:
if state.is_simultaneous_node():
p_action = tuple(policy_lib.joint_action_probabilities(state, policy))
else:
p_action = (
state.chance_outcomes()
if state.is_chance_node()
else policy.action_probabilities(state).items()
)
return sum(
prob
* _state_values(policy_lib.child(state, action), num_players, policy)
for action, prob in p_action
)
def best_response(game, policy, player_id):
"""Returns information about the specified player's best response.
Given a game and a policy for every player, computes for a single player their
best unilateral strategy. Returns the value improvement that player would
get, the action they should take in each information state, and the value
of each state when following their unilateral policy.
Args:
game: An open_spiel game, e.g. kuhn_poker
policy: A `policy.Policy` object. This policy should depend only on the
information state available to the current player, but this is not
enforced.
player_id: The integer id of a player in the game for whom the best response
will be computed.
Returns:
A dictionary of values, with keys:
best_response_action: The best unilateral strategy for `player_id` as a
map from infostatekey to action_id.
best_response_state_value: The value obtained for `player_id` when
unilaterally switching strategy, for each state.
best_response_value: The value obtained for `player_id` when unilaterally
switching strategy.
info_sets: A dict of info sets, mapping info state key to a list of
`(state, counterfactual_reach_prob)` pairs.
nash_conv: `best_response_value - on_policy_value`
on_policy_value: The value for `player_id` when all players follow the
policy
on_policy_values: The value for each player when all players follow the
policy
"""
root_state = game.new_initial_state()
br = pyspiel_best_response.BestResponsePolicy(game, player_id, policy,
root_state)
on_policy_values = _state_values(root_state, game.num_players(), policy)
best_response_value = br.value(root_state)
# Get best response action for unvisited states
for infostate in set(br.infosets) - set(br.cache_best_response_action):
br.best_response_action(infostate)
return {
"best_response_action": br.cache_best_response_action,
"best_response_state_value": br.cache_value,
"best_response_value": best_response_value,
"info_sets": br.infosets,
"nash_conv": best_response_value - on_policy_values[player_id],
"on_policy_value": on_policy_values[player_id],
"on_policy_values": on_policy_values,
}
def exploitability(game, policy):
"""Returns the exploitability of the policy in the game.
  This is implemented only for 2-player constant-sum games, and is equivalent
to NashConv / num_players in that case. Prefer using `nash_conv`.
Args:
game: An open_spiel game, e.g. kuhn_poker
policy: A `policy.Policy` object. This policy should depend only on the
information state available to the current player, but this is not
enforced.
Returns:
The value that this policy achieves when playing against the worst-case
non-cheating opponent, averaged across both starting positions. It has a
minimum of zero (assuming the supplied policy is non-cheating) and
this bound is achievable in a 2p game.
Raises:
ValueError if the game is not a two-player constant-sum turn-based game.
"""
if game.num_players() != 2:
raise ValueError("Game must be a 2-player game")
game_info = game.get_type()
if game_info.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("The game must be turn-based, not {}".format(
game_info.dynamics))
if game_info.utility not in (pyspiel.GameType.Utility.ZERO_SUM,
pyspiel.GameType.Utility.CONSTANT_SUM):
raise ValueError("The game must be constant- or zero-sum, not {}".format(
game_info.utility))
root_state = game.new_initial_state()
nash_conv_value = (
sum(
pyspiel_best_response.CPPBestResponsePolicy(
game, best_responder, policy).value(root_state)
for best_responder in range(game.num_players())) - game.utility_sum())
return nash_conv_value / game.num_players()
_NashConvReturn = collections.namedtuple("_NashConvReturn",
["nash_conv", "player_improvements"])
def nash_conv(game, policy, return_only_nash_conv=True, use_cpp_br=False):
r"""Returns a measure of closeness to Nash for a policy in the game.
See https://arxiv.org/pdf/1711.00832.pdf for the NashConv definition.
Args:
game: An open_spiel game, e.g. kuhn_poker
policy: A `policy.Policy` object. This policy should depend only on the
information state available to the current player, but this is not
enforced.
return_only_nash_conv: Whether to only return the NashConv value, or a
namedtuple containing additional statistics. Prefer using `False`, as we
hope to change the default to that value.
use_cpp_br: if True, compute the best response in c++
Returns:
    Returns an object with the following attributes:
- player_improvements: A `[num_players]` numpy array of the improvement
for players (i.e. value_player_p_versus_BR - value_player_p).
- nash_conv: The sum over all players of the improvements in value that each
player could obtain by unilaterally changing their strategy, i.e.
sum(player_improvements).
"""
root_state = game.new_initial_state()
if use_cpp_br:
best_response_values = np.array([
pyspiel_best_response.CPPBestResponsePolicy(
game, best_responder, policy).value(root_state)
for best_responder in range(game.num_players())
])
else:
best_response_values = np.array([
pyspiel_best_response.BestResponsePolicy(
game, best_responder, policy).value(root_state)
for best_responder in range(game.num_players())
])
on_policy_values = _state_values(root_state, game.num_players(), policy)
player_improvements = best_response_values - on_policy_values
nash_conv_ = sum(player_improvements)
if return_only_nash_conv:
return nash_conv_
else:
return _NashConvReturn(
nash_conv=nash_conv_, player_improvements=player_improvements)
| open_spiel-master | open_spiel/python/algorithms/exploitability.py |
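A brief sketch of the two entry points above (not from the file), using Kuhn poker and the uniform random policy; no specific numerical values are asserted since they depend on the policy being evaluated.
```python
# Sketch: how exploitable is the uniform random policy in Kuhn poker?
from open_spiel.python import policy
from open_spiel.python.algorithms import exploitability
import pyspiel

game = pyspiel.load_game("kuhn_poker")
uniform = policy.UniformRandomPolicy(game)

# Average best-response improvement (2-player constant-sum games only).
expl = exploitability.exploitability(game, uniform)

# Sum of per-player improvements, with the per-player breakdown.
result = exploitability.nash_conv(game, uniform, return_only_nash_conv=False)
print("Exploitability:", expl)
print("NashConv:", result.nash_conv)
print("Per-player improvements:", result.player_improvements)
```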
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.evaluate_bots."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import evaluate_bots
from open_spiel.python.bots import uniform_random
from open_spiel.python.bots.policy import PolicyBot
import pyspiel
GAME = pyspiel.load_game("kuhn_poker")
def policy_bots():
random_policy = policy.UniformRandomPolicy(GAME)
py_bot = PolicyBot(0, np.random.RandomState(4321), random_policy)
cpp_bot = pyspiel.make_policy_bot(
GAME, 1, 1234,
policy.python_policy_to_pyspiel_policy(random_policy.to_tabular()))
return [py_bot, cpp_bot]
class EvaluateBotsTest(parameterized.TestCase):
@parameterized.parameters([([
pyspiel.make_uniform_random_bot(0, 1234),
uniform_random.UniformRandomBot(1, np.random.RandomState(4321))
],), (policy_bots(),)])
def test_cpp_vs_python(self, bots):
results = np.array([
evaluate_bots.evaluate_bots(GAME.new_initial_state(), bots, np.random)
for _ in range(10000)
])
average_results = np.mean(results, axis=0)
np.testing.assert_allclose(average_results, [0.125, -0.125], atol=0.1)
def test_random_vs_stateful(self):
game = pyspiel.load_game("tic_tac_toe")
bots = [
pyspiel.make_stateful_random_bot(game, 0, 1234),
uniform_random.UniformRandomBot(1, np.random.RandomState(4321))
]
for _ in range(1000):
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/evaluate_bots_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of exploitability descent.
See "Computing Approximate Equilibria in Sequential Adversarial Games by
Exploitability Descent" https://arxiv.org/abs/1903.05614
The exploitability descent algorithm solves a game by repeatedly performing
the following update:
1. Construct a (deterministic) best response to our current strategy
2. Compute the value of every action in every state when playing our current
strategy vs the best response.
3. Update our current strategy to do better vs the current best response
by performing a policy-gradient update.
This module provides a function that returns a loss for network training, and
a Solver class that uses this loss in a tabular Exploitability Descent.
The code can be used either for a tabular exploitability descent algorithm,
as demonstrated by exploitability_descent_test, or for a neural network policy,
as in ../examples/exploitability_descent.py.
Additionally, for a minibatch version of the algorithm (which samples
uniformly across all states in the game to generate a minibatch), see the
minibatch_loss method.
"""
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import policy
from open_spiel.python.algorithms import action_value_vs_best_response
from open_spiel.python.algorithms import masked_softmax
# Temporary disabling of v2 behavior until code is updated.
tf.disable_v2_behavior()
_NUM_PLAYERS = 2
def _create_policy_evaluator(tabular_policy, q_value_calculator):
"""Creates a function to evaluate our policy."""
def evaluate_policy(policy_values):
"""Evaluates a tabular policy; intended to be used as a tf.py_function."""
tabular_policy.action_probability_array = policy_values
evaluations = [
q_value_calculator(player, tabular_policy,
tabular_policy.states_per_player[player])
for player in range(_NUM_PLAYERS)
]
nash_conv = np.float64(sum([evaluations[p].exploitability for p in [0, 1]]))
q_values = np.concatenate(
[np.array(evaluations[p].values_vs_br, np.float64) for p in [0, 1]])
cf_reach_probabilities = np.concatenate([
np.array(evaluations[p].counterfactual_reach_probs_vs_br, np.float64)
for p in [0, 1]
])
return nash_conv, q_values, cf_reach_probabilities
return evaluate_policy
class LossCalculator(object):
"""Computes the exploitability descent loss for a two-player game."""
def __init__(self, game):
"""Initializes a loss calculation for the given game."""
if game.num_players() != _NUM_PLAYERS:
raise ValueError("Game {} does not have {} players.".format(
game, _NUM_PLAYERS))
self.tabular_policy = policy.TabularPolicy(game)
self.q_value_calculator = action_value_vs_best_response.Calculator(game)
def masked_softmax(self, logits):
"""Safe masked softmax."""
return masked_softmax.tf_masked_softmax(
logits, self.tabular_policy.legal_actions_mask)
def loss(self, policy_values):
"""Returns the exploitability descent loss given a policy."""
evaluate_policy = _create_policy_evaluator(self.tabular_policy,
self.q_value_calculator)
nash_conv, q_values, cf_reach_probabilities = tf.py_func(
evaluate_policy, [policy_values], [tf.float64, tf.float64, tf.float64])
baseline = tf.reduce_sum(policy_values * q_values, axis=-1, keepdims=True)
advantage = q_values - tf.stop_gradient(baseline)
loss_per_state = -tf.reduce_sum(policy_values * advantage, axis=-1)
return nash_conv, tf.reduce_sum(loss_per_state * cf_reach_probabilities)
def minibatch_loss(self, policy_values, q_values, indices):
"""Returns the exploitability descent loss given a policy for a subset."""
evaluate_policy = _create_policy_evaluator(self.tabular_policy,
self.q_value_calculator)
nash_conv, real_q_values, cf_reach_probabilities = tf.py_func(
evaluate_policy, [policy_values], [tf.float64, tf.float64, tf.float64])
baseline = tf.reduce_sum(policy_values * q_values, axis=-1, keepdims=True)
advantage = q_values - baseline
# We now select a minibatch from the data to propagate our loss on.
policy_values = tf.gather(policy_values, indices)
advantage = tf.gather(advantage, indices)
cf_reach_probabilities = tf.gather(cf_reach_probabilities, indices)
# The rest is the same as before.
loss_per_state = -tf.reduce_sum(
policy_values * tf.stop_gradient(advantage), axis=-1)
q_value_loss = tf.reduce_mean((q_values - real_q_values)**2, axis=1)
q_value_loss = tf.gather(q_value_loss, indices)
q_value_loss = tf.reduce_sum(q_value_loss * cf_reach_probabilities)
policy_loss = tf.reduce_sum(loss_per_state * cf_reach_probabilities)
return nash_conv, q_value_loss, policy_loss
class Solver(object):
"""Solves a two-player game using exploitability descent."""
def __init__(self, game):
"""Initializes a solver for the given game."""
self._loss_calculator = LossCalculator(game)
self._logits = tf.Variable(
np.ones_like(
self._loss_calculator.tabular_policy.action_probability_array,
dtype=np.float64),
name="logits",
use_resource=True)
self._tabular_policy = self._loss_calculator.masked_softmax(self._logits)
self._nash_conv, self._loss = self._loss_calculator.loss(
self._tabular_policy)
self._learning_rate = tf.placeholder(tf.float64, (), name="learning_rate")
self._optimizer = tf.train.GradientDescentOptimizer(self._learning_rate)
self._optimizer_step = self._optimizer.minimize(self._loss)
def step(self, session, learning_rate):
"""Takes a single exploitability descent step."""
_, nash_conv = session.run([self._optimizer_step, self._nash_conv],
feed_dict={self._learning_rate: learning_rate})
return nash_conv
| open_spiel-master | open_spiel/python/algorithms/exploitability_descent.py |
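A minimal TF1 graph-mode sketch of the tabular `Solver` above on Kuhn poker; the step count and learning rate are illustrative assumptions, not values prescribed by the module.
```python
# Sketch: tabular exploitability descent on Kuhn poker (TF1 graph mode).
import tensorflow.compat.v1 as tf
import pyspiel
from open_spiel.python.algorithms import exploitability_descent

# The module already disables TF2 behavior on import; doing it here is harmless.
tf.disable_v2_behavior()

game = pyspiel.load_game("kuhn_poker")
solver = exploitability_descent.Solver(game)

with tf.Session() as session:
  session.run(tf.global_variables_initializer())
  for step in range(100):  # Illustrative number of steps.
    nash_conv = solver.step(session, learning_rate=0.1)
    if (step + 1) % 25 == 0:
      print("Step {}: NashConv = {:.4f}".format(step + 1, nash_conv))
```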
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.nash_averaging."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.algorithms.nash_averaging import nash_averaging
import pyspiel
# transitive game test case
game_trans = pyspiel.create_matrix_game(
[[0.0, -1.0, -1.0], [1.0, 0.0, -1.0], [1.0, 1.0, 0.0]],
[[0.0, 1.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, -1.0, 0.0]])
eq_trans = np.asarray([0., 0., 1.])
value_trans = np.asarray([-1., -1., 0.])
# rock-paper-scissors test case
game_rps = pyspiel.create_matrix_game(
[[0.0, -1.0, 1.0], [1.0, 0.0, -1.0], [-1.0, 1.0, 0.0]],
[[0.0, 1.0, -1.0], [-1.0, 0.0, 1.0], [1.0, -1.0, 0.0]])
eq_rps = np.asarray([1 / 3, 1 / 3, 1 / 3])
value_rps = np.asarray([0., 0., 0.])
# game with a single dominant strategy (AvA case)
p_mat0 = np.asarray([[0.0, 234., 34., -270.], [-234., 0., -38., -464.],
[-34., 38., 0., -270.], [270., 464., 270., 0.]])
game0 = pyspiel.create_matrix_game(p_mat0, -p_mat0)
dominated_idxs0 = [0, 1, 2]
# game with a single dominant strategy (AvT case)
p_mat1 = np.asarray([
[0.0, 0.0, 0.0],
[1.0, 10.0, 100.0],
[2.0, 20.0, 200.0],
[3.0, 30.0, 300.0],
])
game1 = pyspiel.create_matrix_game(p_mat1, -p_mat1)
dominated_idxs1 = [0, 1, 2]
# game with multiple dominant strategies (AvT case)
p_mat2 = np.asarray([
[0.0, 0.0, 0.0],
[1.0, 10.0, 100.0],
[2.0, 20.0, 200.0],
[3.0, 30.0, 300.0],
[3.0, 30.0, 300.0],
])
game2 = pyspiel.create_matrix_game(p_mat2, -p_mat2)
dom_idxs2 = [3, 4]
class NashAveragingTest(parameterized.TestCase):
@parameterized.named_parameters(
("transitive_game", game_trans, eq_trans, value_trans),
("rps_game", game_rps, eq_rps, value_rps),
)
def test_simple_games(self, game, eq, value):
maxent_nash, nash_avg_value = nash_averaging(game)
with self.subTest("probability"):
np.testing.assert_array_almost_equal(eq, maxent_nash.reshape(-1))
with self.subTest("value"):
np.testing.assert_array_almost_equal(value, nash_avg_value.reshape(-1))
@parameterized.named_parameters(
("game0", game0, dominated_idxs0),)
def test_ava_games_with_dominated_strategy(self, game, dominated_idxs):
maxent_nash, _ = nash_averaging(game)
with self.subTest("dominated strategies have zero Nash probs"):
for idx in dominated_idxs:
self.assertAlmostEqual(maxent_nash[idx].item(), 0.0)
@parameterized.named_parameters(
("game1", game1, dominated_idxs1),
)
def test_avt_games_with_dominated_strategy(self, game, dominated_idxs):
(agent_strategy, _), _ = nash_averaging(game, a_v_a=False)
with self.subTest("dominated strategies have zero Nash probs"):
for idx in dominated_idxs:
self.assertAlmostEqual(agent_strategy[idx].item(), 0.0)
@parameterized.named_parameters(
("game2", game2, dom_idxs2),
)
def test_avt_games_with_multiple_dominant_strategies(self, game, dom_idxs):
(agent_strategy, _), (agent_values, _) = nash_averaging(game, a_v_a=False)
with self.subTest("dominant strategies have equal Nash probs"):
for idx in dom_idxs:
        self.assertAlmostEqual(agent_strategy[idx].item(), 1 / len(dom_idxs))
with self.subTest("dominant strategies have equal Nash values"):
values = [agent_values[idx] for idx in dom_idxs]
self.assertAlmostEqual(np.abs(np.max(values) - np.min(values)), 0.0)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/nash_averaging_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An RL agent wrapper for the MCTS bot."""
import numpy as np
from open_spiel.python import rl_agent
import pyspiel
class MCTSAgent(rl_agent.AbstractAgent):
"""MCTS agent class.
Important note: this agent requires the environment to provide the full state
in its TimeStep objects. Hence, the environment must be created with the
  include_full_state flag set to True, and the state must be serializable.
"""
def __init__(self, player_id, num_actions, mcts_bot, name="mcts_agent"):
assert num_actions > 0
self._player_id = player_id
self._mcts_bot = mcts_bot
self._num_actions = num_actions
def step(self, time_step, is_evaluation=False):
# If it is the end of the episode, don't select an action.
if time_step.last():
return
assert "serialized_state" in time_step.observations
_, state = pyspiel.deserialize_game_and_state(
time_step.observations["serialized_state"])
# Call the MCTS bot's step to get the action.
probs = np.zeros(self._num_actions)
action = self._mcts_bot.step(state)
probs[action] = 1.0
return rl_agent.StepOutput(action=action, probs=probs)
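# Illustrative usage sketch (not part of this module). It assumes the standard
# MCTS bot and random-rollout evaluator from open_spiel.python.algorithms.mcts,
# and an environment created with full-state observations enabled:
#
#   from open_spiel.python import rl_environment
#   from open_spiel.python.algorithms import mcts
#
#   env = rl_environment.Environment("tic_tac_toe", include_full_state=True)
#   game = env.game
#   bot = mcts.MCTSBot(game, uct_c=2.0, max_simulations=100,
#                      evaluator=mcts.RandomRolloutEvaluator(n_rollouts=1))
#   agent = MCTSAgent(player_id=0,
#                     num_actions=game.num_distinct_actions(),
#                     mcts_bot=bot)
#   time_step = env.reset()
#   step_output = agent.step(time_step)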
| open_spiel-master | open_spiel/python/algorithms/mcts_agent.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.policy_aggregator_joint."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import policy_aggregator_joint
class JointPolicyAggregatorTest(parameterized.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "kuhn_poker",
"game_name": "kuhn_poker"
}, {
"testcase_name": "leduc_poker",
"game_name": "leduc_poker"
})
def test_policy_aggregation_random(self, game_name):
env = rl_environment.Environment(game_name)
num_players = 2
num_joint_policies = 4
joint_policies = [[
policy.UniformRandomPolicy(env.game) for _ in range(num_players)
] for _ in range(num_joint_policies)]
probabilities = np.ones(len(joint_policies))
probabilities /= np.sum(probabilities)
pol_ag = policy_aggregator_joint.JointPolicyAggregator(env.game)
aggr_policy = pol_ag.aggregate([0, 1], joint_policies, probabilities)
self.assertLen(aggr_policy.policies, num_players)
for player in range(num_players):
player_policy = aggr_policy.policies[player]
self.assertNotEmpty(player_policy)
for state_action_probs in player_policy.values():
probs = list(state_action_probs.values())
expected_prob = 1. / len(probs)
for prob in probs:
self.assertAlmostEqual(expected_prob, prob, places=10)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/policy_aggregator_joint_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3.
r"""Policy Gradient based agents implemented in TensorFlow.
This class is composed of three policy gradient (PG) algorithms:
- Q-based Policy Gradient (QPG): an "all-actions" advantage actor-critic
algorithm differing from A2C in that all action values are used to estimate the
policy gradient (as opposed to only the action that was actually taken):
baseline = \sum_a pi_a * Q_a
loss = - \sum_a pi_a * (Q_a - baseline)
where (Q_a - baseline) is the usual advantage. QPG is also known as Mean
Actor-Critic (https://arxiv.org/abs/1709.00503).
- Regret policy gradient (RPG): a PG algorithm inspired by counterfactual regret
minimization (CFR). Unlike standard actor-critic methods (e.g. A2C), the loss is
defined purely in terms of thresholded regrets as follows:
baseline = \sum_a pi_a * Q_a
loss = regret = \sum_a relu(Q_a - baseline)
where gradients only flow through the action value (Q_a) part and are blocked on
the baseline part (which is trained separately by usual MSE loss).
The lack of a negative sign in front of the loss reflects a switch from
gradient ascent on the score to gradient descent on the loss.
- Regret Matching Policy Gradient (RMPG): inspired by regret matching, the
policy gradient is weighted by the thresholded regret:
baseline = \sum_a pi_a * Q_a
loss = - \sum_a pi_a * relu(Q_a - baseline)
These algorithms were published in NeurIPS 2018. Paper title: "Actor-Critic
Policy Optimization in Partially Observable Multiagent Environment", the paper
is available at: https://arxiv.org/abs/1810.09026.
- Advantage Actor Critic (A2C): The popular advantage actor critic (A2C)
algorithm. The algorithm uses the baseline (Value function) as a control variate
to reduce variance of the policy gradient. The loss is only computed for the
actions actually taken in the episode as opposed to a loss computed for all
actions in the variants above.
advantages = returns - baseline
loss = -log(pi_a) * advantages
The algorithm can be found in the textbook:
https://incompleteideas.net/book/RLbook2018.pdf under the chapter on
`Policy Gradients`.
See open_spiel/python/algorithms/losses/rl_losses_test.py for an example of the
loss computation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_agent
from open_spiel.python import simple_nets
from open_spiel.python.algorithms.losses import rl_losses
# Temporarily disable TF2 behavior until we update the code.
tf.disable_v2_behavior()
Transition = collections.namedtuple(
"Transition", "info_state action reward discount legal_actions_mask")
class PolicyGradient(rl_agent.AbstractAgent):
"""RPG Agent implementation in TensorFlow.
See open_spiel/python/examples/single_agent_catch.py for an usage example.
"""
def __init__(self,
session,
player_id,
info_state_size,
num_actions,
loss_str="a2c",
loss_class=None,
hidden_layers_sizes=(128,),
batch_size=16,
critic_learning_rate=0.01,
pi_learning_rate=0.001,
entropy_cost=0.01,
num_critic_before_pi=8,
additional_discount_factor=1.0,
max_global_gradient_norm=None,
optimizer_str="sgd"):
"""Initialize the PolicyGradient agent.
Args:
session: Tensorflow session.
player_id: int, player identifier. Usually its position in the game.
info_state_size: int, info_state vector size.
num_actions: int, number of actions per info state.
loss_str: string or None. If string, must be one of ["rpg", "qpg", "rm",
"a2c"] and defined in `_get_loss_class`. If None, a loss class must be
passed through `loss_class`. Defaults to "a2c".
loss_class: Class or None. If Class, it must define the policy gradient
loss. If None a loss class in a string format must be passed through
`loss_str`. Defaults to None.
hidden_layers_sizes: iterable, defines the neural network layers. Defaults
to (128,), which produces a NN: [INPUT] -> [128] -> ReLU -> [OUTPUT].
      batch_size: int, batch size to use for Q and Pi learning. Defaults to 16.
critic_learning_rate: float, learning rate used for Critic (Q or V).
Defaults to 0.01.
pi_learning_rate: float, learning rate used for Pi. Defaults to 0.001.
entropy_cost: float, entropy cost used to multiply the entropy loss. Can
be set to None to skip entropy computation. Defaults to 0.01.
num_critic_before_pi: int, number of Critic (Q or V) updates before each
Pi update. Defaults to 8 (every 8th critic learning step, Pi also
learns).
additional_discount_factor: float, additional discount to compute returns.
        Defaults to 1.0, in which case, no extra discount is applied. Note that
        users must provide *only one of* `loss_str` or `loss_class`.
max_global_gradient_norm: float or None, maximum global norm of a gradient
to which the gradient is shrunk if its value is larger. Defaults to
None.
optimizer_str: String defining which optimizer to use. Supported values
are {sgd, adam}. Defaults to sgd
"""
assert bool(loss_str) ^ bool(loss_class), "Please provide only one option."
self._kwargs = locals()
loss_class = loss_class if loss_class else self._get_loss_class(loss_str)
self._loss_class = loss_class
self.player_id = player_id
self._session = session
self._num_actions = num_actions
self._layer_sizes = hidden_layers_sizes
self._batch_size = batch_size
self._extra_discount = additional_discount_factor
self._num_critic_before_pi = num_critic_before_pi
self._episode_data = []
self._dataset = collections.defaultdict(list)
self._prev_time_step = None
self._prev_action = None
# Step counters
self._step_counter = 0
self._episode_counter = 0
self._num_learn_steps = 0
# Keep track of the last training loss achieved in an update step.
self._last_loss_value = None
# Placeholders
self._info_state_ph = tf.placeholder(
shape=[None, info_state_size], dtype=tf.float32, name="info_state_ph")
self._action_ph = tf.placeholder(
shape=[None], dtype=tf.int32, name="action_ph")
self._return_ph = tf.placeholder(
shape=[None], dtype=tf.float32, name="return_ph")
# Network
    # The torso's final layer output is kept activated (ReLU) because the
    # policy-logit and Q-value heads are plugged in on top of it.
self._net_torso = simple_nets.MLPTorso(info_state_size, self._layer_sizes)
torso_out = self._net_torso(self._info_state_ph)
torso_out_size = self._layer_sizes[-1]
self._policy_logits_layer = simple_nets.Linear(
torso_out_size,
self._num_actions,
activate_relu=False,
name="policy_head")
# Do not remove policy_logits_network. Even if it's not used directly here,
# other code outside this file refers to it.
self.policy_logits_network = simple_nets.Sequential(
[self._net_torso, self._policy_logits_layer])
self._policy_logits = self._policy_logits_layer(torso_out)
self._policy_probs = tf.nn.softmax(self._policy_logits)
self._savers = []
# Add baseline (V) head for A2C (or Q-head for QPG / RPG / RMPG)
if loss_class.__name__ == "BatchA2CLoss":
self._baseline_layer = simple_nets.Linear(
torso_out_size, 1, activate_relu=False, name="baseline")
self._baseline = tf.squeeze(self._baseline_layer(torso_out), axis=1)
else:
self._q_values_layer = simple_nets.Linear(
torso_out_size,
self._num_actions,
activate_relu=False,
name="q_values_head")
self._q_values = self._q_values_layer(torso_out)
# Critic loss
# Baseline loss in case of A2C
if loss_class.__name__ == "BatchA2CLoss":
self._critic_loss = tf.reduce_mean(
tf.losses.mean_squared_error(
labels=self._return_ph, predictions=self._baseline))
else:
# Q-loss otherwise.
action_indices = tf.stack(
[tf.range(tf.shape(self._q_values)[0]), self._action_ph], axis=-1)
value_predictions = tf.gather_nd(self._q_values, action_indices)
self._critic_loss = tf.reduce_mean(
tf.losses.mean_squared_error(
labels=self._return_ph, predictions=value_predictions))
if optimizer_str == "adam":
self._critic_optimizer = tf.train.AdamOptimizer(
learning_rate=critic_learning_rate)
elif optimizer_str == "sgd":
self._critic_optimizer = tf.train.GradientDescentOptimizer(
learning_rate=critic_learning_rate)
else:
raise ValueError("Not implemented, choose from 'adam' and 'sgd'.")
def minimize_with_clipping(optimizer, loss):
grads_and_vars = optimizer.compute_gradients(loss)
if max_global_gradient_norm is not None:
grads, variables = zip(*grads_and_vars)
grads, _ = tf.clip_by_global_norm(grads, max_global_gradient_norm)
grads_and_vars = list(zip(grads, variables))
return optimizer.apply_gradients(grads_and_vars)
self._critic_learn_step = minimize_with_clipping(self._critic_optimizer,
self._critic_loss)
# Pi loss
pg_class = loss_class(entropy_cost=entropy_cost)
if loss_class.__name__ == "BatchA2CLoss":
self._pi_loss = pg_class.loss(
policy_logits=self._policy_logits,
baseline=self._baseline,
actions=self._action_ph,
returns=self._return_ph)
else:
self._pi_loss = pg_class.loss(
policy_logits=self._policy_logits, action_values=self._q_values)
if optimizer_str == "adam":
self._pi_optimizer = tf.train.AdamOptimizer(
learning_rate=pi_learning_rate)
elif optimizer_str == "sgd":
self._pi_optimizer = tf.train.GradientDescentOptimizer(
learning_rate=pi_learning_rate)
self._pi_learn_step = minimize_with_clipping(self._pi_optimizer,
self._pi_loss)
self._loss_str = loss_str
self._initialize()
def _get_loss_class(self, loss_str):
if loss_str == "rpg":
return rl_losses.BatchRPGLoss
elif loss_str == "qpg":
return rl_losses.BatchQPGLoss
elif loss_str == "rm":
return rl_losses.BatchRMLoss
elif loss_str == "a2c":
return rl_losses.BatchA2CLoss
def _act(self, info_state, legal_actions):
# Make a singleton batch for NN compatibility: [1, info_state_size]
info_state = np.reshape(info_state, [1, -1])
policy_probs = self._session.run(
self._policy_probs, feed_dict={self._info_state_ph: info_state})
# Remove illegal actions, re-normalize probs
probs = np.zeros(self._num_actions)
probs[legal_actions] = policy_probs[0][legal_actions]
if sum(probs) != 0:
probs /= sum(probs)
else:
probs[legal_actions] = 1 / len(legal_actions)
action = np.random.choice(len(probs), p=probs)
return action, probs
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the network if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Defaults to False.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
    # Act step: don't act at terminal info states or if it's not our turn.
if (not time_step.last()) and (
time_step.is_simultaneous_move() or
self.player_id == time_step.current_player()):
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
action, probs = self._act(info_state, legal_actions)
else:
action = None
probs = []
if not is_evaluation:
self._step_counter += 1
# Add data points to current episode buffer.
if self._prev_time_step:
self._add_transition(time_step)
# Episode done, add to dataset and maybe learn.
if time_step.last():
self._add_episode_data_to_dataset()
self._episode_counter += 1
if len(self._dataset["returns"]) >= self._batch_size:
self._critic_update()
self._num_learn_steps += 1
if self._num_learn_steps % self._num_critic_before_pi == 0:
self._pi_update()
self._dataset = collections.defaultdict(list)
self._prev_time_step = None
self._prev_action = None
return
else:
self._prev_time_step = time_step
self._prev_action = action
return rl_agent.StepOutput(action=action, probs=probs)
def _full_checkpoint_name(self, checkpoint_dir, name):
checkpoint_filename = "_".join(
[self._loss_str, name, "pid" + str(self.player_id)])
return os.path.join(checkpoint_dir, checkpoint_filename)
def _latest_checkpoint_filename(self, name):
checkpoint_filename = "_".join(
[self._loss_str, name, "pid" + str(self.player_id)])
return checkpoint_filename + "_latest"
def save(self, checkpoint_dir):
for name, saver in self._savers:
path = saver.save(
self._session,
self._full_checkpoint_name(checkpoint_dir, name),
latest_filename=self._latest_checkpoint_filename(name))
logging.info("saved to path: %s", path)
def has_checkpoint(self, checkpoint_dir):
for name, _ in self._savers:
if tf.train.latest_checkpoint(
self._full_checkpoint_name(checkpoint_dir, name),
os.path.join(checkpoint_dir,
self._latest_checkpoint_filename(name))) is None:
return False
return True
def restore(self, checkpoint_dir):
for name, saver in self._savers:
full_checkpoint_dir = self._full_checkpoint_name(checkpoint_dir, name)
logging.info("Restoring checkpoint: %s", full_checkpoint_dir)
saver.restore(self._session, full_checkpoint_dir)
@property
def loss(self):
return (self._last_critic_loss_value, self._last_pi_loss_value)
def _add_episode_data_to_dataset(self):
"""Add episode data to the buffer."""
info_states = [data.info_state for data in self._episode_data]
rewards = [data.reward for data in self._episode_data]
discount = [data.discount for data in self._episode_data]
actions = [data.action for data in self._episode_data]
# Calculate returns
returns = np.array(rewards)
    for idx in reversed(range(len(rewards) - 1)):
returns[idx] = (
rewards[idx] +
discount[idx] * returns[idx + 1] * self._extra_discount)
# Add flattened data points to dataset
self._dataset["actions"].extend(actions)
self._dataset["returns"].extend(returns)
self._dataset["info_states"].extend(info_states)
self._episode_data = []
def _add_transition(self, time_step):
"""Adds intra-episode transition to the `_episode_data` buffer.
Adds the transition from `self._prev_time_step` to `time_step`.
Args:
time_step: an instance of rl_environment.TimeStep.
"""
assert self._prev_time_step is not None
legal_actions = (
self._prev_time_step.observations["legal_actions"][self.player_id])
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
transition = Transition(
info_state=(
self._prev_time_step.observations["info_state"][self.player_id][:]),
action=self._prev_action,
reward=time_step.rewards[self.player_id],
discount=time_step.discounts[self.player_id],
legal_actions_mask=legal_actions_mask)
self._episode_data.append(transition)
def _critic_update(self):
"""Compute the Critic loss on sampled transitions & perform a critic update.
Returns:
The average Critic loss obtained on this batch.
"""
# TODO(author3): illegal action handling.
critic_loss, _ = self._session.run(
[self._critic_loss, self._critic_learn_step],
feed_dict={
self._info_state_ph: self._dataset["info_states"],
self._action_ph: self._dataset["actions"],
self._return_ph: self._dataset["returns"],
})
self._last_critic_loss_value = critic_loss
return critic_loss
def _pi_update(self):
"""Compute the Pi loss on sampled transitions and perform a Pi update.
Returns:
The average Pi loss obtained on this batch.
"""
# TODO(author3): illegal action handling.
pi_loss, _ = self._session.run(
[self._pi_loss, self._pi_learn_step],
feed_dict={
self._info_state_ph: self._dataset["info_states"],
self._action_ph: self._dataset["actions"],
self._return_ph: self._dataset["returns"],
})
self._last_pi_loss_value = pi_loss
return pi_loss
def get_weights(self):
variables = [self._session.run(self._net_torso.variables)]
variables.append(self._session.run(self._policy_logits_layer.variables))
if self._loss_class.__name__ == "BatchA2CLoss":
variables.append(self._session.run(self._baseline_layer.variables))
else:
variables.append(self._session.run(self._q_values_layer.variables))
return variables
def _initialize(self):
initialization_torso = tf.group(
*[var.initializer for var in self._net_torso.variables])
initialization_logit = tf.group(
*[var.initializer for var in self._policy_logits_layer.variables])
if self._loss_class.__name__ == "BatchA2CLoss":
initialization_baseline_or_q_val = tf.group(
*[var.initializer for var in self._baseline_layer.variables])
else:
initialization_baseline_or_q_val = tf.group(
*[var.initializer for var in self._q_values_layer.variables])
initialization_crit_opt = tf.group(
*[var.initializer for var in self._critic_optimizer.variables()])
initialization_pi_opt = tf.group(
*[var.initializer for var in self._pi_optimizer.variables()])
self._session.run(
tf.group(*[
initialization_torso, initialization_logit,
initialization_baseline_or_q_val, initialization_crit_opt,
initialization_pi_opt
]))
self._savers = [("torso", tf.train.Saver(self._net_torso.variables)),
("policy_head",
tf.train.Saver(self._policy_logits_layer.variables))]
if self._loss_class.__name__ == "BatchA2CLoss":
self._savers.append(
("baseline", tf.train.Saver(self._baseline_layer.variables)))
else:
self._savers.append(
("q_head", tf.train.Saver(self._q_values_layer.variables)))
def copy_with_noise(self, sigma=0.0, copy_weights=True):
"""Copies the object and perturbates its network's weights with noise.
Args:
sigma: gaussian dropout variance term : Multiplicative noise following
(1+sigma*epsilon), epsilon standard gaussian variable, multiplies each
model weight. sigma=0 means no perturbation.
copy_weights: Boolean determining whether to copy model weights (True) or
just model hyperparameters.
Returns:
Perturbated copy of the model.
"""
_ = self._kwargs.pop("self", None)
copied_object = PolicyGradient(**self._kwargs)
net_torso = getattr(copied_object, "_net_torso")
policy_logits_layer = getattr(copied_object, "_policy_logits_layer")
if hasattr(copied_object, "_q_values_layer"):
q_values_layer = getattr(copied_object, "_q_values_layer")
if hasattr(copied_object, "_baseline_layer"):
baseline_layer = getattr(copied_object, "_baseline_layer")
if copy_weights:
copy_mlp_weights = tf.group(*[
va.assign(vb * (1 + sigma * tf.random.normal(vb.shape)))
for va, vb in zip(net_torso.variables, self._net_torso.variables)
])
self._session.run(copy_mlp_weights)
copy_logit_weights = tf.group(*[
va.assign(vb * (1 + sigma * tf.random.normal(vb.shape)))
for va, vb in zip(policy_logits_layer.variables,
self._policy_logits_layer.variables)
])
self._session.run(copy_logit_weights)
if hasattr(copied_object, "_q_values_layer"):
copy_q_value_weights = tf.group(*[
va.assign(vb * (1 + sigma * tf.random.normal(vb.shape))) for va, vb
in zip(q_values_layer.variables, self._q_values_layer.variables)
])
self._session.run(copy_q_value_weights)
if hasattr(copied_object, "_baseline_layer"):
copy_baseline_weights = tf.group(*[
va.assign(vb * (1 + sigma * tf.random.normal(vb.shape))) for va, vb
in zip(baseline_layer.variables, self._baseline_layer.variables)
])
self._session.run(copy_baseline_weights)
for var in getattr(copied_object, "_critic_optimizer").variables():
self._session.run(var.initializer)
for var in getattr(copied_object, "_pi_optimizer").variables():
self._session.run(var.initializer)
return copied_object
| open_spiel-master | open_spiel/python/algorithms/policy_gradient.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.response_graph_ucb."""
import itertools
from absl.testing import absltest
# pylint: disable=g-import-not-at-top
import matplotlib
matplotlib.use('agg') # switch backend for testing
import numpy as np
from open_spiel.python.algorithms import response_graph_ucb
from open_spiel.python.algorithms import response_graph_ucb_utils
class ResponseGraphUcbTest(absltest.TestCase):
def get_example_2x2_payoffs(self):
mean_payoffs = np.random.uniform(-1, 1, size=(2, 2, 2))
mean_payoffs[0, :, :] = np.asarray([[0.5, 0.85], [0.15, 0.5]])
mean_payoffs[1, :, :] = 1 - mean_payoffs[0, :, :]
return mean_payoffs
def test_sampler(self):
mean_payoffs = self.get_example_2x2_payoffs()
game = response_graph_ucb_utils.BernoulliGameSampler(
[2, 2], mean_payoffs, payoff_bounds=[-1., 1.])
game.p_max = mean_payoffs
game.means = mean_payoffs
# Parameters to run
sampling_methods = [
'uniform-exhaustive', 'uniform', 'valence-weighted', 'count-weighted'
]
conf_methods = [
'ucb-standard', 'ucb-standard-relaxed', 'clopper-pearson-ucb',
'clopper-pearson-ucb-relaxed'
]
per_payoff_confidence = [True, False]
time_dependent_delta = [True, False]
methods = list(itertools.product(sampling_methods,
conf_methods,
per_payoff_confidence,
time_dependent_delta))
max_total_interactions = 50
for m in methods:
r_ucb = response_graph_ucb.ResponseGraphUCB(
game,
exploration_strategy=m[0],
confidence_method=m[1],
delta=0.1,
ucb_eps=1e-1,
per_payoff_confidence=m[2],
time_dependent_delta=m[3])
_ = r_ucb.run(max_total_iterations=max_total_interactions)
def test_soccer_data_import(self):
response_graph_ucb_utils.get_soccer_data()
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/response_graph_ucb_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some helpers for normal-form games."""
import collections
import numpy as np
class StrategyAverager(object):
"""A helper class for averaging strategies for players."""
def __init__(self, num_players, action_space_shapes, window_size=None):
"""Initialize the average strategy helper object.
Args:
num_players (int): the number of players in the game,
      action_space_shapes: a vector of `num_players` integers, where element i
        is the size of player i's action space,
      window_size (int or None): if None, computes the players' average
        strategies over the entire sequence, otherwise computes the average
        strategy over a sliding window of the last `window_size` entries.
"""
self._num_players = num_players
self._action_space_shapes = action_space_shapes
self._window_size = window_size
self._num = 0
if self._window_size is None:
self._sum_meta_strategies = [
np.zeros(action_space_shapes[p]) for p in range(num_players)
]
else:
self._window = collections.deque(maxlen=self._window_size)
def append(self, meta_strategies):
"""Append the meta-strategies to the averaged sequence.
Args:
meta_strategies: a list of strategies, one per player.
"""
if self._window_size is None:
for p in range(self._num_players):
self._sum_meta_strategies[p] += meta_strategies[p]
else:
self._window.append(meta_strategies)
self._num += 1
def average_strategies(self):
"""Return each player's average strategy.
Returns:
The averaged strategies, as a list containing one strategy per player.
"""
if self._window_size is None:
avg_meta_strategies = [
np.copy(x) for x in self._sum_meta_strategies
]
num_strategies = self._num
else:
avg_meta_strategies = [
np.zeros(self._action_space_shapes[p])
for p in range(self._num_players)
]
for i in range(len(self._window)):
for p in range(self._num_players):
avg_meta_strategies[p] += self._window[i][p]
num_strategies = len(self._window)
for p in range(self._num_players):
avg_meta_strategies[p] /= num_strategies
return avg_meta_strategies
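# Illustrative usage sketch (not part of the original module):
#
#   averager = StrategyAverager(num_players=2, action_space_shapes=[3, 3])
#   averager.append([np.array([1., 0., 0.]), np.array([0., 1., 0.])])
#   averager.append([np.array([0., 1., 0.]), np.array([0., 1., 0.])])
#   averager.average_strategies()
#   # -> [array([0.5, 0.5, 0. ]), array([0., 1., 0.])]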
| open_spiel-master | open_spiel/python/algorithms/nfg_utils.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of Information Set Monte Carlo Tree Search (IS-MCTS).
See Cowling, Powley, and Whitehouse 2011.
https://ieeexplore.ieee.org/document/6203567
"""
import copy
import enum
import numpy as np
import pyspiel
UNLIMITED_NUM_WORLD_SAMPLES = -1
UNEXPANDED_VISIT_COUNT = -1
TIE_TOLERANCE = 1e-5
class ISMCTSFinalPolicyType(enum.Enum):
"""A enumeration class for final ISMCTS policy type."""
NORMALIZED_VISITED_COUNT = 1
MAX_VISIT_COUNT = 2
MAX_VALUE = 3
class ChildSelectionPolicy(enum.Enum):
"""A enumeration class for children selection in ISMCTS."""
UCT = 1
PUCT = 2
class ChildInfo(object):
"""Child node information for the search tree."""
def __init__(self, visits, return_sum, prior):
self.visits = visits
self.return_sum = return_sum
self.prior = prior
def value(self):
return self.return_sum / self.visits
class ISMCTSNode(object):
"""Node data structure for the search tree."""
def __init__(self):
self.child_info = {}
self.total_visits = 0
self.prior_map = {}
class ISMCTSBot(pyspiel.Bot):
"""Adapted from the C++ implementation."""
def __init__(self,
game,
evaluator,
uct_c,
max_simulations,
max_world_samples=UNLIMITED_NUM_WORLD_SAMPLES,
random_state=None,
final_policy_type=ISMCTSFinalPolicyType.MAX_VISIT_COUNT,
use_observation_string=False,
allow_inconsistent_action_sets=False,
child_selection_policy=ChildSelectionPolicy.PUCT):
pyspiel.Bot.__init__(self)
self._game = game
self._evaluator = evaluator
self._uct_c = uct_c
self._max_simulations = max_simulations
self._max_world_samples = max_world_samples
self._final_policy_type = final_policy_type
self._use_observation_string = use_observation_string
self._allow_inconsistent_action_sets = allow_inconsistent_action_sets
self._nodes = {}
self._node_pool = []
self._root_samples = []
self._random_state = random_state or np.random.RandomState()
self._child_selection_policy = child_selection_policy
self._resampler_cb = None
def random_number(self):
return self._random_state.uniform()
def reset(self):
self._nodes = {}
self._node_pool = []
self._root_samples = []
def get_state_key(self, state):
if self._use_observation_string:
return state.current_player(), state.observation_string()
else:
return state.current_player(), state.information_state_string()
def run_search(self, state):
self.reset()
assert state.get_game().get_type(
).dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL
assert state.get_game().get_type(
).information == pyspiel.GameType.Information.IMPERFECT_INFORMATION
legal_actions = state.legal_actions()
if len(legal_actions) == 1:
return [(legal_actions[0], 1.0)]
self._root_node = self.create_new_node(state)
assert self._root_node
root_infostate_key = self.get_state_key(state)
for _ in range(self._max_simulations):
# how to sample a pyspiel.state from another pyspiel.state?
sampled_root_state = self.sample_root_state(state)
assert root_infostate_key == self.get_state_key(sampled_root_state)
assert sampled_root_state
self.run_simulation(sampled_root_state)
if self._allow_inconsistent_action_sets: # when this happens?
legal_actions = state.legal_actions()
temp_node = self.filter_illegals(self._root_node, legal_actions)
assert temp_node.total_visits > 0
return self.get_final_policy(state, temp_node)
else:
return self.get_final_policy(state, self._root_node)
def step(self, state):
action_list, prob_list = zip(*self.run_search(state))
return self._random_state.choice(action_list, p=prob_list)
def get_policy(self, state):
return self.run_search(state)
def step_with_policy(self, state):
policy = self.get_policy(state)
action_list, prob_list = zip(*policy)
sampled_action = self._random_state.choice(action_list, p=prob_list)
return policy, sampled_action
def get_final_policy(self, state, node):
assert node
if self._final_policy_type == ISMCTSFinalPolicyType.NORMALIZED_VISITED_COUNT:
assert node.total_visits > 0
total_visits = node.total_visits
policy = [(action, child.visits / total_visits)
for action, child in node.child_info.items()]
elif self._final_policy_type == ISMCTSFinalPolicyType.MAX_VISIT_COUNT:
assert node.total_visits > 0
max_visits = -float('inf')
count = 0
for action, child in node.child_info.items():
if child.visits == max_visits:
count += 1
elif child.visits > max_visits:
max_visits = child.visits
count = 1
policy = [(action, 1. / count if child.visits == max_visits else 0.0)
for action, child in node.child_info.items()]
elif self._final_policy_type == ISMCTSFinalPolicyType.MAX_VALUE:
assert node.total_visits > 0
max_value = -float('inf')
count = 0
for action, child in node.child_info.items():
if child.value() == max_value:
count += 1
elif child.value() > max_value:
max_value = child.value()
count = 1
policy = [(action, 1. / count if child.value() == max_value else 0.0)
for action, child in node.child_info.items()]
policy_size = len(policy)
legal_actions = state.legal_actions()
if policy_size < len(legal_actions): # do we really need this step?
for action in legal_actions:
if action not in node.child_info:
policy.append((action, 0.0))
return policy
def sample_root_state(self, state):
if self._max_world_samples == UNLIMITED_NUM_WORLD_SAMPLES:
return self.resample_from_infostate(state)
elif len(self._root_samples) < self._max_world_samples:
self._root_samples.append(self.resample_from_infostate(state))
return self._root_samples[-1].clone()
elif len(self._root_samples) == self._max_world_samples:
idx = self._random_state.randint(len(self._root_samples))
return self._root_samples[idx].clone()
else:
raise pyspiel.SpielError(
'Case not handled (badly set max_world_samples..?)')
def resample_from_infostate(self, state):
if self._resampler_cb:
return self._resampler_cb(state, state.current_player())
else:
return state.resample_from_infostate(
state.current_player(), pyspiel.UniformProbabilitySampler(0., 1.))
def create_new_node(self, state):
infostate_key = self.get_state_key(state)
self._node_pool.append(ISMCTSNode())
node = self._node_pool[-1]
self._nodes[infostate_key] = node
node.total_visits = UNEXPANDED_VISIT_COUNT
return node
def set_resampler(self, cb):
self._resampler_cb = cb
def lookup_node(self, state):
if self.get_state_key(state) in self._nodes:
return self._nodes[self.get_state_key(state)]
return None
def lookup_or_create_node(self, state):
node = self.lookup_node(state)
if node:
return node
return self.create_new_node(state)
def filter_illegals(self, node, legal_actions):
new_node = copy.deepcopy(node)
for action, child in node.child_info.items():
if action not in legal_actions:
new_node.total_visits -= child.visits
del new_node.child_info[action]
return new_node
def expand_if_necessary(self, node, action):
if action not in node.child_info:
node.child_info[action] = ChildInfo(0.0, 0.0, node.prior_map[action])
def select_action_tree_policy(self, node, legal_actions):
if self._allow_inconsistent_action_sets:
temp_node = self.filter_illegals(node, legal_actions)
if temp_node.total_visits == 0:
action = legal_actions[self._random_state.randint(
len(legal_actions))] # prior?
self.expand_if_necessary(node, action)
return action
else:
return self.select_action(temp_node)
else:
return self.select_action(node)
def select_action(self, node):
candidates = []
max_value = -float('inf')
for action, child in node.child_info.items():
assert child.visits > 0
action_value = child.value()
if self._child_selection_policy == ChildSelectionPolicy.UCT:
action_value += (self._uct_c *
np.sqrt(np.log(node.total_visits)/child.visits))
elif self._child_selection_policy == ChildSelectionPolicy.PUCT:
action_value += (self._uct_c * child.prior *
np.sqrt(node.total_visits)/(1 + child.visits))
else:
raise pyspiel.SpielError('Child selection policy unrecognized.')
if action_value > max_value + TIE_TOLERANCE:
candidates = [action]
max_value = action_value
elif (action_value > max_value - TIE_TOLERANCE and
action_value < max_value + TIE_TOLERANCE):
candidates.append(action)
max_value = action_value
assert len(candidates) >= 1
return candidates[self._random_state.randint(len(candidates))]
def check_expand(self, node, legal_actions):
if not self._allow_inconsistent_action_sets and len(
node.child_info) == len(legal_actions):
return pyspiel.INVALID_ACTION
legal_actions_copy = copy.deepcopy(legal_actions)
self._random_state.shuffle(legal_actions_copy)
for action in legal_actions_copy:
if action not in node.child_info:
return action
return pyspiel.INVALID_ACTION
def run_simulation(self, state):
if state.is_terminal():
return state.returns()
elif state.is_chance_node():
action_list, prob_list = zip(*state.chance_outcomes())
chance_action = self._random_state.choice(action_list, p=prob_list)
state.apply_action(chance_action)
return self.run_simulation(state)
legal_actions = state.legal_actions()
cur_player = state.current_player()
node = self.lookup_or_create_node(state)
assert node
if node.total_visits == UNEXPANDED_VISIT_COUNT:
node.total_visits = 0
for action, prob in self._evaluator.prior(state):
node.prior_map[action] = prob
return self._evaluator.evaluate(state)
else:
chosen_action = self.check_expand(
          node, legal_actions)  # add one child at a time?
if chosen_action != pyspiel.INVALID_ACTION:
# check if all actions have been expanded, if not, select one?
# if yes, ucb?
self.expand_if_necessary(node, chosen_action)
else:
chosen_action = self.select_action_tree_policy(node, legal_actions)
assert chosen_action != pyspiel.INVALID_ACTION
node.total_visits += 1
node.child_info[chosen_action].visits += 1
state.apply_action(chosen_action)
returns = self.run_simulation(state)
node.child_info[chosen_action].return_sum += returns[cur_player]
return returns
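# Illustrative construction sketch (not part of this module). The evaluator is
# assumed to expose prior(state) and evaluate(state); the random-rollout
# evaluator from open_spiel.python.algorithms.mcts provides that interface:
#
#   from open_spiel.python.algorithms import mcts
#
#   game = pyspiel.load_game("kuhn_poker")
#   bot = ISMCTSBot(game, mcts.RandomRolloutEvaluator(), uct_c=2.0,
#                   max_simulations=1000)
#   state = game.new_initial_state()
#   while state.is_chance_node():  # deal cards before querying the bot
#     state.apply_action(np.random.choice(state.legal_actions()))
#   action = bot.step(state)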
| open_spiel-master | open_spiel/python/algorithms/ismcts.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adidas."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import adidas
from open_spiel.python.algorithms.adidas_utils.games.big import ElFarol
from open_spiel.python.algorithms.adidas_utils.games.small import MatrixGame
from open_spiel.python.algorithms.adidas_utils.solvers.symmetric import qre_anneal as qre_anneal_sym
class AdidasTest(absltest.TestCase):
def test_adidas_on_prisoners_dilemma(self):
"""Tests ADIDAS on a 2-player prisoner's dilemma game."""
# pylint:disable=bad-whitespace
pt_r = np.array([[-1, -3],
[0, -2]])
# pylint:enable=bad-whitespace
    # shift tensor to ensure positivity, which is required when running
    # ADIDAS with Tsallis entropy
pt_r -= pt_r.min()
pt_c = pt_r.T # symmetric game
pt = np.stack((pt_r, pt_c), axis=0).astype(float)
pt /= pt.max() # arbitrary design choice to upper bound entries to 1
game = MatrixGame(pt, seed=0)
# for games with more than 2 players, see adidas_utils/games/big.py
solver = qre_anneal_sym.Solver(temperature=100,
proj_grad=False, euclidean=True,
lrs=(1e-4, 1e-4), exp_thresh=0.01,
rnd_init=True, seed=0)
# note we set rnd_init to True which initializes adidas' initial
# approximation to nash to a random point on the simplex. if rnd_init is
# False, adidas is initialized to uniform which is the Nash equilibrium
# of the prisoner's dilemma, in which case adidas trivially solves this
# game in 0 iterations.
lle = adidas.ADIDAS(seed=0)
lle.approximate_nash(game, solver, sym=True, num_iterations=1,
num_samples=1, num_eval_samples=int(1e5),
approx_eval=True, exact_eval=True,
avg_trajectory=False)
self.assertLess(lle.results['exps_exact'][-1], 0.2)
def test_adidas_on_elfarol(self):
"""Test ADIDAS on a 10-player, symmetric El Farol bar game."""
game = ElFarol(n=10, c=0.7)
solver = qre_anneal_sym.Solver(temperature=100,
proj_grad=False, euclidean=False,
lrs=(1e-4, 1e-2), exp_thresh=0.01,
seed=0)
lle = adidas.ADIDAS(seed=0)
lle.approximate_nash(game, solver, sym=True, num_iterations=1,
num_samples=np.inf, num_eval_samples=int(1e5),
approx_eval=True, exact_eval=True,
avg_trajectory=False)
self.assertLess(lle.results['exps_exact'][-1], 0.5)
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/adidas_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural Fictitious Self-Play (NFSP) agent implemented in TensorFlow.
See the paper https://arxiv.org/abs/1603.01121 for more details.
"""
import collections
import contextlib
import enum
import os
import random
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_agent
from open_spiel.python import simple_nets
from open_spiel.python.algorithms import dqn
# Temporarily disable TF2 behavior until code is updated.
tf.disable_v2_behavior()
Transition = collections.namedtuple(
"Transition", "info_state action_probs legal_actions_mask")
MODE = enum.Enum("mode", "best_response average_policy")
class NFSP(rl_agent.AbstractAgent):
"""NFSP Agent implementation in TensorFlow.
  See open_spiel/python/examples/kuhn_nfsp.py for a usage example.
"""
def __init__(self,
session,
player_id,
state_representation_size,
num_actions,
hidden_layers_sizes,
reservoir_buffer_capacity,
anticipatory_param,
batch_size=128,
rl_learning_rate=0.01,
sl_learning_rate=0.01,
min_buffer_size_to_learn=1000,
learn_every=64,
optimizer_str="sgd",
**kwargs):
"""Initialize the `NFSP` agent."""
self.player_id = player_id
self._session = session
self._num_actions = num_actions
self._layer_sizes = hidden_layers_sizes
self._batch_size = batch_size
self._learn_every = learn_every
self._anticipatory_param = anticipatory_param
self._min_buffer_size_to_learn = min_buffer_size_to_learn
self._reservoir_buffer = ReservoirBuffer(reservoir_buffer_capacity)
self._prev_timestep = None
self._prev_action = None
# Step counter to keep track of learning.
self._step_counter = 0
# Inner RL agent
kwargs.update({
"batch_size": batch_size,
"learning_rate": rl_learning_rate,
"learn_every": learn_every,
"min_buffer_size_to_learn": min_buffer_size_to_learn,
"optimizer_str": optimizer_str,
})
self._rl_agent = dqn.DQN(session, player_id, state_representation_size,
num_actions, hidden_layers_sizes, **kwargs)
# Keep track of the last training loss achieved in an update step.
self._last_rl_loss_value = lambda: self._rl_agent.loss
self._last_sl_loss_value = None
# Placeholders.
self._info_state_ph = tf.placeholder(
shape=[None, state_representation_size],
dtype=tf.float32,
name="info_state_ph")
self._action_probs_ph = tf.placeholder(
shape=[None, num_actions], dtype=tf.float32, name="action_probs_ph")
self._legal_actions_mask_ph = tf.placeholder(
shape=[None, num_actions],
dtype=tf.float32,
name="legal_actions_mask_ph")
# Average policy network.
self._avg_network = simple_nets.MLP(state_representation_size,
self._layer_sizes, num_actions)
self._avg_policy = self._avg_network(self._info_state_ph)
self._avg_policy_probs = tf.nn.softmax(self._avg_policy)
self._savers = [
("q_network", tf.train.Saver(self._rl_agent._q_network.variables)),
("avg_network", tf.train.Saver(self._avg_network.variables))
]
# Loss
self._loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradient(self._action_probs_ph),
logits=self._avg_policy))
if optimizer_str == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate=sl_learning_rate)
elif optimizer_str == "sgd":
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=sl_learning_rate)
else:
raise ValueError("Not implemented. Choose from ['adam', 'sgd'].")
self._learn_step = optimizer.minimize(self._loss)
self._sample_episode_policy()
@contextlib.contextmanager
def temp_mode_as(self, mode):
"""Context manager to temporarily overwrite the mode."""
previous_mode = self._mode
self._mode = mode
yield
self._mode = previous_mode
def get_step_counter(self):
return self._step_counter
def _sample_episode_policy(self):
if np.random.rand() < self._anticipatory_param:
self._mode = MODE.best_response
else:
self._mode = MODE.average_policy
def _act(self, info_state, legal_actions):
info_state = np.reshape(info_state, [1, -1])
action_values, action_probs = self._session.run(
[self._avg_policy, self._avg_policy_probs],
feed_dict={self._info_state_ph: info_state})
self._last_action_values = action_values[0]
# Remove illegal actions, normalize probs
probs = np.zeros(self._num_actions)
probs[legal_actions] = action_probs[0][legal_actions]
probs /= sum(probs)
action = np.random.choice(len(probs), p=probs)
return action, probs
@property
def mode(self):
return self._mode
@property
def loss(self):
return (self._last_sl_loss_value, self._last_rl_loss_value())
def step(self, time_step, is_evaluation=False):
"""Returns the action to be taken and updates the Q-networks if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
if self._mode == MODE.best_response:
agent_output = self._rl_agent.step(time_step, is_evaluation)
if not is_evaluation and not time_step.last():
self._add_transition(time_step, agent_output)
elif self._mode == MODE.average_policy:
# Act step: don't act at terminal info states.
if not time_step.last():
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
action, probs = self._act(info_state, legal_actions)
agent_output = rl_agent.StepOutput(action=action, probs=probs)
if self._prev_timestep and not is_evaluation:
self._rl_agent.add_transition(self._prev_timestep, self._prev_action,
time_step)
else:
raise ValueError("Invalid mode ({})".format(self._mode))
if not is_evaluation:
self._step_counter += 1
if self._step_counter % self._learn_every == 0:
self._last_sl_loss_value = self._learn()
# If learn step not triggered by rl policy, learn.
if self._mode == MODE.average_policy:
self._rl_agent.learn()
# Prepare for the next episode.
if time_step.last():
self._sample_episode_policy()
self._prev_timestep = None
self._prev_action = None
return
else:
self._prev_timestep = time_step
self._prev_action = agent_output.action
return agent_output
def _add_transition(self, time_step, agent_output):
"""Adds the new transition using `time_step` to the reservoir buffer.
    Transitions are stored as (info_state, action_probs, legal_actions_mask).
Args:
time_step: an instance of rl_environment.TimeStep.
agent_output: an instance of rl_agent.StepOutput.
"""
legal_actions = time_step.observations["legal_actions"][self.player_id]
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
transition = Transition(
info_state=(time_step.observations["info_state"][self.player_id][:]),
action_probs=agent_output.probs,
legal_actions_mask=legal_actions_mask)
self._reservoir_buffer.add(transition)
def _learn(self):
"""Compute the loss on sampled transitions and perform a avg-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Returns:
The average loss obtained on this batch of transitions or `None`.
"""
if (len(self._reservoir_buffer) < self._batch_size or
len(self._reservoir_buffer) < self._min_buffer_size_to_learn):
return None
transitions = self._reservoir_buffer.sample(self._batch_size)
info_states = [t.info_state for t in transitions]
action_probs = [t.action_probs for t in transitions]
legal_actions_mask = [t.legal_actions_mask for t in transitions]
loss, _ = self._session.run(
[self._loss, self._learn_step],
feed_dict={
self._info_state_ph: info_states,
self._action_probs_ph: action_probs,
self._legal_actions_mask_ph: legal_actions_mask,
})
return loss
def _full_checkpoint_name(self, checkpoint_dir, name):
checkpoint_filename = "_".join([name, "pid" + str(self.player_id)])
return os.path.join(checkpoint_dir, checkpoint_filename)
def _latest_checkpoint_filename(self, name):
checkpoint_filename = "_".join([name, "pid" + str(self.player_id)])
return checkpoint_filename + "_latest"
def save(self, checkpoint_dir):
"""Saves the average policy network and the inner RL agent's q-network.
Note that this does not save the experience replay buffers and should
only be used to restore the agent's policy, not resume training.
Args:
checkpoint_dir: directory where checkpoints will be saved.
"""
for name, saver in self._savers:
path = saver.save(
self._session,
self._full_checkpoint_name(checkpoint_dir, name),
latest_filename=self._latest_checkpoint_filename(name))
logging.info("Saved to path: %s", path)
def has_checkpoint(self, checkpoint_dir):
for name, _ in self._savers:
if tf.train.latest_checkpoint(
self._full_checkpoint_name(checkpoint_dir, name),
os.path.join(checkpoint_dir,
self._latest_checkpoint_filename(name))) is None:
return False
return True
def restore(self, checkpoint_dir):
"""Restores the average policy network and the inner RL agent's q-network.
Note that this does not restore the experience replay buffers and should
only be used to restore the agent's policy, not resume training.
Args:
checkpoint_dir: directory from which checkpoints will be restored.
"""
for name, saver in self._savers:
full_checkpoint_dir = self._full_checkpoint_name(checkpoint_dir, name)
logging.info("Restoring checkpoint: %s", full_checkpoint_dir)
saver.restore(self._session, full_checkpoint_dir)
class ReservoirBuffer(object):
"""Allows uniform sampling over a stream of data.
This class supports the storage of arbitrary elements, such as observation
tensors, integer actions, etc.
See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
"""
def __init__(self, reservoir_buffer_capacity):
self._reservoir_buffer_capacity = reservoir_buffer_capacity
self._data = []
self._add_calls = 0
def add(self, element):
"""Potentially adds `element` to the reservoir buffer.
Args:
element: data to be added to the reservoir buffer.
"""
if len(self._data) < self._reservoir_buffer_capacity:
self._data.append(element)
else:
idx = np.random.randint(0, self._add_calls + 1)
if idx < self._reservoir_buffer_capacity:
self._data[idx] = element
self._add_calls += 1
def sample(self, num_samples):
"""Returns `num_samples` uniformly sampled from the buffer.
Args:
num_samples: `int`, number of samples to draw.
Returns:
An iterable over `num_samples` random elements of the buffer.
Raises:
      ValueError: If there are fewer than `num_samples` elements in the buffer.
"""
if len(self._data) < num_samples:
raise ValueError("{} elements could not be sampled from size {}".format(
num_samples, len(self._data)))
return random.sample(self._data, num_samples)
def clear(self):
self._data = []
self._add_calls = 0
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
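# Illustrative sketch of the reservoir buffer's behaviour (not part of the
# module): after streaming n elements through a buffer of capacity k, each
# element is retained with probability k / n.
#
#   buf = ReservoirBuffer(reservoir_buffer_capacity=2)
#   for x in range(10):
#     buf.add(x)            # each of the 10 elements survives w.p. 2 / 10
#   sample = buf.sample(2)  # uniform sample from the retained elements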
| open_spiel-master | open_spiel/python/algorithms/nfsp.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python base module for the implementations of Monte Carlo Counterfactual Regret Minimization."""
import numpy as np
from open_spiel.python import policy
REGRET_INDEX = 0
AVG_POLICY_INDEX = 1
class AveragePolicy(policy.Policy):
"""A policy object representing the average policy for MCCFR algorithms."""
def __init__(self, game, player_ids, infostates):
# Do not create a copy of the dictionary
# but work on the same object
super().__init__(game, player_ids)
self._infostates = infostates
def action_probabilities(self, state, player_id=None):
"""Returns the MCCFR average policy for a player in a state.
If the policy is not defined for the provided state, a uniform
random policy is returned.
Args:
state: A `pyspiel.State` object.
player_id: Optional, the player id for which we want an action. Optional
unless this is a simultaneous state at which multiple players can act.
Returns:
A `dict` of `{action: probability}` for the specified player in the
supplied state. If the policy is defined for the state, this
will contain the average MCCFR strategy defined for that state.
Otherwise, it will contain all legal actions, each with the same
probability, equal to 1 / num_legal_actions.
"""
if player_id is None:
player_id = state.current_player()
legal_actions = state.legal_actions()
info_state_key = state.information_state_string(player_id)
retrieved_infostate = self._infostates.get(info_state_key, None)
if retrieved_infostate is None:
return {a: 1 / len(legal_actions) for a in legal_actions}
avstrat = (
retrieved_infostate[AVG_POLICY_INDEX] /
retrieved_infostate[AVG_POLICY_INDEX].sum())
return {legal_actions[i]: avstrat[i] for i in range(len(legal_actions))}
class MCCFRSolverBase(object):
"""A base class for both outcome MCCFR and external MCCFR."""
def __init__(self, game):
self._game = game
self._infostates = {} # infostate keys -> [regrets, avg strat]
self._num_players = game.num_players()
def _lookup_infostate_info(self, info_state_key, num_legal_actions):
"""Looks up an information set table for the given key.
Args:
info_state_key: information state key (string identifier).
num_legal_actions: number of legal actions at this information state.
Returns:
A list of:
- the average regrets as a numpy array of shape [num_legal_actions]
- the average strategy as a numpy array of shape
[num_legal_actions].
The average is weighted using `my_reach`
"""
retrieved_infostate = self._infostates.get(info_state_key, None)
if retrieved_infostate is not None:
return retrieved_infostate
# Start with a small amount of regret and total accumulation, to give a
# uniform policy: this will get erased fast.
self._infostates[info_state_key] = [
np.ones(num_legal_actions, dtype=np.float64) / 1e6,
np.ones(num_legal_actions, dtype=np.float64) / 1e6,
]
return self._infostates[info_state_key]
def _add_regret(self, info_state_key, action_idx, amount):
self._infostates[info_state_key][REGRET_INDEX][action_idx] += amount
def _add_avstrat(self, info_state_key, action_idx, amount):
self._infostates[info_state_key][AVG_POLICY_INDEX][action_idx] += amount
def average_policy(self):
"""Computes the average policy, containing the policy for all players.
Returns:
An average policy instance that should only be used during
the lifetime of solver object.
"""
return AveragePolicy(self._game, list(range(self._num_players)),
self._infostates)
def _regret_matching(self, regrets, num_legal_actions):
"""Applies regret matching to get a policy.
Args:
regrets: numpy array of regrets for each action.
num_legal_actions: number of legal actions at this state.
Returns:
numpy array of the policy indexed by the index of legal action in the
list.
"""
positive_regrets = np.maximum(regrets,
np.zeros(num_legal_actions, dtype=np.float64))
sum_pos_regret = positive_regrets.sum()
if sum_pos_regret <= 0:
return np.ones(num_legal_actions, dtype=np.float64) / num_legal_actions
else:
return positive_regrets / sum_pos_regret
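# A small standalone sketch of the regret-matching rule implemented in
# `_regret_matching` above (illustration only, not part of the original
# module). Positive regrets are normalized into a policy; if no regret is
# positive, the policy falls back to uniform.
if __name__ == "__main__":
  example_regrets = np.array([2.0, -1.0, 6.0])
  positive = np.maximum(example_regrets, 0.0)
  if positive.sum() > 0:
    example_policy = positive / positive.sum()  # [0.25, 0.0, 0.75]
  else:
    example_policy = np.ones(len(example_regrets)) / len(example_regrets)
  print(example_policy)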
| open_spiel-master | open_spiel/python/algorithms/mccfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Q-values and reach probabilities computation."""
import collections
import numpy as np
_CalculatorReturn = collections.namedtuple(
"_CalculatorReturn",
[
# A list of size `num_players` of the root node value for each player.
"root_node_values",
# An array of shape `[len(info_states), game.num_distinct_actions()]`
# giving the value of each action. Will be zero for invalid actions.
"action_values",
# The player's counterfactual reach probability of this infostate when
# playing, as a list of shape [num_info_states].
"counterfactual_reach_probs",
# The reach probability of the current player at the infostates, as a
# list of shape [num_info_states].
# This is the product of the current player probs along *one* trajectory
# leading to this info-state (this number should be the same along
# any trajectory leading to this info-state because of perfect recall).
"player_reach_probs",
        # A list of `len(info_states)` numpy arrays, each of shape
        # `[game.num_distinct_actions()]`, so that
        # v[s_index][a] = \sum_{h \in x} cfr_reach(h) * Q(h, a).
"sum_cfr_reach_by_action_value",
])
class TreeWalkCalculator(object):
r"""Class to orchestrate the calculation.
This performs a full history tree walk and computes several statistics,
available as attributes.
Attributes:
    weighted_action_values: A dictionary mapping (player, information state
      string) to a dictionary mapping each action to a vector of the sum of
      (reward * prob) for taking that action, one entry per player. To get the
      action-values, one will need to normalize by `info_state_prob`.
info_state_prob: A dictionary mapping (player,information state string) to
the reach probability of this info_state.
info_state_player_prob: Same as info_state_prob for the player reach
probability.
info_state_cf_prob: Same as info_state_prob for the counterfactual reach
probability to get to that state, i.e. the sum over histories, of the
product of the opponents probabilities of actions leading to the history.
info_state_chance_prob: Same as above, for the chance probability to get
into that state.
info_state_cf_prob_by_q_sum: A dictionary mapping (player,information state
string) to a vector of shape `[num_actions]`, that store for each action
the cumulative \sum_{h \in x} cfr_reach(h) * Q(h, a)
root_values: The values at the root node [for player 0, for player 1].
"""
def __init__(self, game):
if not game.get_type().provides_information_state_string:
raise ValueError("Only game which provide the information_state_string "
"are supported, as this is being used in the key to "
"identify states.")
self._game = game
self._num_players = game.num_players()
self._num_actions = game.num_distinct_actions()
self.weighted_action_values = None
self.info_state_prob = None
self.info_state_player_prob = None
self.info_state_cf_prob = None
self.info_state_chance_prob = None
self.info_state_cf_prob_by_q_sum = None
self.root_values = None
def _get_action_values(self, state, policies, reach_probabilities):
"""Computes the value of the state given the policies for both players.
Args:
state: The state to start analysis from.
policies: List of `policy.Policy` objects, one per player.
reach_probabilities: A numpy array of shape `[num_players + 1]`.
reach_probabilities[i] is the product of the player i action
probabilities along the current trajectory. Note that
reach_probabilities[-1] corresponds to the chance player. Initially, it
should be called with np.ones(self._num_players + 1) at the root node.
Returns:
The value of the root state to each player.
Side-effects - populates:
`self.weighted_action_values[(player, infostate)][action]`.
`self.info_state_prob[(player, infostate)]`.
`self.info_state_cf_prob[(player, infostate)]`.
`self.info_state_chance_prob[(player, infostate)]`.
We use `(player, infostate)` as a key in case the same infostate is shared
by multiple players, e.g. in a simultaneous-move game.
"""
if state.is_terminal():
return np.array(state.returns())
current_player = state.current_player()
is_chance = state.is_chance_node()
if not is_chance:
key = (current_player, state.information_state_string())
reach_prob = np.prod(reach_probabilities)
# We exclude both the current and the chance players.
opponent_probability = (
np.prod(reach_probabilities[:current_player]) *
np.prod(reach_probabilities[current_player + 1:-1]))
self.info_state_cf_prob[key] += (
reach_probabilities[-1] * opponent_probability)
self.info_state_prob[key] += reach_prob
self.info_state_chance_prob[key] += reach_probabilities[-1]
# Mind that we have "=" here and not "+=", because we just need to use
# the reach prob for the player for *any* of the histories leading to
# the current info_state (they are all equal because of perfect recall).
self.info_state_player_prob[key] = reach_probabilities[current_player]
value = np.zeros(len(policies))
if is_chance:
action_to_prob = dict(state.chance_outcomes())
else:
action_to_prob = policies[current_player].action_probabilities(state)
for action in state.legal_actions():
prob = action_to_prob.get(action, 0)
new_reach_probabilities = reach_probabilities.copy()
new_reach_probabilities[current_player] *= prob
child = state.child(action)
child_value = self._get_action_values(
child, policies, reach_probabilities=new_reach_probabilities)
if not is_chance:
self.weighted_action_values[key][action] += child_value * reach_prob
self.info_state_cf_prob_by_q_sum[key][action] += (
child_value[current_player] * opponent_probability *
reach_probabilities[-1])
value += child_value * prob
return value
def compute_all_states_action_values(self, policies):
"""Computes action values per state for the player.
The internal state is fully re-created when calling this method, thus it's
safe to use one object to perform several tree-walks using different
policies, and to extract the results using for example
    `calculator.info_state_prob` to take ownership of the dictionary.
Args:
policies: List of `policy.Policy` objects, one per player. As the policy
will be accessed using `policies[i]`, it can also be a dictionary
mapping player_id to a `policy.Policy` object.
"""
assert len(policies) == self._num_players
# Compute action values
self.weighted_action_values = collections.defaultdict(
lambda: collections.defaultdict(lambda: np.zeros(self._num_players)))
self.info_state_prob = collections.defaultdict(float)
self.info_state_player_prob = collections.defaultdict(float)
self.info_state_cf_prob = collections.defaultdict(float)
self.info_state_chance_prob = collections.defaultdict(float)
self.info_state_cf_prob_by_q_sum = collections.defaultdict(
lambda: np.zeros(self._num_actions))
self.root_values = self._get_action_values(
self._game.new_initial_state(),
policies,
reach_probabilities=np.ones(self._num_players + 1))
  def _get_tabular_statistics(self, keys):
    """Returns tabular numpy arrays of the resulting statistics.
Args:
keys: A list of the (player, info_state_str) keys to use to return the
tabular numpy array of results.
"""
# Collect normalized action values for each information state
action_values = []
cfrp = [] # Counterfactual reach probabilities
player_reach_probs = []
sum_cfr_reach_by_action_value = []
for key in keys:
player = key[0]
av = self.weighted_action_values[key]
norm_prob = self.info_state_prob[key]
action_values.append([(av[a][player] / norm_prob) if
(a in av and norm_prob > 0) else 0
for a in range(self._num_actions)])
cfrp.append(self.info_state_cf_prob[key])
player_reach_probs.append(self.info_state_player_prob[key])
sum_cfr_reach_by_action_value.append(
self.info_state_cf_prob_by_q_sum[key])
# Return values
return _CalculatorReturn(
root_node_values=self.root_values,
action_values=action_values,
counterfactual_reach_probs=cfrp,
player_reach_probs=player_reach_probs,
sum_cfr_reach_by_action_value=sum_cfr_reach_by_action_value)
  def get_tabular_statistics(self, tabular_policy):
    """Returns tabular numpy arrays of the resulting statistics.
This function should be called after `compute_all_states_action_values`.
Optionally, one can directly call the object to perform both actions.
Args:
tabular_policy: A `policy.TabularPolicy` object, used to get the ordering
of the states in the tabular numpy array.
"""
keys = []
for player_id, player_states in enumerate(tabular_policy.states_per_player):
keys += [(player_id, s) for s in player_states]
return self._get_tabular_statistics(keys)
def __call__(self, policies, tabular_policy):
"""Computes action values per state for the player.
The internal state is fully re-created when calling this method, thus it's
safe to use one object to perform several tree-walks using different
policies, and to extract the results using for example
    `calculator.info_state_prob` to take ownership of the dictionary.
Args:
policies: List of `policy.Policy` objects, one per player.
tabular_policy: A `policy.TabularPolicy` object, used to get the ordering
of the states in the tabular numpy array.
Returns:
A `_CalculatorReturn` namedtuple. See its docstring for the details.
"""
self.compute_all_states_action_values(policies)
return self.get_tabular_statistics(tabular_policy)
def get_root_node_values(self, policies):
"""Gets root values only.
This speeds up calculation in two ways:
1. It only searches nodes with positive probability.
2. It does not populate a large dictionary of meta information.
Args:
policies: List of `policy.Policy` objects, one per player.
Returns:
A numpy array of shape [num_players] of the root value.
"""
return self._get_action_values_only(
self._game.new_initial_state(),
policies,
reach_probabilities=np.ones(self._num_players + 1))
def _get_action_values_only(self, state, policies, reach_probabilities):
"""Computes the value of the state given the policies for both players.
Args:
state: The state to start analysis from.
policies: List of `policy.Policy` objects, one per player.
reach_probabilities: A numpy array of shape `[num_players + 1]`.
reach_probabilities[i] is the product of the player i action
probabilities along the current trajectory. Note that
reach_probabilities[-1] corresponds to the chance player. Initially, it
should be called with np.ones(self._num_players + 1) at the root node.
Returns:
A numpy array of shape [num_players] of the root value.
"""
if state.is_terminal():
return np.array(state.returns())
current_player = state.current_player()
is_chance = state.is_chance_node()
value = np.zeros(len(policies))
if is_chance:
action_to_prob = dict(state.chance_outcomes())
else:
action_to_prob = policies[current_player].action_probabilities(state)
for action in state.legal_actions():
prob = action_to_prob.get(action, 0)
# Do not follow tree down if there is zero probability.
if prob == 0.0:
continue
new_reach_probabilities = reach_probabilities.copy()
new_reach_probabilities[current_player] *= prob
child = state.child(action)
child_value = self._get_action_values_only(
child, policies, reach_probabilities=new_reach_probabilities)
value += child_value * prob
return value
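# A minimal usage sketch for `TreeWalkCalculator` (illustration only, not part
# of the original module). It assumes pyspiel and open_spiel.python.policy are
# importable and uses Kuhn poker with uniform random policies for both
# players.
if __name__ == "__main__":
  import pyspiel
  from open_spiel.python import policy as policy_lib

  example_game = pyspiel.load_game("kuhn_poker")
  calculator = TreeWalkCalculator(example_game)
  uniform_policies = [policy_lib.UniformRandomPolicy(example_game)] * 2
  returned = calculator(uniform_policies,
                        policy_lib.TabularPolicy(example_game))
  print(returned.root_node_values)  # Expected returns at the root, per player.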
| open_spiel-master | open_spiel/python/algorithms/action_value.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.action_value_vs_best_response.py."""
from absl.testing import absltest
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import action_value_vs_best_response
import pyspiel
class ActionValuesVsBestResponseTest(absltest.TestCase):
def test_kuhn_poker_uniform(self):
game = pyspiel.load_game("kuhn_poker")
calc = action_value_vs_best_response.Calculator(game)
(expl, avvbr, cfrp,
player_reach_probs) = calc(0, policy.UniformRandomPolicy(game),
["0", "1", "2", "0pb", "1pb", "2pb"])
self.assertAlmostEqual(expl, 15 / 36)
np.testing.assert_allclose(
avvbr,
[
[-1.5, -2.0], # 0 (better to pass)
[-0.5, -0.5], # 1 (same)
[0.5, 1.5], # 2 (better to bet)
[-1.0, -2.0], # 0pb - losing
[-1.0, 0.0], # 1pb - best response is bet always
[-1.0, 2.0], # 2pb - winning
])
np.testing.assert_allclose(cfrp, [1 / 3, 1 / 3, 1 / 3, 1 / 3, 1 / 3, 1 / 3])
np.testing.assert_allclose([1, 1, 1, 1 / 2, 1 / 2, 1 / 2],
player_reach_probs)
def test_kuhn_poker_always_pass_p0(self):
game = pyspiel.load_game("kuhn_poker")
calc = action_value_vs_best_response.Calculator(game)
(expl, avvbr, cfrp, player_reach_probs) = calc(
0, policy.FirstActionPolicy(game),
["0", "1", "2", "0pb", "1pb", "2pb"])
self.assertAlmostEqual(expl, 1.)
np.testing.assert_allclose(
avvbr,
[
# Opening bet. If we pass, we always lose (pass-pass with op's K,
# otherwise pass-bet-pass).
# If we bet, we always win (because op's best response is to pass,
# because this is an unreachable state and we break ties in favour
# of the lowest action).
[-1, 1],
[-1, 1],
[-1, 1],
# We pass, opp bets into us. This can be either J or Q (K will pass
# because of the tie-break rules).
# So we are guaranteed to be winning with Q or K.
[-1, -2], # 0pb
[-1, 2], # 1pb
[-1, 2], # 2pb
])
np.testing.assert_allclose(cfrp, [1 / 3, 1 / 3, 1 / 3, 1 / 6, 1 / 6, 1 / 3])
np.testing.assert_allclose([1., 1., 1., 1., 1., 1.], player_reach_probs)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/action_value_vs_best_response_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discounted CFR and Linear CFR algorithms.
This implements Discounted CFR and Linear CFR, from Noam Brown and Tuomas
Sandholm, 2019, "Solving Imperfect-Information Games via Discounted Regret
Minimization".
See https://arxiv.org/abs/1809.04040.
Linear CFR (LCFR), is identical to CFR, except on iteration `t` the updates to
the regrets and average strategies are given weight `t`. (Equivalently, one
could multiply the accumulated regret by t / (t+1) on each iteration.)
Discounted CFR(alpha, beta, gamma) is defined by, at iteration `t`:
- multiplying the positive accumulated regrets by (t^alpha / (t^alpha + 1))
- multiplying the negative accumulated regrets by (t^beta / (t^beta + 1))
- multiplying the contribution to the average strategy by t^gamma
WARNING: This was contributed on GitHub, and the OpenSpiel team is not aware
of any verification that it reproduces the paper's results.
"""
import numpy as np
from open_spiel.python.algorithms import cfr
_InfoStateNode = cfr._InfoStateNode # pylint: disable=protected-access
class _DCFRSolver(cfr._CFRSolver): # pylint: disable=protected-access
"""Discounted CFR."""
def __init__(self, game, alternating_updates, linear_averaging,
regret_matching_plus, alpha, beta, gamma):
super(_DCFRSolver, self).__init__(game, alternating_updates,
linear_averaging, regret_matching_plus)
self.alpha = alpha
self.beta = beta
self.gamma = gamma
# We build a list of the nodes for all players, which will be updated
# within `evaluate_and_update_policy`.
self._player_nodes = [[] for _ in range(self._num_players)]
for info_state in self._info_state_nodes.values():
self._player_nodes[info_state.player].append(info_state)
def _initialize_info_state_nodes(self, state):
"""Initializes info_state_nodes.
We override the parent function, to add the current player information
at the given node. This is used because we want to do updates for all nodes
for a specific player.
Args:
state: The current state in the tree walk. This should be the root node
when we call this function from a CFR solver.
"""
if state.is_terminal():
return
if state.is_chance_node():
for action, unused_action_prob in state.chance_outcomes():
self._initialize_info_state_nodes(state.child(action))
return
current_player = state.current_player()
info_state = state.information_state_string(current_player)
info_state_node = self._info_state_nodes.get(info_state)
if info_state_node is None:
legal_actions = state.legal_actions(current_player)
info_state_node = _InfoStateNode(
legal_actions=legal_actions,
index_in_tabular_policy=self._current_policy.state_lookup[info_state])
info_state_node.player = current_player
self._info_state_nodes[info_state] = info_state_node
for action in info_state_node.legal_actions:
self._initialize_info_state_nodes(state.child(action))
def _compute_counterfactual_regret_for_player(self, state, policies,
reach_probabilities, player):
"""Increments the cumulative regrets and policy for `player`.
Args:
state: The initial game state to analyze from.
policies: Unused. To be compatible with the `_CFRSolver` signature.
reach_probabilities: The probability for each player of reaching `state`
as a numpy array [prob for player 0, for player 1,..., for chance].
`player_reach_probabilities[player]` will work in all cases.
player: The 0-indexed player to update the values for. If `None`, the
update for all players will be performed.
Returns:
The utility of `state` for all players, assuming all players follow the
current policy defined by `self.Policy`.
"""
if state.is_terminal():
return np.asarray(state.returns())
if state.is_chance_node():
state_value = 0.0
for action, action_prob in state.chance_outcomes():
assert action_prob > 0
new_state = state.child(action)
new_reach_probabilities = reach_probabilities.copy()
new_reach_probabilities[-1] *= action_prob
state_value += action_prob * self._compute_counterfactual_regret_for_player(
new_state, policies, new_reach_probabilities, player)
return state_value
current_player = state.current_player()
info_state = state.information_state_string(current_player)
# No need to continue on this history branch as no update will be performed
# for any player.
# The value we return here is not used in practice. If the conditional
# statement is True, then the last taken action has probability 0 of
# occurring, so the returned value is not impacting the parent node value.
if all(reach_probabilities[:-1] == 0):
return np.zeros(self._num_players)
state_value = np.zeros(self._num_players)
# The utilities of the children states are computed recursively. As the
# regrets are added to the information state regrets for each state in that
# information state, the recursive call can only be made once per child
# state. Therefore, the utilities are cached.
children_utilities = {}
info_state_node = self._info_state_nodes[info_state]
if policies is None:
info_state_policy = self._get_infostate_policy(info_state)
else:
info_state_policy = policies[current_player](info_state)
for action in state.legal_actions():
action_prob = info_state_policy.get(action, 0.)
new_state = state.child(action)
new_reach_probabilities = reach_probabilities.copy()
new_reach_probabilities[current_player] *= action_prob
child_utility = self._compute_counterfactual_regret_for_player(
new_state,
policies=policies,
reach_probabilities=new_reach_probabilities,
player=player)
state_value += action_prob * child_utility
children_utilities[action] = child_utility
# If we are performing alternating updates, and the current player is not
# the current_player, we skip the cumulative values update.
# If we are performing simultaneous updates, we do update the cumulative
# values.
    simultaneous_updates = player is None
    if not simultaneous_updates and current_player != player:
return state_value
reach_prob = reach_probabilities[current_player]
counterfactual_reach_prob = (
np.prod(reach_probabilities[:current_player]) *
np.prod(reach_probabilities[current_player + 1:]))
state_value_for_player = state_value[current_player]
for action, action_prob in info_state_policy.items():
cfr_regret = counterfactual_reach_prob * (
children_utilities[action][current_player] - state_value_for_player)
info_state_node = self._info_state_nodes[info_state]
info_state_node.cumulative_regret[action] += cfr_regret
if self._linear_averaging:
info_state_node.cumulative_policy[action] += (
reach_prob * action_prob * (self._iteration**self.gamma))
else:
info_state_node.cumulative_policy[action] += reach_prob * action_prob
return state_value
def evaluate_and_update_policy(self):
"""Performs a single step of policy evaluation and policy improvement."""
self._iteration += 1
if self._alternating_updates:
for current_player in range(self._game.num_players()):
self._compute_counterfactual_regret_for_player(
self._root_node,
policies=None,
reach_probabilities=np.ones(self._game.num_players() + 1),
player=current_player)
for info_state in self._player_nodes[current_player]:
for action in info_state.cumulative_regret.keys():
if info_state.cumulative_regret[action] >= 0:
info_state.cumulative_regret[action] *= (
self._iteration**self.alpha /
(self._iteration**self.alpha + 1))
else:
info_state.cumulative_regret[action] *= (
self._iteration**self.beta / (self._iteration**self.beta + 1))
cfr._update_current_policy(self._current_policy, self._info_state_nodes) # pylint: disable=protected-access
class DCFRSolver(_DCFRSolver):
def __init__(self, game, alpha=3 / 2, beta=0, gamma=2):
super(DCFRSolver, self).__init__(
game,
regret_matching_plus=False,
alternating_updates=True,
linear_averaging=True,
alpha=alpha,
beta=beta,
gamma=gamma)
class LCFRSolver(_DCFRSolver):
def __init__(self, game):
super(LCFRSolver, self).__init__(
game,
regret_matching_plus=False,
alternating_updates=True,
linear_averaging=True,
alpha=1,
beta=1,
gamma=1)
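# A minimal usage sketch for the solvers above (illustration only, not part of
# the original module). It assumes pyspiel and the exploitability module are
# importable and uses Kuhn poker. The last two prints show the DCFR default
# (alpha=3/2, beta=0) regret multipliers at iteration t=10, following the
# t^alpha / (t^alpha + 1) and t^beta / (t^beta + 1) rules from the module
# docstring.
if __name__ == "__main__":
  import pyspiel
  from open_spiel.python.algorithms import exploitability

  example_game = pyspiel.load_game("kuhn_poker")
  solver = DCFRSolver(example_game)
  for _ in range(10):
    solver.evaluate_and_update_policy()
  print(exploitability.exploitability(example_game, solver.average_policy()))

  t, alpha, beta = 10.0, 1.5, 0.0
  print(t**alpha / (t**alpha + 1))  # ~0.969: positive regrets decay slowly.
  print(t**beta / (t**beta + 1))    # 0.5: negative regrets are halved.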
| open_spiel-master | open_spiel/python/algorithms/discounted_cfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agent implemented in TensorFlow."""
import collections
import os
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_agent
from open_spiel.python import simple_nets
from open_spiel.python.utils.replay_buffer import ReplayBuffer
# Temporarily disable TF2 behavior until code is updated.
tf.disable_v2_behavior()
Transition = collections.namedtuple(
"Transition",
"info_state action reward next_info_state is_final_step legal_actions_mask")
ILLEGAL_ACTION_LOGITS_PENALTY = -1e9
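# Illustration of the masking trick used when forming the TD target below (not
# part of the original module): with a legal-actions mask of [1, 0, 1],
# `1 - mask` is [0, 1, 0], so the penalty turns target Q-values such as
# [1.2, 5.0, 0.7] into [1.2, 5.0 - 1e9, 0.7] and the illegal middle action can
# never win the max.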
class DQN(rl_agent.AbstractAgent):
"""DQN Agent implementation in TensorFlow.
  See open_spiel/python/examples/breakthrough_dqn.py for a usage example.
"""
def __init__(self,
session,
player_id,
state_representation_size,
num_actions,
hidden_layers_sizes=128,
replay_buffer_capacity=10000,
batch_size=128,
replay_buffer_class=ReplayBuffer,
learning_rate=0.01,
update_target_network_every=1000,
learn_every=10,
discount_factor=1.0,
min_buffer_size_to_learn=1000,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_duration=int(1e6),
optimizer_str="sgd",
loss_str="mse"):
"""Initialize the DQN agent."""
# This call to locals() is used to store every argument used to initialize
# the class instance, so it can be copied with no hyperparameter change.
self._kwargs = locals()
self.player_id = player_id
self._session = session
self._num_actions = num_actions
if isinstance(hidden_layers_sizes, int):
hidden_layers_sizes = [hidden_layers_sizes]
self._layer_sizes = hidden_layers_sizes
self._batch_size = batch_size
self._update_target_network_every = update_target_network_every
self._learn_every = learn_every
self._min_buffer_size_to_learn = min_buffer_size_to_learn
self._discount_factor = discount_factor
self._epsilon_start = epsilon_start
self._epsilon_end = epsilon_end
self._epsilon_decay_duration = epsilon_decay_duration
# TODO(author6) Allow for optional replay buffer config.
if not isinstance(replay_buffer_capacity, int):
raise ValueError("Replay buffer capacity not an integer.")
self._replay_buffer = replay_buffer_class(replay_buffer_capacity)
self._prev_timestep = None
self._prev_action = None
# Step counter to keep track of learning, eps decay and target network.
self._step_counter = 0
# Keep track of the last training loss achieved in an update step.
self._last_loss_value = None
# Create required TensorFlow placeholders to perform the Q-network updates.
self._info_state_ph = tf.placeholder(
shape=[None, state_representation_size],
dtype=tf.float32,
name="info_state_ph")
self._action_ph = tf.placeholder(
shape=[None], dtype=tf.int32, name="action_ph")
self._reward_ph = tf.placeholder(
shape=[None], dtype=tf.float32, name="reward_ph")
self._is_final_step_ph = tf.placeholder(
shape=[None], dtype=tf.float32, name="is_final_step_ph")
self._next_info_state_ph = tf.placeholder(
shape=[None, state_representation_size],
dtype=tf.float32,
name="next_info_state_ph")
self._legal_actions_mask_ph = tf.placeholder(
shape=[None, num_actions],
dtype=tf.float32,
name="legal_actions_mask_ph")
self._q_network = simple_nets.MLP(state_representation_size,
self._layer_sizes, num_actions)
self._q_values = self._q_network(self._info_state_ph)
self._target_q_network = simple_nets.MLP(state_representation_size,
self._layer_sizes, num_actions)
self._target_q_values = self._target_q_network(self._next_info_state_ph)
# Stop gradient to prevent updates to the target network while learning
self._target_q_values = tf.stop_gradient(self._target_q_values)
self._update_target_network = self._create_target_network_update_op(
self._q_network, self._target_q_network)
# Create the loss operations.
# Sum a large negative constant to illegal action logits before taking the
# max. This prevents illegal action values from being considered as target.
illegal_actions = 1 - self._legal_actions_mask_ph
illegal_logits = illegal_actions * ILLEGAL_ACTION_LOGITS_PENALTY
max_next_q = tf.reduce_max(
tf.math.add(tf.stop_gradient(self._target_q_values), illegal_logits),
axis=-1)
target = (
self._reward_ph +
(1 - self._is_final_step_ph) * self._discount_factor * max_next_q)
action_indices = tf.stack(
[tf.range(tf.shape(self._q_values)[0]), self._action_ph], axis=-1)
predictions = tf.gather_nd(self._q_values, action_indices)
self._savers = [("q_network", tf.train.Saver(self._q_network.variables)),
("target_q_network",
tf.train.Saver(self._target_q_network.variables))]
if loss_str == "mse":
loss_class = tf.losses.mean_squared_error
elif loss_str == "huber":
loss_class = tf.losses.huber_loss
else:
raise ValueError("Not implemented, choose from 'mse', 'huber'.")
self._loss = tf.reduce_mean(
loss_class(labels=target, predictions=predictions))
if optimizer_str == "adam":
self._optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif optimizer_str == "sgd":
self._optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate)
else:
raise ValueError("Not implemented, choose from 'adam' and 'sgd'.")
self._learn_step = self._optimizer.minimize(self._loss)
self._initialize()
def get_step_counter(self):
return self._step_counter
def step(self, time_step, is_evaluation=False, add_transition_record=True):
"""Returns the action to be taken and updates the Q-network if needed.
Args:
time_step: an instance of rl_environment.TimeStep.
is_evaluation: bool, whether this is a training or evaluation call.
add_transition_record: Whether to add to the replay buffer on this step.
Returns:
A `rl_agent.StepOutput` containing the action probs and chosen action.
"""
    # Act step: don't act at terminal info states or if it's not our turn.
if (not time_step.last()) and (
time_step.is_simultaneous_move() or
self.player_id == time_step.current_player()):
info_state = time_step.observations["info_state"][self.player_id]
legal_actions = time_step.observations["legal_actions"][self.player_id]
epsilon = self._get_epsilon(is_evaluation)
action, probs = self._epsilon_greedy(info_state, legal_actions, epsilon)
else:
action = None
probs = []
# Don't mess up with the state during evaluation.
if not is_evaluation:
self._step_counter += 1
if self._step_counter % self._learn_every == 0:
self._last_loss_value = self.learn()
if self._step_counter % self._update_target_network_every == 0:
self._session.run(self._update_target_network)
if self._prev_timestep and add_transition_record:
# We may omit record adding here if it's done elsewhere.
self.add_transition(self._prev_timestep, self._prev_action, time_step)
if time_step.last(): # prepare for the next episode.
self._prev_timestep = None
self._prev_action = None
return
else:
self._prev_timestep = time_step
self._prev_action = action
return rl_agent.StepOutput(action=action, probs=probs)
def add_transition(self, prev_time_step, prev_action, time_step):
"""Adds the new transition using `time_step` to the replay buffer.
Adds the transition from `self._prev_timestep` to `time_step` by
`self._prev_action`.
Args:
prev_time_step: prev ts, an instance of rl_environment.TimeStep.
prev_action: int, action taken at `prev_time_step`.
time_step: current ts, an instance of rl_environment.TimeStep.
"""
assert prev_time_step is not None
legal_actions = (time_step.observations["legal_actions"][self.player_id])
legal_actions_mask = np.zeros(self._num_actions)
legal_actions_mask[legal_actions] = 1.0
transition = Transition(
info_state=(
prev_time_step.observations["info_state"][self.player_id][:]),
action=prev_action,
reward=time_step.rewards[self.player_id],
next_info_state=time_step.observations["info_state"][self.player_id][:],
is_final_step=float(time_step.last()),
legal_actions_mask=legal_actions_mask)
self._replay_buffer.add(transition)
def _create_target_network_update_op(self, q_network, target_q_network):
"""Create TF ops copying the params of the Q-network to the target network.
Args:
      q_network: A q-network object that provides the `variables` property
        representing the TF variable list.
target_q_network: A target q-net object that provides the `variables`
property representing the TF variable list.
Returns:
A `tf.Operation` that updates the variables of the target.
"""
self._variables = q_network.variables[:]
self._target_variables = target_q_network.variables[:]
assert self._variables
assert len(self._variables) == len(self._target_variables)
return tf.group([
tf.assign(target_v, v)
for (target_v, v) in zip(self._target_variables, self._variables)
])
def _epsilon_greedy(self, info_state, legal_actions, epsilon):
"""Returns a valid epsilon-greedy action and valid action probs.
Action probabilities are given by a softmax over legal q-values.
Args:
info_state: hashable representation of the information state.
legal_actions: list of legal actions at `info_state`.
epsilon: float, probability of taking an exploratory action.
Returns:
A valid epsilon-greedy action and valid action probabilities.
"""
probs = np.zeros(self._num_actions)
if np.random.rand() < epsilon:
action = np.random.choice(legal_actions)
probs[legal_actions] = 1.0 / len(legal_actions)
else:
info_state = np.reshape(info_state, [1, -1])
q_values = self._session.run(
self._q_values, feed_dict={self._info_state_ph: info_state})[0]
legal_q_values = q_values[legal_actions]
action = legal_actions[np.argmax(legal_q_values)]
probs[action] = 1.0
return action, probs
def _get_epsilon(self, is_evaluation, power=1.0):
"""Returns the evaluation or decayed epsilon value."""
if is_evaluation:
return 0.0
decay_steps = min(self._step_counter, self._epsilon_decay_duration)
decayed_epsilon = (
self._epsilon_end + (self._epsilon_start - self._epsilon_end) *
(1 - decay_steps / self._epsilon_decay_duration)**power)
return decayed_epsilon
def learn(self):
"""Compute the loss on sampled transitions and perform a Q-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Returns:
The average loss obtained on this batch of transitions or `None`.
"""
if (len(self._replay_buffer) < self._batch_size or
len(self._replay_buffer) < self._min_buffer_size_to_learn):
return None
transitions = self._replay_buffer.sample(self._batch_size)
info_states = [t.info_state for t in transitions]
actions = [t.action for t in transitions]
rewards = [t.reward for t in transitions]
next_info_states = [t.next_info_state for t in transitions]
are_final_steps = [t.is_final_step for t in transitions]
legal_actions_mask = [t.legal_actions_mask for t in transitions]
loss, _ = self._session.run(
[self._loss, self._learn_step],
feed_dict={
self._info_state_ph: info_states,
self._action_ph: actions,
self._reward_ph: rewards,
self._is_final_step_ph: are_final_steps,
self._next_info_state_ph: next_info_states,
self._legal_actions_mask_ph: legal_actions_mask,
})
return loss
def _full_checkpoint_name(self, checkpoint_dir, name):
checkpoint_filename = "_".join([name, "pid" + str(self.player_id)])
return os.path.join(checkpoint_dir, checkpoint_filename)
def _latest_checkpoint_filename(self, name):
checkpoint_filename = "_".join([name, "pid" + str(self.player_id)])
return checkpoint_filename + "_latest"
def save(self, checkpoint_dir):
"""Saves the q network and the target q-network.
Note that this does not save the experience replay buffers and should
only be used to restore the agent's policy, not resume training.
Args:
checkpoint_dir: directory where checkpoints will be saved.
"""
for name, saver in self._savers:
path = saver.save(
self._session,
self._full_checkpoint_name(checkpoint_dir, name),
latest_filename=self._latest_checkpoint_filename(name))
logging.info("Saved to path: %s", path)
def has_checkpoint(self, checkpoint_dir):
for name, _ in self._savers:
if tf.train.latest_checkpoint(
self._full_checkpoint_name(checkpoint_dir, name),
os.path.join(checkpoint_dir,
self._latest_checkpoint_filename(name))) is None:
return False
return True
def restore(self, checkpoint_dir):
"""Restores the q network and the target q-network.
Note that this does not restore the experience replay buffers and should
only be used to restore the agent's policy, not resume training.
Args:
checkpoint_dir: directory from which checkpoints will be restored.
"""
for name, saver in self._savers:
full_checkpoint_dir = self._full_checkpoint_name(checkpoint_dir, name)
logging.info("Restoring checkpoint: %s", full_checkpoint_dir)
saver.restore(self._session, full_checkpoint_dir)
@property
def q_values(self):
return self._q_values
@property
def replay_buffer(self):
return self._replay_buffer
@property
def info_state_ph(self):
return self._info_state_ph
@property
def loss(self):
return self._last_loss_value
@property
def prev_timestep(self):
return self._prev_timestep
@property
def prev_action(self):
return self._prev_action
@property
def step_counter(self):
return self._step_counter
def _initialize(self):
initialization_weights = tf.group(
*[var.initializer for var in self._variables])
initialization_target_weights = tf.group(
*[var.initializer for var in self._target_variables])
initialization_opt = tf.group(
*[var.initializer for var in self._optimizer.variables()])
self._session.run(
tf.group(*[
initialization_weights, initialization_target_weights,
initialization_opt,
]))
def get_weights(self):
variables = [self._session.run(self._q_network.variables)]
variables.append(self._session.run(self._target_q_network.variables))
return variables
  def copy_with_noise(self, sigma=0.0, copy_weights=True):
    """Copies the object and perturbs it with noise.
Args:
      sigma: scale of the multiplicative Gaussian noise (1 + sigma * epsilon),
        where epsilon is a standard Gaussian variable that multiplies each
        model weight. sigma=0 means no perturbation.
copy_weights: Boolean determining whether to copy model weights (True) or
just model hyperparameters.
Returns:
      Perturbed copy of the model.
"""
_ = self._kwargs.pop("self", None)
copied_object = DQN(**self._kwargs)
q_network = getattr(copied_object, "_q_network")
target_q_network = getattr(copied_object, "_target_q_network")
if copy_weights:
copy_weights = tf.group(*[
va.assign(vb * (1 + sigma * tf.random.normal(vb.shape)))
for va, vb in zip(q_network.variables, self._q_network.variables)
])
self._session.run(copy_weights)
copy_target_weights = tf.group(*[
va.assign(vb * (1 + sigma * tf.random.normal(vb.shape)))
for va, vb in zip(target_q_network.variables,
self._target_q_network.variables)
])
self._session.run(copy_target_weights)
return copied_object
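# A minimal training-loop sketch for the DQN agent above (illustration only,
# not part of the original module). It assumes open_spiel.python's
# rl_environment is importable and uses Kuhn poker with one agent per player,
# purely to exercise the step() API; a real experiment would run many more
# episodes.
if __name__ == "__main__":
  from open_spiel.python import rl_environment

  env = rl_environment.Environment("kuhn_poker")
  state_size = env.observation_spec()["info_state"][0]
  num_actions = env.action_spec()["num_actions"]
  with tf.Session() as sess:
    agents = [
        DQN(sess, player_id=idx, state_representation_size=state_size,
            num_actions=num_actions, hidden_layers_sizes=[32],
            replay_buffer_capacity=1000, batch_size=32)
        for idx in range(2)
    ]
    for _ in range(10):
      time_step = env.reset()
      while not time_step.last():
        current_player = time_step.observations["current_player"]
        agent_output = agents[current_player].step(time_step)
        time_step = env.step([agent_output.action])
      for agent in agents:  # Let every agent observe the terminal step.
        agent.step(time_step)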
| open_spiel-master | open_spiel/python/algorithms/dqn.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful sequence form functions used in the MMD implementation."""
import numpy as np
from open_spiel.python import policy
_DELIMITER = " -=- "
_EMPTY_INFOSET_KEYS = ["***EMPTY_INFOSET_P0***", "***EMPTY_INFOSET_P1***"]
_EMPTY_INFOSET_ACTION_KEYS = [
"***EMPTY_INFOSET_ACTION_P0***", "***EMPTY_INFOSET_ACTION_P1***"
]
def _get_isa_key(info_state, action):
return info_state + _DELIMITER + str(action)
def _get_action_from_key(isa_key):
_, action_str = isa_key.split(_DELIMITER)
return int(action_str)
def _get_infostate_from_key(isa_key):
assert not is_root(isa_key), "Cannot use this method for root nodes."
infostate, _ = isa_key.split(_DELIMITER)
return infostate
def is_root(key):
  return key in _EMPTY_INFOSET_KEYS + _EMPTY_INFOSET_ACTION_KEYS
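# Example of the key encoding used throughout this module (illustration only,
# not original code): _get_isa_key("2pb", 1) returns "2pb -=- 1";
# _get_action_from_key recovers the action 1 and _get_infostate_from_key
# recovers "2pb". The root placeholders in _EMPTY_INFOSET_KEYS and
# _EMPTY_INFOSET_ACTION_KEYS are detected by is_root and are never split this
# way.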
def construct_vars(game):
"""Construct useful sequence from variables from game.
Args:
    game: The spiel game to solve (must be zero-sum, sequential, and have
      chance mode of deterministic or explicit stochastic).
Returns:
    A 6-tuple of sequence form variables from _construct_vars, obtained by
    recursively traversing the game tree.
"""
initial_state = game.new_initial_state()
# initialize variables
infosets = [{_EMPTY_INFOSET_KEYS[0]: 0}, {_EMPTY_INFOSET_KEYS[1]: 0}]
infoset_actions_to_seq = [{
_EMPTY_INFOSET_ACTION_KEYS[0]: 0
}, {
_EMPTY_INFOSET_ACTION_KEYS[1]: 0
}]
infoset_action_maps = [{
_EMPTY_INFOSET_KEYS[0]: [_EMPTY_INFOSET_ACTION_KEYS[0]]
}, {
_EMPTY_INFOSET_KEYS[1]: [_EMPTY_INFOSET_ACTION_KEYS[1]]
}]
# infoset_action_maps = [{}, {}]
payoff_dict = dict()
infoset_parent_map = [{
_EMPTY_INFOSET_ACTION_KEYS[0]: None
}, {
_EMPTY_INFOSET_ACTION_KEYS[1]: None
}]
infoset_actions_children = [{
_EMPTY_INFOSET_ACTION_KEYS[0]: []
}, {
_EMPTY_INFOSET_ACTION_KEYS[1]: []
}]
_construct_vars(initial_state, infosets, infoset_actions_to_seq,
infoset_action_maps, infoset_parent_map, 1.0,
_EMPTY_INFOSET_KEYS[:], _EMPTY_INFOSET_ACTION_KEYS[:],
payoff_dict, infoset_actions_children)
payoff_mat = _construct_numpy_vars(payoff_dict, infoset_actions_to_seq)
return (infosets, infoset_actions_to_seq,
infoset_action_maps, infoset_parent_map,
payoff_mat, infoset_actions_children)
def uniform_random_seq(game, infoset_actions_to_seq):
"""Generate uniform random sequence.
The sequence generated is equivalent to a uniform random tabular policy.
Args:
game: the spiel game to solve (must be zero-sum, sequential, and have
chance mode of deterministic or explicit stochastic).
infoset_actions_to_seq: a list of dicts, one per player, that maps a
string of (infostate, action) pair to an id.
Returns:
A list of NumPy arrays, one for each player.
"""
policies = policy.TabularPolicy(game)
initial_state = game.new_initial_state()
sequences = [
np.ones(len(infoset_actions_to_seq[0])),
np.ones(len(infoset_actions_to_seq[1]))
]
_policy_to_sequence(initial_state, policies, sequences,
infoset_actions_to_seq, [1, 1])
return sequences
def _construct_vars(state, infosets, infoset_actions_to_seq,
infoset_action_maps, infoset_parent_map, chance_reach,
parent_is_keys, parent_isa_keys, payoff_dict,
infoset_actions_children):
"""Recursively builds maps and the sequence form payoff matrix.
Args:
state: pyspiel (OpenSpiel) state
infosets: a list of dicts, one per player, that maps infostate to an id.
The dicts are filled by this function and should initially only
contain root values.
infoset_actions_to_seq: a list of dicts, one per player, that maps a
string of (infostate, action) pair to an id. The dicts are filled by
      this function and should initially only contain the root values.
infoset_action_maps: a list of dicts, one per player, that maps each
info_state to a list of (infostate, action) string.
infoset_parent_map: a list of dicts, one per player, that maps each
info_state to an (infostate, action) string.
chance_reach: the contribution of chance's reach probability (should
start at 1).
parent_is_keys: a list of parent information state keys for this state
parent_isa_keys: a list of parent (infostate, action) keys
payoff_dict: a dict that maps ((infostate, action), (infostate, action))
to the chance weighted reward
infoset_actions_children: a list of dicts, one for each player, mapping
(infostate, action) keys to reachable infostates for each player
"""
if state.is_terminal():
returns = state.returns()
matrix_index = (parent_isa_keys[0], parent_isa_keys[1])
payoff_dict.setdefault(matrix_index, 0)
# note the payoff matrix A is for the min max problem x.T @ A y
# where x is player 0 in openspiel
payoff_dict[matrix_index] += -returns[0] * chance_reach
return
if state.is_chance_node():
for action, prob in state.chance_outcomes():
new_state = state.child(action)
_construct_vars(new_state, infosets, infoset_actions_to_seq,
infoset_action_maps, infoset_parent_map,
prob * chance_reach, parent_is_keys, parent_isa_keys,
payoff_dict, infoset_actions_children)
return
player = state.current_player()
info_state = state.information_state_string(player)
legal_actions = state.legal_actions(player)
# Add to the infostate maps
if info_state not in infosets[player]:
infosets[player][info_state] = len(infosets[player])
if info_state not in infoset_action_maps[player]:
infoset_action_maps[player][info_state] = []
# Add to infoset to parent infoset action map
if info_state not in infoset_parent_map[player]:
infoset_parent_map[player][info_state] = parent_isa_keys[player]
# add as child to parent
if parent_isa_keys[player] in infoset_actions_children[player]:
if info_state not in infoset_actions_children[player][
parent_isa_keys[player]]:
infoset_actions_children[player][parent_isa_keys[player]].append(
info_state)
else:
infoset_actions_children[player][parent_isa_keys[player]] = [info_state]
new_parent_is_keys = parent_is_keys[:]
new_parent_is_keys[player] = info_state
for action in legal_actions:
isa_key = _get_isa_key(info_state, action)
if isa_key not in infoset_actions_to_seq[player]:
infoset_actions_to_seq[player][isa_key] = len(
infoset_actions_to_seq[player])
if isa_key not in infoset_action_maps[player][info_state]:
infoset_action_maps[player][info_state].append(isa_key)
new_parent_isa_keys = parent_isa_keys[:]
new_parent_isa_keys[player] = isa_key
new_state = state.child(action)
_construct_vars(new_state, infosets, infoset_actions_to_seq,
infoset_action_maps, infoset_parent_map, chance_reach,
new_parent_is_keys, new_parent_isa_keys, payoff_dict,
infoset_actions_children)
def _construct_numpy_vars(payoff_dict, infoset_actions_to_seq):
"""Convert sequence form payoff dict to numpy array.
Args:
payoff_dict: a dict that maps ((infostate, action), (infostate, action))
to the chance weighted reward.
infoset_actions_to_seq: a list of dicts, one per player, that maps a
string of (infostate, action) pair to an id.
Returns:
A numpy array corresponding to the chance weighted rewards
i.e. the sequence form payoff matrix.
"""
sequence_sizes = (len(infoset_actions_to_seq[0]),
len(infoset_actions_to_seq[1]))
payoff_mat = np.zeros(sequence_sizes)
for p1_sequence, i in infoset_actions_to_seq[0].items():
for p2_sequence, j in infoset_actions_to_seq[1].items():
payoff_mat[i, j] = payoff_dict.get((p1_sequence, p2_sequence), 0)
return payoff_mat
def sequence_to_policy(sequences, game, infoset_actions_to_seq,
infoset_action_maps):
"""Convert sequence form policies to the realization-equivalent tabular ones.
Args:
sequences: list of two sequence form policies, one for each player.
game: a spiel game with two players.
infoset_actions_to_seq: a list of dicts, one per player, that maps a
string of (infostate, action) pair to an id.
infoset_action_maps: a list of dicts, one per player, that maps each
info_state to a list of (infostate, action) string.
Returns:
A TabularPolicy object.
"""
policies = policy.TabularPolicy(game)
for player in range(2):
for info_state in infoset_action_maps[player]:
if is_root(info_state):
continue
state_policy = policies.policy_for_key(info_state)
total_weight = 0
num_actions = 0
for isa_key in infoset_action_maps[player][info_state]:
total_weight += sequences[player][infoset_actions_to_seq[player]
[isa_key]]
num_actions += 1
unif_pr = 1.0 / num_actions
for isa_key in infoset_action_maps[player][info_state]:
rel_weight = sequences[player][infoset_actions_to_seq[player][isa_key]]
_, action_str = isa_key.split(_DELIMITER)
action = int(action_str)
pr_action = rel_weight / total_weight if total_weight > 0 else unif_pr
state_policy[action] = pr_action
return policies
def policy_to_sequence(game, policies, infoset_actions_to_seq):
"""Converts a TabularPolicy object for a two-player game.
The converted policy is its realization-equivalent sequence form one.
Args:
game: a two-player open spiel game.
policies: a TabularPolicy object.
infoset_actions_to_seq: a list of dicts, one per player, that maps a
string of (infostate, action) pair to an id.
Returns:
A list of numpy arrays, one for each player.
"""
initial_state = game.new_initial_state()
sequences = [
np.ones(len(infoset_actions_to_seq[0])),
np.ones(len(infoset_actions_to_seq[1]))
]
_policy_to_sequence(initial_state, policies, sequences,
infoset_actions_to_seq, [1, 1])
return sequences
def _policy_to_sequence(state, policies, sequences, infoset_actions_to_seq,
parent_seq_val):
"""Converts a TabularPolicy object to its equivalent sequence form.
This method modifies the sequences inplace and should not be called directly.
Args:
state: an openspiel state.
policies: a TabularPolicy object.
sequences: list of numpy arrays to be modified.
infoset_actions_to_seq: a list of dicts, one per player, that maps a
string of (infostate, action) pair to an id.
parent_seq_val: list of parent sequence values, this method should be
called with initial value of [1,1].
"""
if state.is_terminal():
return
if state.is_chance_node():
for action, _ in state.chance_outcomes():
new_state = state.child(action)
_policy_to_sequence(new_state, policies, sequences,
infoset_actions_to_seq, parent_seq_val)
return
player = state.current_player()
info_state = state.information_state_string(player)
legal_actions = state.legal_actions(player)
state_policy = policies.policy_for_key(info_state)
for action in legal_actions:
isa_key = _get_isa_key(info_state, action)
# update sequence form
sequences[player][infoset_actions_to_seq[player]
[isa_key]] = parent_seq_val[player] * state_policy[action]
new_parent_seq_val = parent_seq_val[:]
new_parent_seq_val[player] = sequences[player][
infoset_actions_to_seq[player][isa_key]]
new_state = state.child(action)
_policy_to_sequence(new_state, policies, sequences, infoset_actions_to_seq,
new_parent_seq_val)
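# A minimal usage sketch for the helpers above (illustration only, not part of
# the original module). It assumes pyspiel is importable and uses Kuhn poker:
# the game is decomposed into sequence-form variables, a uniform random
# sequence-form strategy is built, and that strategy is mapped back to a
# tabular policy.
if __name__ == "__main__":
  import pyspiel

  example_game = pyspiel.load_game("kuhn_poker")
  (_, infoset_actions_to_seq, infoset_action_maps, _, payoff_mat,
   _) = construct_vars(example_game)
  print(payoff_mat.shape)  # One row/column per sequence, empty one included.
  example_sequences = uniform_random_seq(example_game, infoset_actions_to_seq)
  tabular = sequence_to_policy(example_sequences, example_game,
                               infoset_actions_to_seq, infoset_action_maps)
  print(tabular.action_probability_array)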
| open_spiel-master | open_spiel/python/algorithms/sequence_form_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Python implementation of the magnetic mirror descent (MMD) algorithm.
The algorithm operates over the sequence form with dilated entropy.
See https://arxiv.org/abs/2206.05825.
One iteration of MMD consists of:
1) Compute gradients of dilated entropy
and payoffs for current sequence form policies.
2) Compute behavioural form policy starting from the bottom
of the tree and updating gradients of parent nodes along the way.
3) Convert behavioural form policy to equivalent sequence form policy.
The last sequence form policy converges linearly (exponentially fast)
to a \alpha-reduced normal-form QRE.
"""
import copy
import warnings
import numpy as np
from scipy import stats as scipy_stats
from open_spiel.python import policy
from open_spiel.python.algorithms.sequence_form_utils import _EMPTY_INFOSET_ACTION_KEYS
from open_spiel.python.algorithms.sequence_form_utils import _EMPTY_INFOSET_KEYS
from open_spiel.python.algorithms.sequence_form_utils import _get_action_from_key
from open_spiel.python.algorithms.sequence_form_utils import construct_vars
from open_spiel.python.algorithms.sequence_form_utils import is_root
from open_spiel.python.algorithms.sequence_form_utils import policy_to_sequence
from open_spiel.python.algorithms.sequence_form_utils import sequence_to_policy
from open_spiel.python.algorithms.sequence_form_utils import uniform_random_seq
import pyspiel
def neg_entropy(probs):
return -scipy_stats.entropy(probs)
def softmax(x):
unnormalized = np.exp(x - np.max(x))
return unnormalized / np.sum(unnormalized)
def divergence(x, y, psi_x, psi_y, grad_psi_y):
"""Compute Bregman divergence between x and y, B_psi(x;y).
Args:
x: Numpy array.
y: Numpy array.
psi_x: Value of psi evaluated at x.
psi_y: Value of psi evaluated at y.
grad_psi_y: Gradient of psi evaluated at y.
Returns:
Scalar.
"""
return psi_x - psi_y - np.dot(grad_psi_y, x - y)
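# A small standalone check of the Bregman divergence above (illustration only,
# not part of the original module): with psi chosen as negative entropy,
# B_psi(x; y) reduces to the KL divergence KL(x || y).
if __name__ == "__main__":
  example_x = np.array([0.7, 0.3])
  example_y = np.array([0.5, 0.5])
  grad_neg_entropy_y = np.log(example_y) + 1.0
  bregman = divergence(example_x, example_y, neg_entropy(example_x),
                       neg_entropy(example_y), grad_neg_entropy_y)
  print(bregman, scipy_stats.entropy(example_x, example_y))  # Both ~0.0823.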
def dilated_dgf_divergence(mmd_1, mmd_2):
"""Bregman divergence between two MMDDilatedEnt objects.
The value is equivalent to a sum of two Bregman divergences
over the sequence form, one for each player.
Args:
mmd_1: MMDDilatedEnt Object
mmd_2: MMDDilatedEnt Object
Returns:
Scalar.
"""
dgf_values = [mmd_1.dgf_eval(), mmd_2.dgf_eval()]
dgf_grads = mmd_2.dgf_grads()
div = 0
for player in range(2):
div += divergence(mmd_1.sequences[player], mmd_2.sequences[player],
dgf_values[0][player], dgf_values[1][player],
dgf_grads[player])
return div
class MMDDilatedEnt(object):
r"""Implements Magnetic Mirror Descent (MMD) with Dilated Entropy.
The implementation uses the sequence form representation.
The policies converge to a \alpha-reduced normal form QRE of a
two-player zero-sum extensive-form game. If \alpha is set
to zero then the method is equivalent to mirror descent ascent
over the sequence form with dilated entropy and the policies
  will converge on average to a Nash equilibrium with
  the appropriate stepsize schedule (or an approximate equilibrium
  for a fixed stepsize).
The main iteration loop is implemented in `update_sequences`:
```python
game = pyspiel.load_game("game_name")
mmd = MMDDilatedEnt(game, alpha=0.1)
for i in range(num_iterations):
mmd.update_sequences()
```
The gap in the regularized game (i.e. 2x exploitability) converges
to zero and can be computed:
```python
gap = mmd.get_gap()
```
The policy (i.e. behavioural form policy) can be retrieved:
```python
policies = mmd.get_policies()
```
The average sequences and policies can be retrieved:
```python
avg_sequences = mmd.get_avg_sequences()
avg_policies = mmd.get_avg_policies()
```
"""
empy_state_action_keys = _EMPTY_INFOSET_ACTION_KEYS[:]
empty_infoset_keys = _EMPTY_INFOSET_KEYS[:]
def __init__(self, game, alpha, stepsize=None):
"""Initialize the solver object.
Args:
      game: a zero-sum spiel game with two players.
alpha: weight of dilated entropy regularization. If alpha > 0 MMD
        will converge to an alpha-QRE. If alpha = 0 MMD will converge to
Nash on average.
stepsize: MMD stepsize. Will be set automatically if None.
"""
assert game.num_players() == 2
assert game.get_type().utility == pyspiel.GameType.Utility.ZERO_SUM
assert game.get_type().dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL
assert (game.get_type().chance_mode
== pyspiel.GameType.ChanceMode.DETERMINISTIC or
game.get_type().chance_mode
== pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC)
assert alpha >= 0
self.game = game
self.alpha = float(alpha)
(self.infosets, self.infoset_actions_to_seq, self.infoset_action_maps,
self.infoset_parent_map, self.payoff_mat,
self.infoset_actions_children) = construct_vars(game)
if stepsize is not None:
self.stepsize = stepsize
else:
self.stepsize = self.alpha / (np.max(np.abs(self.payoff_mat))**2)
if self.stepsize == 0.:
warnings.warn("MMD stepsize is 0, probably because alpha = 0.")
self.sequences = uniform_random_seq(game, self.infoset_actions_to_seq)
self.avg_sequences = copy.deepcopy(self.sequences)
self.iteration_count = 1
def get_parent_seq(self, player, infostate):
"""Looks up the parent sequence value for a given infostate.
Args:
player: player number, either 0 or 1.
infostate: infostate id string.
Returns:
Scalar.
"""
parent_isa_key = self.infoset_parent_map[player][infostate]
seq_id = self.infoset_actions_to_seq[player][parent_isa_key]
parent_seq = self.sequences[player][seq_id]
return parent_seq
def get_infostate_seq(self, player, infostate):
"""Gets vector of sequence form values corresponding to a given infostate.
Args:
player: player number, either 0 or 1.
infostate: infostate id string.
Returns:
Numpy array.
"""
seq_idx = [
self.infoset_actions_to_seq[player][isa_key]
for isa_key in self.infoset_action_maps[player][infostate]
]
seqs = np.array([self.sequences[player][idx] for idx in seq_idx])
return seqs
def dgf_eval(self):
"""Computes the value of dilated entropy for current sequences.
Returns:
List of values, one for each player.
"""
dgf_value = [0., 0.]
for player in range(2):
for infostate in self.infosets[player]:
if is_root(infostate):
continue
parent_seq = self.get_parent_seq(player, infostate)
if parent_seq > 0:
children_seq = self.get_infostate_seq(player, infostate)
dgf_value[player] += parent_seq * neg_entropy(
children_seq / parent_seq)
return dgf_value
def dgf_grads(self):
"""Computes gradients of dilated entropy for each player and current seqs.
Returns:
A list of numpy arrays.
"""
grads = [np.zeros(len(self.sequences[0])), np.zeros(len(self.sequences[1]))]
for player in range(2):
for infostate in self.infosets[player]:
# infostates contain empty sequence for root variable
if is_root(infostate):
continue
parent_seq = self.get_parent_seq(player, infostate)
if parent_seq > 0:
for isa_key in self.infoset_action_maps[player][infostate]:
# compute infostate term
seq_idx = self.infoset_actions_to_seq[player][isa_key]
seq = self.sequences[player][seq_idx]
grads[player][seq_idx] += np.log(seq / parent_seq) + 1
# compute terms from children if there are any
num_children = len(self.infoset_actions_children[player].get(
isa_key, []))
grads[player][seq_idx] -= num_children
return grads
def update_sequences(self):
"""Performs one step of MMD."""
self.iteration_count += 1
psi_grads = self.dgf_grads()
# pylint: disable=invalid-unary-operand-type
grads = [
(self.stepsize * self.payoff_mat @ self.sequences[1] - psi_grads[0]) /
((1 + self.stepsize * self.alpha)),
(-self.stepsize * self.payoff_mat.T @ self.sequences[0] - psi_grads[1])
/ (1 + self.stepsize * self.alpha)
]
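    # The mirror-descent step has a closed form: `_update_state_sequences`
    # walks each player's infostates bottom-up and sets the new behavioural
    # policy at each infostate to a softmax of the negated accumulated
    # gradient.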
new_policy = policy.TabularPolicy(self.game)
for player in range(2):
self._update_state_sequences(self.empty_infoset_keys[player],
grads[player], player, new_policy)
self.sequences = policy_to_sequence(self.game, new_policy,
self.infoset_actions_to_seq)
self.update_avg_sequences()
def _update_state_sequences(self, infostate, g, player, pol):
"""Update the state sequences."""
isa_keys = self.infoset_action_maps[player][infostate]
seq_idx = [
self.infoset_actions_to_seq[player][isa_key] for isa_key in isa_keys
]
for isa_key, isa_idx in zip(isa_keys, seq_idx):
# update children first if there are any
children = self.infoset_actions_children[player].get(isa_key, [])
for child in children:
self._update_state_sequences(child, g, player, pol)
# update gradient
child_isa_keys = self.infoset_action_maps[player][child]
child_seq_idx = [
self.infoset_actions_to_seq[player][child_isa_key]
for child_isa_key in child_isa_keys
]
g_child = np.array([g[idx] for idx in child_seq_idx])
actions_child = [
_get_action_from_key(child_isa_key)
for child_isa_key in child_isa_keys
]
policy_child = pol.policy_for_key(child)[:]
policy_child = np.array([policy_child[a] for a in actions_child])
g[isa_idx] += np.dot(g_child, policy_child)
g[isa_idx] += neg_entropy(policy_child)
# no update needed for empty sequence
if is_root(infostate):
return
state_policy = pol.policy_for_key(infostate)
g_infostate = np.array([g[idx] for idx in seq_idx])
actions = [_get_action_from_key(isa_key) for isa_key in isa_keys]
new_state_policy = softmax(-g_infostate)
for action, pr in zip(actions, new_state_policy):
state_policy[action] = pr
def get_gap(self):
"""Computes saddle point gap of the regularized game.
The gap measures convergence to the alpha-QRE.
Returns:
Scalar.
"""
assert self.alpha > 0, "gap cannot be computed for alpha = 0"
grads = [(self.payoff_mat @ self.sequences[1]) / (self.alpha),
(-self.payoff_mat.T @ self.sequences[0]) / (self.alpha)]
dgf_values = self.dgf_eval()
br_policy = policy.TabularPolicy(self.game)
for player in range(2):
self._update_state_sequences(self.empty_infoset_keys[player],
grads[player], player, br_policy)
br_sequences = policy_to_sequence(self.game, br_policy,
self.infoset_actions_to_seq)
curr_sequences = copy.deepcopy(self.sequences)
self.sequences = br_sequences
br_dgf_values = self.dgf_eval()
self.sequences = curr_sequences
    # Saddle-point gap at sequences (x, y):
    #   max_y' [x^T A y' - alpha*d(y')] + alpha*d(y)
    #   - min_x' [x'^T A y + alpha*d(x')] + alpha*d(x)
gap = 0
gap += curr_sequences[0].T @ self.payoff_mat @ br_sequences[1]
gap += self.alpha * (dgf_values[1] - br_dgf_values[1])
gap += self.alpha * (dgf_values[0] - br_dgf_values[0])
gap += -br_sequences[0].T @ self.payoff_mat @ curr_sequences[1]
return gap
def update_avg_sequences(self):
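    # Uniform running average of the sequence-form iterates:
    #   avg_t = ((t - 1) * avg_{t-1} + seq_t) / t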
for player in range(2):
self.avg_sequences[player] = self.avg_sequences[player] * (
self.iteration_count - 1) + self.sequences[player]
self.avg_sequences[
player] = self.avg_sequences[player] / self.iteration_count
def current_sequences(self):
"""Retrieves the current sequences.
Returns:
      the current sequences for each player as a list of numpy arrays.
"""
return self.sequences
def get_avg_sequences(self):
"""Retrieves the average sequences.
Returns:
      the average sequences for each player as a list of numpy arrays.
"""
return self.avg_sequences
def get_policies(self):
"""Convert current sequences to equivalent behavioural form policies.
Returns:
spiel TabularPolicy Object.
"""
return sequence_to_policy(self.sequences, self.game,
self.infoset_actions_to_seq,
self.infoset_action_maps)
def get_avg_policies(self):
"""Convert average sequences to equivalent behavioural form policies.
Returns:
spiel TabularPolicy Object.
"""
return sequence_to_policy(self.avg_sequences, self.game,
self.infoset_actions_to_seq,
self.infoset_action_maps)
| open_spiel-master | open_spiel/python/algorithms/mmd_dilated.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.nfsp."""
import tensorflow.compat.v1 as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import nfsp
# Temporarily disable TF2 behavior until code is updated.
tf.disable_v2_behavior()
class NFSPTest(tf.test.TestCase):
def test_run_kuhn(self):
env = rl_environment.Environment("kuhn_poker")
state_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
with self.session() as sess:
agents = [
nfsp.NFSP( # pylint: disable=g-complex-comprehension
sess,
player_id,
state_representation_size=state_size,
num_actions=num_actions,
hidden_layers_sizes=[16],
reservoir_buffer_capacity=10,
anticipatory_param=0.1) for player_id in [0, 1]
]
sess.run(tf.global_variables_initializer())
for unused_ep in range(10):
time_step = env.reset()
while not time_step.last():
current_player = time_step.observations["current_player"]
current_agent = agents[current_player]
agent_output = current_agent.step(time_step)
time_step = env.step([agent_output.action])
for agent in agents:
agent.step(time_step)
class ReservoirBufferTest(tf.test.TestCase):
def test_reservoir_buffer_add(self):
reservoir_buffer = nfsp.ReservoirBuffer(reservoir_buffer_capacity=10)
self.assertEmpty(reservoir_buffer)
reservoir_buffer.add("entry1")
self.assertLen(reservoir_buffer, 1)
reservoir_buffer.add("entry2")
self.assertLen(reservoir_buffer, 2)
self.assertIn("entry1", reservoir_buffer)
self.assertIn("entry2", reservoir_buffer)
def test_reservoir_buffer_max_capacity(self):
reservoir_buffer = nfsp.ReservoirBuffer(reservoir_buffer_capacity=2)
reservoir_buffer.add("entry1")
reservoir_buffer.add("entry2")
reservoir_buffer.add("entry3")
self.assertLen(reservoir_buffer, 2)
def test_reservoir_buffer_sample(self):
replay_buffer = nfsp.ReservoirBuffer(reservoir_buffer_capacity=3)
replay_buffer.add("entry1")
replay_buffer.add("entry2")
replay_buffer.add("entry3")
samples = replay_buffer.sample(3)
self.assertIn("entry1", samples)
self.assertIn("entry2", samples)
self.assertIn("entry3", samples)
if __name__ == "__main__":
tf.test.main()
| open_spiel-master | open_spiel/python/algorithms/nfsp_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
The algorithm defines `advantage` and `strategy` networks that compute
advantages used to do regret matching across information sets and to
approximate the strategy profile of the game. To train these networks, a
reservoir buffer memory (other data structures may be used) accumulates
samples with which to train the networks.
This implementation uses skip connections as described in the paper if two
consecutive layers of the advantage or policy network have the same number
of units, except for the last connection. Before the last hidden layer
a layer normalization is applied.
"""
import collections
import contextlib
import os
import random
import numpy as np
import tensorflow as tf
from open_spiel.python import policy
import pyspiel
# The size of the shuffle buffer used to reshuffle part of the data each
# epoch within one training iteration
ADVANTAGE_TRAIN_SHUFFLE_SIZE = 100000
STRATEGY_TRAIN_SHUFFLE_SIZE = 1000000
# TODO(author3) Refactor into data structures lib.
class ReservoirBuffer(object):
"""Allows uniform sampling over a stream of data.
This class supports the storage of arbitrary elements, such as observation
tensors, integer actions, etc.
See https://en.wikipedia.org/wiki/Reservoir_sampling for more details.
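  For illustration (capacity and inputs chosen arbitrarily):
    buf = ReservoirBuffer(reservoir_buffer_capacity=2)
    for x in range(100):
      buf.add(x)
    samples = buf.sample(2)  # two elements drawn uniformly from the 100 seen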
"""
def __init__(self, reservoir_buffer_capacity):
self._reservoir_buffer_capacity = reservoir_buffer_capacity
self._data = []
self._add_calls = 0
def add(self, element):
"""Potentially adds `element` to the reservoir buffer.
Args:
element: data to be added to the reservoir buffer.
"""
if len(self._data) < self._reservoir_buffer_capacity:
self._data.append(element)
else:
idx = np.random.randint(0, self._add_calls + 1)
if idx < self._reservoir_buffer_capacity:
self._data[idx] = element
self._add_calls += 1
def sample(self, num_samples):
"""Returns `num_samples` uniformly sampled from the buffer.
Args:
num_samples: `int`, number of samples to draw.
Returns:
An iterable over `num_samples` random elements of the buffer.
Raises:
      ValueError: If there are fewer than `num_samples` elements in the buffer.
"""
if len(self._data) < num_samples:
raise ValueError('{} elements could not be sampled from size {}'.format(
num_samples, len(self._data)))
return random.sample(self._data, num_samples)
def clear(self):
self._data = []
self._add_calls = 0
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
@property
def data(self):
return self._data
def shuffle_data(self):
random.shuffle(self._data)
class SkipDense(tf.keras.layers.Layer):
"""Dense Layer with skip connection."""
def __init__(self, units, **kwargs):
super().__init__(**kwargs)
self.hidden = tf.keras.layers.Dense(units, kernel_initializer='he_normal')
def call(self, x):
return self.hidden(x) + x
class PolicyNetwork(tf.keras.Model):
"""Implements the policy network as an MLP.
  Implements the policy network as an MLP with skip connections in adjacent
layers with the same number of units, except for the last hidden connection
where a layer normalization is applied.
"""
def __init__(self,
input_size,
policy_network_layers,
num_actions,
activation='leakyrelu',
**kwargs):
super().__init__(**kwargs)
self._input_size = input_size
self._num_actions = num_actions
if activation == 'leakyrelu':
self.activation = tf.keras.layers.LeakyReLU(alpha=0.2)
elif activation == 'relu':
self.activation = tf.keras.layers.ReLU()
else:
self.activation = activation
self.softmax = tf.keras.layers.Softmax()
self.hidden = []
prevunits = 0
for units in policy_network_layers[:-1]:
if prevunits == units:
self.hidden.append(SkipDense(units))
else:
self.hidden.append(
tf.keras.layers.Dense(units, kernel_initializer='he_normal'))
prevunits = units
self.normalization = tf.keras.layers.LayerNormalization()
self.lastlayer = tf.keras.layers.Dense(
policy_network_layers[-1], kernel_initializer='he_normal')
self.out_layer = tf.keras.layers.Dense(num_actions)
@tf.function
def call(self, inputs):
"""Applies Policy Network.
Args:
inputs: Tuple representing (info_state, legal_action_mask)
Returns:
Action probabilities
"""
x, mask = inputs
for layer in self.hidden:
x = layer(x)
x = self.activation(x)
x = self.normalization(x)
x = self.lastlayer(x)
x = self.activation(x)
x = self.out_layer(x)
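    # Mask illegal actions with a large negative logit so that the softmax
    # below assigns them (numerically) zero probability.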
x = tf.where(mask == 1, x, -10e20)
x = self.softmax(x)
return x
class AdvantageNetwork(tf.keras.Model):
"""Implements the advantage network as an MLP.
Implements the advantage network as an MLP with skip connections in
adjacent layers with the same number of units, except for the last hidden
connection where a layer normalization is applied.
"""
def __init__(self,
input_size,
adv_network_layers,
num_actions,
activation='leakyrelu',
**kwargs):
super().__init__(**kwargs)
self._input_size = input_size
self._num_actions = num_actions
if activation == 'leakyrelu':
self.activation = tf.keras.layers.LeakyReLU(alpha=0.2)
elif activation == 'relu':
self.activation = tf.keras.layers.ReLU()
else:
self.activation = activation
self.hidden = []
prevunits = 0
for units in adv_network_layers[:-1]:
if prevunits == units:
self.hidden.append(SkipDense(units))
else:
self.hidden.append(
tf.keras.layers.Dense(units, kernel_initializer='he_normal'))
prevunits = units
self.normalization = tf.keras.layers.LayerNormalization()
self.lastlayer = tf.keras.layers.Dense(
adv_network_layers[-1], kernel_initializer='he_normal')
self.out_layer = tf.keras.layers.Dense(num_actions)
@tf.function
def call(self, inputs):
"""Applies Policy Network.
Args:
inputs: Tuple representing (info_state, legal_action_mask)
Returns:
Cumulative regret for each info_state action
"""
x, mask = inputs
for layer in self.hidden:
x = layer(x)
x = self.activation(x)
x = self.normalization(x)
x = self.lastlayer(x)
x = self.activation(x)
x = self.out_layer(x)
x = mask * x
return x
class DeepCFRSolver(policy.Policy):
"""Implements a solver for the Deep CFR Algorithm.
See https://arxiv.org/abs/1811.00164.
Define all networks and sampling buffers/memories. Derive losses & learning
steps. Initialize the game state and algorithmic variables.
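  A rough usage sketch (game name and hyperparameters below are illustrative
  only):
    import pyspiel
    game = pyspiel.load_game("kuhn_poker")
    solver = DeepCFRSolver(game, num_iterations=10, num_traversals=40)
    _, advantage_losses, policy_loss = solver.solve()
    # The solver implements `policy.Policy`, so
    # `solver.action_probabilities(state)` can then be queried at any
    # decision state of the game.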
"""
def __init__(self,
game,
policy_network_layers=(256, 256),
advantage_network_layers=(128, 128),
num_iterations: int = 100,
num_traversals: int = 100,
learning_rate: float = 1e-3,
batch_size_advantage: int = 2048,
batch_size_strategy: int = 2048,
memory_capacity: int = int(1e6),
policy_network_train_steps: int = 5000,
advantage_network_train_steps: int = 750,
reinitialize_advantage_networks: bool = True,
save_advantage_networks: str = None,
save_strategy_memories: str = None,
infer_device='cpu',
train_device='cpu'):
"""Initialize the Deep CFR algorithm.
Args:
game: Open Spiel game.
policy_network_layers: (list[int]) Layer sizes of strategy net MLP.
advantage_network_layers: (list[int]) Layer sizes of advantage net MLP.
num_iterations: Number of iterations.
num_traversals: Number of traversals per iteration.
learning_rate: Learning rate.
batch_size_advantage: (int) Batch size to sample from advantage memories.
batch_size_strategy: (int) Batch size to sample from strategy memories.
memory_capacity: Number of samples that can be stored in memory.
policy_network_train_steps: Number of policy network training steps (one
policy training iteration at the end).
advantage_network_train_steps: Number of advantage network training steps
(per iteration).
reinitialize_advantage_networks: Whether to re-initialize the advantage
network before training on each iteration.
      save_advantage_networks: If provided, the advantage network from each
        iteration is saved in the given folder. This can be useful to implement
        SD-CFR https://arxiv.org/abs/1901.07621
      save_strategy_memories: Saves the collected strategy memories as a
        tfrecords file in the given location. This is not affected by
        memory_capacity. All memories are saved to disk and not kept in memory.
infer_device: device used for TF-operations in the traversal branch.
Format is anything accepted by tf.device
train_device: device used for TF-operations in the NN training steps.
Format is anything accepted by tf.device
"""
all_players = list(range(game.num_players()))
super(DeepCFRSolver, self).__init__(game, all_players)
self._game = game
if game.get_type().dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
# `_traverse_game_tree` does not take into account this option.
      raise ValueError('Simultaneous games are not supported.')
self._batch_size_advantage = batch_size_advantage
self._batch_size_strategy = batch_size_strategy
self._policy_network_train_steps = policy_network_train_steps
self._advantage_network_train_steps = advantage_network_train_steps
self._policy_network_layers = policy_network_layers
self._advantage_network_layers = advantage_network_layers
self._num_players = game.num_players()
self._root_node = self._game.new_initial_state()
self._embedding_size = len(self._root_node.information_state_tensor(0))
self._num_iterations = num_iterations
self._num_traversals = num_traversals
self._reinitialize_advantage_networks = reinitialize_advantage_networks
self._num_actions = game.num_distinct_actions()
self._iteration = 1
self._learning_rate = learning_rate
self._save_advantage_networks = save_advantage_networks
self._save_strategy_memories = save_strategy_memories
self._infer_device = infer_device
self._train_device = train_device
self._memories_tfrecordpath = None
self._memories_tfrecordfile = None
# Initialize file save locations
if self._save_advantage_networks:
os.makedirs(self._save_advantage_networks, exist_ok=True)
if self._save_strategy_memories:
if os.path.isdir(self._save_strategy_memories):
self._memories_tfrecordpath = os.path.join(
self._save_strategy_memories, 'strategy_memories.tfrecord')
else:
os.makedirs(
os.path.split(self._save_strategy_memories)[0], exist_ok=True)
self._memories_tfrecordpath = self._save_strategy_memories
    # Initialize policy network, loss, optimizer
self._reinitialize_policy_network()
    # Initialize advantage networks, losses, optimizers
self._adv_networks = []
self._adv_networks_train = []
self._loss_advantages = []
self._optimizer_advantages = []
self._advantage_train_step = []
for player in range(self._num_players):
self._adv_networks.append(
AdvantageNetwork(self._embedding_size, self._advantage_network_layers,
self._num_actions))
with tf.device(self._train_device):
self._adv_networks_train.append(
AdvantageNetwork(self._embedding_size,
self._advantage_network_layers, self._num_actions))
self._loss_advantages.append(tf.keras.losses.MeanSquaredError())
self._optimizer_advantages.append(
tf.keras.optimizers.Adam(learning_rate=learning_rate))
self._advantage_train_step.append(
self._get_advantage_train_graph(player))
self._create_memories(memory_capacity)
def _reinitialize_policy_network(self):
"""Reinitalize policy network and optimizer for training."""
with tf.device(self._train_device):
self._policy_network = PolicyNetwork(self._embedding_size,
self._policy_network_layers,
self._num_actions)
self._optimizer_policy = tf.keras.optimizers.Adam(
learning_rate=self._learning_rate)
self._loss_policy = tf.keras.losses.MeanSquaredError()
def _reinitialize_advantage_network(self, player):
"""Reinitalize player's advantage network and optimizer for training."""
with tf.device(self._train_device):
self._adv_networks_train[player] = AdvantageNetwork(
self._embedding_size, self._advantage_network_layers,
self._num_actions)
self._optimizer_advantages[player] = tf.keras.optimizers.Adam(
learning_rate=self._learning_rate)
self._advantage_train_step[player] = (
self._get_advantage_train_graph(player))
@property
def advantage_buffers(self):
return self._advantage_memories
@property
def strategy_buffer(self):
return self._strategy_memories
def clear_advantage_buffers(self):
for p in range(self._num_players):
self._advantage_memories[p].clear()
def _create_memories(self, memory_capacity):
"""Create memory buffers and associated feature descriptions."""
self._strategy_memories = ReservoirBuffer(memory_capacity)
self._advantage_memories = [
ReservoirBuffer(memory_capacity) for _ in range(self._num_players)
]
self._strategy_feature_description = {
'info_state': tf.io.FixedLenFeature([self._embedding_size], tf.float32),
'action_probs': tf.io.FixedLenFeature([self._num_actions], tf.float32),
'iteration': tf.io.FixedLenFeature([1], tf.float32),
'legal_actions': tf.io.FixedLenFeature([self._num_actions], tf.float32)
}
self._advantage_feature_description = {
'info_state': tf.io.FixedLenFeature([self._embedding_size], tf.float32),
'iteration': tf.io.FixedLenFeature([1], tf.float32),
'samp_regret': tf.io.FixedLenFeature([self._num_actions], tf.float32),
'legal_actions': tf.io.FixedLenFeature([self._num_actions], tf.float32)
}
def solve(self):
"""Solution logic for Deep CFR."""
advantage_losses = collections.defaultdict(list)
with tf.device(self._infer_device):
with contextlib.ExitStack() as stack:
if self._save_strategy_memories:
self._memories_tfrecordfile = stack.enter_context(
tf.io.TFRecordWriter(self._memories_tfrecordpath))
for _ in range(self._num_iterations):
for p in range(self._num_players):
for _ in range(self._num_traversals):
self._traverse_game_tree(self._root_node, p)
if self._reinitialize_advantage_networks:
# Re-initialize advantage network for p and train from scratch.
self._reinitialize_advantage_network(p)
advantage_losses[p].append(self._learn_advantage_network(p))
if self._save_advantage_networks:
os.makedirs(self._save_advantage_networks, exist_ok=True)
self._adv_networks[p].save(
os.path.join(self._save_advantage_networks,
f'advnet_p{p}_it{self._iteration:04}'))
self._iteration += 1
# Train policy network.
policy_loss = self._learn_strategy_network()
return self._policy_network, advantage_losses, policy_loss
def save_policy_network(self, outputfolder):
"""Saves the policy network to the given folder."""
os.makedirs(outputfolder, exist_ok=True)
self._policy_network.save(outputfolder)
def train_policy_network_from_file(self,
tfrecordpath,
iteration=None,
batch_size_strategy=None,
policy_network_train_steps=None,
reinitialize_policy_network=True):
"""Trains the policy network from a previously stored tfrecords-file."""
self._memories_tfrecordpath = tfrecordpath
if iteration:
self._iteration = iteration
if batch_size_strategy:
self._batch_size_strategy = batch_size_strategy
if policy_network_train_steps:
self._policy_network_train_steps = policy_network_train_steps
if reinitialize_policy_network:
self._reinitialize_policy_network()
policy_loss = self._learn_strategy_network()
return policy_loss
def _add_to_strategy_memory(self, info_state, iteration,
strategy_action_probs, legal_actions_mask):
# pylint: disable=g-doc-args
"""Adds the given strategy data to the memory.
Uses either a tfrecordsfile on disk if provided, or a reservoir buffer.
"""
serialized_example = self._serialize_strategy_memory(
info_state, iteration, strategy_action_probs, legal_actions_mask)
if self._save_strategy_memories:
self._memories_tfrecordfile.write(serialized_example)
else:
self._strategy_memories.add(serialized_example)
def _serialize_strategy_memory(self, info_state, iteration,
strategy_action_probs, legal_actions_mask):
"""Create serialized example to store a strategy entry."""
example = tf.train.Example(
features=tf.train.Features(
feature={
'info_state':
tf.train.Feature(
float_list=tf.train.FloatList(value=info_state)),
'action_probs':
tf.train.Feature(
float_list=tf.train.FloatList(
value=strategy_action_probs)),
'iteration':
tf.train.Feature(
float_list=tf.train.FloatList(value=[iteration])),
'legal_actions':
tf.train.Feature(
float_list=tf.train.FloatList(value=legal_actions_mask))
}))
return example.SerializeToString()
def _deserialize_strategy_memory(self, serialized):
"""Deserializes a batch of strategy examples for the train step."""
tups = tf.io.parse_example(serialized, self._strategy_feature_description)
return (tups['info_state'], tups['action_probs'], tups['iteration'],
tups['legal_actions'])
def _serialize_advantage_memory(self, info_state, iteration, samp_regret,
legal_actions_mask):
"""Create serialized example to store an advantage entry."""
example = tf.train.Example(
features=tf.train.Features(
feature={
'info_state':
tf.train.Feature(
float_list=tf.train.FloatList(value=info_state)),
'iteration':
tf.train.Feature(
float_list=tf.train.FloatList(value=[iteration])),
'samp_regret':
tf.train.Feature(
float_list=tf.train.FloatList(value=samp_regret)),
'legal_actions':
tf.train.Feature(
float_list=tf.train.FloatList(value=legal_actions_mask))
}))
return example.SerializeToString()
def _deserialize_advantage_memory(self, serialized):
"""Deserializes a batch of advantage examples for the train step."""
tups = tf.io.parse_example(serialized, self._advantage_feature_description)
return (tups['info_state'], tups['samp_regret'], tups['iteration'],
tups['legal_actions'])
def _traverse_game_tree(self, state, player):
"""Performs a traversal of the game tree using external sampling.
Over a traversal the advantage and strategy memories are populated with
computed advantage values and matched regrets respectively.
Args:
state: Current OpenSpiel game state.
player: (int) Player index for this traversal.
Returns:
Recursively returns expected payoffs for each action.
"""
if state.is_terminal():
      # Terminal state: return this player's utility.
return state.returns()[player]
elif state.is_chance_node():
# If this is a chance node, sample an action
chance_outcome, chance_proba = zip(*state.chance_outcomes())
action = np.random.choice(chance_outcome, p=chance_proba)
return self._traverse_game_tree(state.child(action), player)
elif state.current_player() == player:
# Update the policy over the info set & actions via regret matching.
_, strategy = self._sample_action_from_advantage(state, player)
exp_payoff = 0 * strategy
for action in state.legal_actions():
exp_payoff[action] = self._traverse_game_tree(
state.child(action), player)
ev = np.sum(exp_payoff * strategy)
samp_regret = (exp_payoff - ev) * state.legal_actions_mask(player)
self._advantage_memories[player].add(
self._serialize_advantage_memory(state.information_state_tensor(),
self._iteration, samp_regret,
state.legal_actions_mask(player)))
return ev
else:
other_player = state.current_player()
_, strategy = self._sample_action_from_advantage(state, other_player)
      # Renormalize the distribution to guard against numerical errors.
probs = strategy
probs /= probs.sum()
sampled_action = np.random.choice(range(self._num_actions), p=probs)
self._add_to_strategy_memory(
state.information_state_tensor(other_player), self._iteration,
strategy, state.legal_actions_mask(other_player))
return self._traverse_game_tree(state.child(sampled_action), player)
@tf.function
def _get_matched_regrets(self, info_state, legal_actions_mask, player):
"""TF-Graph to calculate regret matching."""
advs = self._adv_networks[player](
(tf.expand_dims(info_state, axis=0), legal_actions_mask),
training=False)[0]
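    # Regret matching: positive advantages are normalized into a policy; if no
    # advantage is positive, fall back to the legal action with the highest
    # advantage.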
advantages = tf.maximum(advs, 0)
summed_regret = tf.reduce_sum(advantages)
if summed_regret > 0:
matched_regrets = advantages / summed_regret
else:
matched_regrets = tf.one_hot(
tf.argmax(tf.where(legal_actions_mask == 1, advs, -10e20)),
self._num_actions)
return advantages, matched_regrets
def _sample_action_from_advantage(self, state, player):
"""Returns an info state policy by applying regret-matching.
Args:
state: Current OpenSpiel game state.
player: (int) Player index over which to compute regrets.
Returns:
1. (np-array) Advantage values for info state actions indexed by action.
2. (np-array) Matched regrets, prob for actions indexed by action.
"""
info_state = tf.constant(
state.information_state_tensor(player), dtype=tf.float32)
legal_actions_mask = tf.constant(
state.legal_actions_mask(player), dtype=tf.float32)
advantages, matched_regrets = self._get_matched_regrets(
info_state, legal_actions_mask, player)
return advantages.numpy(), matched_regrets.numpy()
def action_probabilities(self, state):
"""Returns action probabilities dict for a single batch."""
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
legal_actions_mask = tf.constant(
state.legal_actions_mask(cur_player), dtype=tf.float32)
info_state_vector = tf.constant(
state.information_state_tensor(), dtype=tf.float32)
if len(info_state_vector.shape) == 1:
info_state_vector = tf.expand_dims(info_state_vector, axis=0)
probs = self._policy_network((info_state_vector, legal_actions_mask),
training=False)
probs = probs.numpy()
return {action: probs[0][action] for action in legal_actions}
def _get_advantage_dataset(self, player):
"""Returns the collected regrets for the given player as a dataset."""
self._advantage_memories[player].shuffle_data()
data = tf.data.Dataset.from_tensor_slices(
self._advantage_memories[player].data)
data = data.shuffle(ADVANTAGE_TRAIN_SHUFFLE_SIZE)
data = data.repeat()
data = data.batch(self._batch_size_advantage)
data = data.map(self._deserialize_advantage_memory)
data = data.prefetch(tf.data.experimental.AUTOTUNE)
return data
def _get_advantage_train_graph(self, player):
"""Return TF-Graph to perform advantage network train step."""
@tf.function
def train_step(info_states, advantages, iterations, masks, iteration):
model = self._adv_networks_train[player]
with tf.GradientTape() as tape:
preds = model((info_states, masks), training=True)
main_loss = self._loss_advantages[player](
advantages, preds, sample_weight=iterations * 2 / iteration)
loss = tf.add_n([main_loss], model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
self._optimizer_advantages[player].apply_gradients(
zip(gradients, model.trainable_variables))
return main_loss
return train_step
def _learn_advantage_network(self, player):
"""Compute the loss on sampled transitions and perform a Q-network update.
If there are not enough elements in the buffer, no loss is computed and
`None` is returned instead.
Args:
player: (int) player index.
Returns:
The average loss over the advantage network of the last batch.
"""
with tf.device(self._train_device):
tfit = tf.constant(self._iteration, dtype=tf.float32)
data = self._get_advantage_dataset(player)
for d in data.take(self._advantage_network_train_steps):
main_loss = self._advantage_train_step[player](*d, tfit)
self._adv_networks[player].set_weights(
self._adv_networks_train[player].get_weights())
return main_loss
def _get_strategy_dataset(self):
"""Returns the collected strategy memories as a dataset."""
if self._memories_tfrecordpath:
data = tf.data.TFRecordDataset(self._memories_tfrecordpath)
else:
self._strategy_memories.shuffle_data()
data = tf.data.Dataset.from_tensor_slices(self._strategy_memories.data)
data = data.shuffle(STRATEGY_TRAIN_SHUFFLE_SIZE)
data = data.repeat()
data = data.batch(self._batch_size_strategy)
data = data.map(self._deserialize_strategy_memory)
data = data.prefetch(tf.data.experimental.AUTOTUNE)
return data
def _learn_strategy_network(self):
"""Compute the loss over the strategy network.
Returns:
The average loss obtained on the last training batch of transitions
or `None`.
"""
@tf.function
def train_step(info_states, action_probs, iterations, masks):
model = self._policy_network
with tf.GradientTape() as tape:
preds = model((info_states, masks), training=True)
main_loss = self._loss_policy(
action_probs, preds, sample_weight=iterations * 2 / self._iteration)
loss = tf.add_n([main_loss], model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
self._optimizer_policy.apply_gradients(
zip(gradients, model.trainable_variables))
return main_loss
with tf.device(self._train_device):
data = self._get_strategy_dataset()
for d in data.take(self._policy_network_train_steps):
main_loss = train_step(*d)
return main_loss
| open_spiel-master | open_spiel/python/algorithms/deep_cfr_tf2.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regression counterfactual regret minimization (RCFR) [Waugh et al., 2015; Morrill, 2016].
In contrast to (tabular) counterfactual regret minimization (CFR)
[Zinkevich et al., 2007], RCFR replaces the table of regrets that generate the
current policy profile with a profile of regression models. The average
policy is still tracked exactly with a full game-size table. The exploitability
of the average policy in zero-sum games decreases as the model accuracy and
the number of iterations increase [Waugh et al., 2015; Morrill, 2016]. As long
as the regression model errors decrease across iterations, the average policy
converges toward a Nash equilibrium in zero-sum games.
# References
Dustin Morrill. Using Regret Estimation to Solve Games Compactly.
M.Sc. thesis, Computing Science Department, University of Alberta,
Apr 1, 2016, Edmonton Alberta, Canada.
Kevin Waugh, Dustin Morrill, J. Andrew Bagnell, and Michael Bowling.
Solving Games with Functional Regret Estimation. At the Twenty-Ninth AAAI
Conference on Artificial Intelligence, January 25-29, 2015, Austin Texas,
USA. Pages 2138-2145.
Martin Zinkevich, Michael Johanson, Michael Bowling, and Carmelo Piccione.
Regret Minimization in Games with Incomplete Information.
At Advances in Neural Information Processing Systems 20 (NeurIPS). 2007.
"""
import numpy as np
import tensorflow.compat.v1 as tf
# Temporarily disable TF2 behavior while the code is not updated.
tf.disable_v2_behavior()
def tensor_to_matrix(tensor):
"""Converts `tensor` to a matrix (a rank-2 tensor) or raises an exception.
Args:
tensor: The tensor to convert.
Returns:
A TensorFlow matrix (rank-2 `tf.Tensor`).
Raises:
ValueError: If `tensor` cannot be trivially converted to a matrix, i.e.
`tensor` has a rank > 2.
"""
tensor = tf.convert_to_tensor(tensor)
rank = tensor.shape.rank
if rank > 2:
raise ValueError(
("Tensor {} cannot be converted into a matrix as it is rank "
"{} > 2.").format(tensor, rank))
elif rank < 2:
num_columns = 1 if rank == 0 else tensor.shape[0].value
tensor = tf.reshape(tensor, [1, num_columns])
return tensor
def with_one_hot_action_features(state_features, legal_actions,
num_distinct_actions):
"""Constructs features for each sequence by extending state features.
Sequences features are constructed by concatenating one-hot features
indicating each action to the information state features and stacking them.
Args:
state_features: The features for the information state alone. Must be a
`tf.Tensor` with a rank less than or equal to (if batched) 2.
legal_actions: The list of legal actions in this state. Determines the
number of rows in the returned feature matrix.
num_distinct_actions: The number of globally distinct actions in the game.
Determines the length of the action feature vector concatenated onto the
state features.
Returns:
    A `tf.Tensor` feature matrix with one row for each sequence and
    (number of state features + `num_distinct_actions`) columns.
Raises:
ValueError: If `state_features` has a rank > 2.
"""
state_features = tensor_to_matrix(state_features)
with_action_features = []
for action in legal_actions:
action_features = tf.one_hot([action], num_distinct_actions)
action_features = tf.tile(action_features, [tf.shape(state_features)[0], 1])
all_features = tf.concat([state_features, action_features], axis=1)
with_action_features.append(all_features)
return tf.concat(with_action_features, axis=0)
def sequence_features(state, num_distinct_actions):
"""The sequence features at `state`.
Features are constructed by concatenating `state`'s normalized feature
vector with one-hot vectors indicating each action (see
`with_one_hot_action_features`).
Args:
state: An OpenSpiel `State`.
num_distinct_actions: The number of globally distinct actions in `state`'s
game.
Returns:
A `tf.Tensor` feature matrix with one row for each sequence.
"""
return with_one_hot_action_features(state.information_state_tensor(),
state.legal_actions(),
num_distinct_actions)
def num_features(game):
"""Returns the number of features returned by `sequence_features`.
Args:
game: An OpenSpiel `Game`.
"""
return game.information_state_tensor_size() + game.num_distinct_actions()
class RootStateWrapper(object):
"""Analyzes the subgame at a given root state.
It enumerates features for each player sequence, creates a mapping between
information states to sequence index offsets, and caches terminal values
in a dictionary with history string keys.
Properties:
root: An OpenSpiel `State`.
sequence_features: A `list` of sequence feature matrices, one for each
player. This list uses depth-first, information state-major ordering, so
sequences are grouped by information state. I.e. the first legal action
in the first state has index 0, the second action in the same information
      state has index 1, the third action has index 2, and so on.
Sequences in the next information state descendant of the first action
will begin indexing its sequences at the number of legal actions in the
ancestor information state.
num_player_sequences: The number of sequences for each player.
info_state_to_sequence_idx: A `dict` mapping each information state string
to the `sequence_features` index of the first sequence in the
corresponding information state.
terminal_values: A `dict` mapping history strings to terminal values for
each player.
"""
def __init__(self, state):
self.root = state
self._num_distinct_actions = len(state.legal_actions_mask(0))
self.sequence_features = [[] for _ in range(state.num_players())]
self.num_player_sequences = [0] * state.num_players()
self.info_state_to_sequence_idx = {}
self.terminal_values = {}
self._walk_descendants(state)
self.sequence_features = [
tf.concat(rows, axis=0) for rows in self.sequence_features
]
def _walk_descendants(self, state):
"""Records information about `state` and its descendants."""
if state.is_terminal():
self.terminal_values[state.history_str()] = np.array(state.returns())
return
elif state.is_chance_node():
for action, _ in state.chance_outcomes():
self._walk_descendants(state.child(action))
return
player = state.current_player()
info_state = state.information_state_string(player)
actions = state.legal_actions()
if info_state not in self.info_state_to_sequence_idx:
n = self.num_player_sequences[player]
self.info_state_to_sequence_idx[info_state] = n
self.sequence_features[player].append(
sequence_features(state, self._num_distinct_actions))
self.num_player_sequences[player] += len(actions)
for action in actions:
self._walk_descendants(state.child(action))
def sequence_weights_to_policy(self, sequence_weights, state):
"""Returns a behavioral policy at `state` from sequence weights.
Args:
sequence_weights: An array of non-negative weights, one for each of
`state.current_player()`'s sequences in `state`'s game.
state: An OpenSpiel `State` that represents an information state in an
alternating-move game.
Returns:
A `np.array<double>` probability distribution representing the policy in
`state` encoded by `sequence_weights`. Weights corresponding to actions
in `state` are normalized by their sum.
Raises:
ValueError: If there are too few sequence weights at `state`.
"""
info_state = state.information_state_string()
sequence_offset = self.info_state_to_sequence_idx[info_state]
actions = state.legal_actions()
sequence_idx_end = sequence_offset + len(actions)
weights = sequence_weights[sequence_offset:sequence_idx_end]
if len(weights) < len(actions):
raise ValueError(
("Invalid policy: Policy {player} at sequence offset "
"{sequence_offset} has only {policy_len} elements but there "
"are {num_actions} legal actions.").format(
player=state.current_player(),
sequence_offset=sequence_offset,
policy_len=len(weights),
num_actions=len(actions)))
return normalized_by_sum(weights)
def sequence_weights_to_policy_fn(self, player_sequence_weights):
"""Returns a policy function based on sequence weights for each player.
Args:
player_sequence_weights: A list of weight arrays, one for each player.
Each array should have a weight for each of that player's sequences in
`state`'s game.
Returns:
A `State` -> `np.array<double>` function. The output of this function is
a probability distribution that represents the policy at the given
`State` encoded by `player_sequence_weights` according to
`sequence_weights_to_policy`.
"""
def policy_fn(state):
player = state.current_player()
return self.sequence_weights_to_policy(player_sequence_weights[player],
state)
return policy_fn
def sequence_weights_to_tabular_profile(self, player_sequence_weights):
"""Returns the tabular profile-form of `player_sequence_weights`."""
return sequence_weights_to_tabular_profile(
self.root, self.sequence_weights_to_policy_fn(player_sequence_weights))
def counterfactual_regrets_and_reach_weights(self, regret_player,
reach_weight_player,
*sequence_weights):
"""Returns counterfactual regrets and reach weights as a tuple.
Args:
regret_player: The player for whom counterfactual regrets are computed.
reach_weight_player: The player for whom reach weights are computed.
*sequence_weights: A list of non-negative sequence weights for each player
determining the policy profile. Behavioral policies are generated by
normalizing sequence weights corresponding to actions in each
information state by their sum.
Returns:
The counterfactual regrets and reach weights as an `np.array`-`np.array`
tuple.
Raises:
ValueError: If there are too few sequence weights at any information state
for any player.
"""
num_players = len(sequence_weights)
regrets = np.zeros(self.num_player_sequences[regret_player])
reach_weights = np.zeros(self.num_player_sequences[reach_weight_player])
def _walk_descendants(state, reach_probabilities, chance_reach_probability):
"""Compute `state`'s counterfactual regrets and reach weights.
Args:
state: An OpenSpiel `State`.
reach_probabilities: The probability that each player plays to reach
`state`'s history.
chance_reach_probability: The probability that all chance outcomes in
`state`'s history occur.
Returns:
The counterfactual value of `state`'s history.
Raises:
ValueError if there are too few sequence weights at any information
state for any player.
"""
if state.is_terminal():
player_reach = (
np.prod(reach_probabilities[:regret_player]) *
np.prod(reach_probabilities[regret_player + 1:]))
counterfactual_reach_prob = player_reach * chance_reach_probability
u = self.terminal_values[state.history_str()]
return u[regret_player] * counterfactual_reach_prob
elif state.is_chance_node():
v = 0.0
for action, action_prob in state.chance_outcomes():
v += _walk_descendants(
state.child(action), reach_probabilities,
chance_reach_probability * action_prob)
return v
player = state.current_player()
info_state = state.information_state_string(player)
sequence_idx_offset = self.info_state_to_sequence_idx[info_state]
actions = state.legal_actions(player)
sequence_idx_end = sequence_idx_offset + len(actions)
my_sequence_weights = sequence_weights[player][
sequence_idx_offset:sequence_idx_end]
if len(my_sequence_weights) < len(actions):
raise ValueError(
("Invalid policy: Policy {player} at sequence offset "
"{sequence_idx_offset} has only {policy_len} elements but there "
"are {num_actions} legal actions.").format(
player=player,
sequence_idx_offset=sequence_idx_offset,
policy_len=len(my_sequence_weights),
num_actions=len(actions)))
policy = normalized_by_sum(my_sequence_weights)
action_values = np.zeros(len(actions))
state_value = 0.0
is_reach_weight_player_node = player == reach_weight_player
is_regret_player_node = player == regret_player
reach_prob = reach_probabilities[player]
for action_idx, action in enumerate(actions):
action_prob = policy[action_idx]
next_reach_prob = reach_prob * action_prob
if is_reach_weight_player_node:
reach_weight_player_plays_down_this_line = next_reach_prob > 0
if not reach_weight_player_plays_down_this_line:
continue
sequence_idx = sequence_idx_offset + action_idx
reach_weights[sequence_idx] += next_reach_prob
reach_probabilities[player] = next_reach_prob
action_value = _walk_descendants(
state.child(action), reach_probabilities, chance_reach_probability)
if is_regret_player_node:
state_value = state_value + action_prob * action_value
else:
state_value = state_value + action_value
action_values[action_idx] = action_value
reach_probabilities[player] = reach_prob
if is_regret_player_node:
regrets[sequence_idx_offset:sequence_idx_end] += (
action_values - state_value)
return state_value
# End of _walk_descendants
_walk_descendants(self.root, np.ones(num_players), 1.0)
return regrets, reach_weights
def normalized_by_sum(v, axis=0, mutate=False):
"""Divides each element of `v` along `axis` by the sum of `v` along `axis`.
Assumes `v` is non-negative. Sets of `v` elements along `axis` that sum to
zero are normalized to `1 / v.shape[axis]` (a uniform distribution).
Args:
v: Non-negative array of values.
axis: An integer axis.
mutate: Whether or not to store the result in `v`.
Returns:
The normalized array.
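  For example (illustrative): normalized_by_sum([1., 3.]) gives [0.25, 0.75],
  while an all-zero input such as [0., 0.] maps to the uniform [0.5, 0.5].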
"""
v = np.asarray(v)
denominator = v.sum(axis=axis, keepdims=True)
denominator_is_zero = denominator == 0
# Every element of `denominator_is_zero` that is true corresponds to a
# set of elements in `v` along `axis` that are all zero. By setting these
# denominators to `v.shape[axis]` and adding 1 to each of the corresponding
# elements in `v`, these elements are normalized to `1 / v.shape[axis]`
# (a uniform distribution).
denominator += v.shape[axis] * denominator_is_zero
if mutate:
v += denominator_is_zero
v /= denominator
else:
v = (v + denominator_is_zero) / denominator
return v
def relu(v):
"""Returns the element-wise maximum between `v` and 0."""
return np.maximum(v, 0)
def _descendant_states(state, depth_limit, depth, include_terminals,
include_chance_states):
"""Recursive descendant state generator.
Decision states are always yielded.
Args:
state: The current state.
depth_limit: The descendant depth limit. Zero will ensure only
`initial_state` is generated and negative numbers specify the absence of a
limit.
depth: The current descendant depth.
include_terminals: Whether or not to include terminal states.
include_chance_states: Whether or not to include chance states.
Yields:
`State`, a state that is `initial_state` or one of its descendants.
"""
if state.is_terminal():
if include_terminals:
yield state
return
if depth > depth_limit >= 0:
return
if not state.is_chance_node() or include_chance_states:
yield state
for action in state.legal_actions():
state_for_search = state.child(action)
for substate in _descendant_states(state_for_search, depth_limit, depth + 1,
include_terminals,
include_chance_states):
yield substate
def all_states(initial_state,
depth_limit=-1,
include_terminals=False,
include_chance_states=False):
"""Generates states from `initial_state`.
Generates the set of states that includes only the `initial_state` and its
descendants that satisfy the inclusion criteria specified by the remaining
parameters. Decision states are always included.
Args:
initial_state: The initial state from which to generate states.
depth_limit: The descendant depth limit. Zero will ensure only
`initial_state` is generated and negative numbers specify the absence of a
limit. Defaults to no limit.
include_terminals: Whether or not to include terminal states. Defaults to
`False`.
include_chance_states: Whether or not to include chance states. Defaults to
`False`.
Returns:
A generator that yields the `initial_state` and its descendants that
satisfy the inclusion criteria specified by the remaining parameters.
"""
return _descendant_states(
state=initial_state,
depth_limit=depth_limit,
depth=0,
include_terminals=include_terminals,
include_chance_states=include_chance_states)
def sequence_weights_to_tabular_profile(root, policy_fn):
"""Returns the `dict` of `list`s of action-prob pairs-form of `policy_fn`."""
tabular_policy = {}
players = range(root.num_players())
for state in all_states(root):
for player in players:
legal_actions = state.legal_actions(player)
if len(legal_actions) < 1:
continue
info_state = state.information_state_string(player)
if info_state in tabular_policy:
continue
my_policy = policy_fn(state)
tabular_policy[info_state] = list(zip(legal_actions, my_policy))
return tabular_policy
@tf.function
def feedforward_evaluate(layers,
x,
use_skip_connections=False,
hidden_are_factored=False):
"""Evaluates `layers` as a feedforward neural network on `x`.
Args:
layers: The neural network layers (`tf.Tensor` -> `tf.Tensor` callables).
x: The array-like input to evaluate. Must be trivially convertible to a
matrix (tensor rank <= 2).
use_skip_connections: Whether or not to use skip connections between layers.
If the layer input has too few features to be added to the layer output,
then the end of input is padded with zeros. If it has too many features,
then the input is truncated.
hidden_are_factored: Whether or not hidden logical layers are factored into
two separate linear transformations stored as adjacent elements of
`layers`.
Returns:
The `tf.Tensor` evaluation result.
Raises:
ValueError: If `x` has a rank greater than 2.
"""
x = tensor_to_matrix(x)
i = 0
while i < len(layers) - 1:
y = layers[i](x)
i += 1
if hidden_are_factored:
y = layers[i](y)
i += 1
if use_skip_connections:
my_num_features = x.shape[1].value
padding = y.shape[1].value - my_num_features
if padding > 0:
zeros = tf.zeros([tf.shape(x)[0], padding])
x = tf.concat([x, zeros], axis=1)
elif padding < 0:
x = tf.strided_slice(x, [0, 0], [tf.shape(x)[0], y.shape[1].value])
y = x + y
x = y
return layers[-1](x)
class DeepRcfrModel(object):
"""A flexible deep feedforward RCFR model class.
Properties:
layers: The `tf.keras.Layer` layers describing this model.
trainable_variables: The trainable `tf.Variable`s in this model's `layers`.
losses: This model's layer specific losses (e.g. regularizers).
"""
def __init__(self,
game,
num_hidden_units,
num_hidden_layers=1,
num_hidden_factors=0,
hidden_activation=tf.nn.relu,
use_skip_connections=False,
regularizer=None):
"""Creates a new `DeepRcfrModel.
Args:
game: The OpenSpiel game being solved.
num_hidden_units: The number of units in each hidden layer.
num_hidden_layers: The number of hidden layers. Defaults to 1.
num_hidden_factors: The number of hidden factors or the matrix rank of the
layer. If greater than zero, hidden layers will be split into two
separate linear transformations, the first with
`num_hidden_factors`-columns and the second with
`num_hidden_units`-columns. The result is that the logical hidden layer
        is a rank-`num_hidden_factors` matrix instead of a
        rank-`num_hidden_units` matrix. When
        `num_hidden_factors < num_hidden_units`, this effectively implements
        weight sharing. Defaults to 0.
hidden_activation: The activation function to apply over hidden layers.
Defaults to `tf.nn.relu`.
use_skip_connections: Whether or not to apply skip connections (layer
output = layer(x) + x) on hidden layers. Zero padding or truncation is
used to match the number of columns on layer inputs and outputs.
regularizer: A regularizer to apply to each layer. Defaults to `None`.
"""
self._use_skip_connections = use_skip_connections
self._hidden_are_factored = num_hidden_factors > 0
self.layers = []
for _ in range(num_hidden_layers):
if self._hidden_are_factored:
self.layers.append(
tf.keras.layers.Dense(
num_hidden_factors,
use_bias=True,
kernel_regularizer=regularizer))
self.layers.append(
tf.keras.layers.Dense(
num_hidden_units,
use_bias=True,
activation=hidden_activation,
kernel_regularizer=regularizer))
self.layers.append(
tf.keras.layers.Dense(1, use_bias=True, kernel_regularizer=regularizer))
# Construct variables for all layers by exercising the network.
x = tf.zeros([1, num_features(game)])
for layer in self.layers:
x = layer(x)
self.trainable_variables = sum(
[layer.trainable_variables for layer in self.layers], [])
self.losses = sum([layer.losses for layer in self.layers], [])
def __call__(self, x):
"""Evaluates this model on `x`."""
return feedforward_evaluate(
layers=self.layers,
x=x,
use_skip_connections=self._use_skip_connections,
hidden_are_factored=self._hidden_are_factored)
class _RcfrSolver(object):
"""An abstract RCFR solver class.
Requires that subclasses implement `evaluate_and_update_policy`.
"""
def __init__(self, game, models, truncate_negative=False, session=None):
"""Creates a new `_RcfrSolver`.
Args:
game: An OpenSpiel `Game`.
models: Current policy models (optimizable array-like -> `tf.Tensor`
callables) for both players.
truncate_negative: Whether or not to truncate negative (approximate)
cumulative regrets to zero to implement RCFR+. Defaults to `False`.
session: A TensorFlow `Session` to convert sequence weights from
`tf.Tensor`s produced by `models` to `np.array`s. If `None`, it is
assumed that eager mode is enabled. Defaults to `None`.
"""
self._game = game
self._models = models
self._truncate_negative = truncate_negative
self._root_wrapper = RootStateWrapper(game.new_initial_state())
self._session = session
self._cumulative_seq_probs = [
np.zeros(n) for n in self._root_wrapper.num_player_sequences
]
def _sequence_weights(self, player=None):
"""Returns regret-like weights for each sequence as an `np.array`.
Negative weights are truncated to zero.
Args:
player: The player to compute weights for, or both if `player` is `None`.
Defaults to `None`.
"""
if player is None:
return [
self._sequence_weights(player)
for player in range(self._game.num_players())
]
else:
tensor = tf.nn.relu(
tf.squeeze(self._models[player](
self._root_wrapper.sequence_features[player])))
return tensor.numpy() if self._session is None else self._session(tensor)
def evaluate_and_update_policy(self, train_fn):
"""Performs a single step of policy evaluation and policy improvement.
Args:
train_fn: A (model, `tf.data.Dataset`) function that trains the given
regression model to accurately reproduce the x to y mapping given x-y
data.
Raises:
NotImplementedError: If not overridden by child class.
"""
raise NotImplementedError()
def current_policy(self):
"""Returns the current policy profile.
Returns:
A `dict<info state, list<Action, probability>>` that maps info state
strings to `Action`-probability pairs describing each player's policy.
"""
return self._root_wrapper.sequence_weights_to_tabular_profile(
self._sequence_weights())
def average_policy(self):
"""Returns the average of all policies iterated.
This average policy converges toward a Nash policy as the number of
iterations increases as long as the regret prediction error decreases
continually [Morrill, 2016].
The policy is computed using the accumulated policy probabilities computed
using `evaluate_and_update_policy`.
Returns:
A `dict<info state, list<Action, probability>>` that maps info state
strings to (Action, probability) pairs describing each player's policy.
"""
return self._root_wrapper.sequence_weights_to_tabular_profile(
self._cumulative_seq_probs)
def _previous_player(self, player):
"""The previous player in the turn ordering."""
return player - 1 if player > 0 else self._game.num_players() - 1
def _average_policy_update_player(self, regret_player):
"""The player for whom the average policy should be updated."""
return self._previous_player(regret_player)
class RcfrSolver(_RcfrSolver):
"""RCFR with an effectively infinite regret data buffer.
  Exact or bootstrapped cumulative regrets are stored as if in an infinitely
  large data buffer. The average strategy is updated and stored in a full
game-size table. Reproduces the RCFR versions used in experiments by
Waugh et al. [2015] and Morrill [2016] except that this class does not
restrict the user to regression tree models.
"""
def __init__(self,
game,
models,
bootstrap=None,
truncate_negative=False,
session=None):
self._bootstrap = bootstrap
super(RcfrSolver, self).__init__(
game, models, truncate_negative=truncate_negative, session=session)
self._regret_targets = [
np.zeros(n) for n in self._root_wrapper.num_player_sequences
]
def evaluate_and_update_policy(self, train_fn):
"""Performs a single step of policy evaluation and policy improvement.
Args:
train_fn: A (model, `tf.data.Dataset`) function that trains the given
regression model to accurately reproduce the x to y mapping given x-y
data.
"""
sequence_weights = self._sequence_weights()
player_seq_features = self._root_wrapper.sequence_features
for regret_player in range(self._game.num_players()):
seq_prob_player = self._average_policy_update_player(regret_player)
regrets, seq_probs = (
self._root_wrapper.counterfactual_regrets_and_reach_weights(
regret_player, seq_prob_player, *sequence_weights))
if self._bootstrap:
self._regret_targets[regret_player][:] = sequence_weights[regret_player]
if self._truncate_negative:
regrets = np.maximum(-relu(self._regret_targets[regret_player]),
regrets)
self._regret_targets[regret_player] += regrets
self._cumulative_seq_probs[seq_prob_player] += seq_probs
targets = tf.expand_dims(self._regret_targets[regret_player], axis=1)
data = tf.data.Dataset.from_tensor_slices(
(player_seq_features[regret_player], targets))
regret_player_model = self._models[regret_player]
train_fn(regret_player_model, data)
sequence_weights[regret_player] = self._sequence_weights(regret_player)
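def _example_rcfr_training_loop(game, models, num_iterations=100):
  """Illustrative usage sketch, not part of the original module.

  Drives `RcfrSolver` with a minimal `train_fn`. Assumes eager TensorFlow and
  that `models` is a list with one regression model per player (e.g. instances
  of the Keras-layer model defined above). The inline `_train` helper is a
  hypothetical example of the `train_fn` argument; any routine with the same
  (model, dataset) signature can be substituted.
  """
  def _train(model, data):
    data = data.shuffle(1000).batch(100).repeat(2)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
    for x, y in data:
      with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(y - model(x)))
      grads = tape.gradient(loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

  solver = RcfrSolver(game, models, truncate_negative=False)
  for _ in range(num_iterations):
    solver.evaluate_and_update_policy(_train)
  return solver.average_policy()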
class ReservoirBuffer(object):
"""A generic reservoir buffer data structure.
  After every insertion, the buffer holds a uniform random sample of at most
  `size` elements from the stream of candidates seen so far.
"""
def __init__(self, size):
self.size = size
self.num_elements = 0
self._buffer = np.full([size], None, dtype=object)
self._num_candidates = 0
@property
def buffer(self):
return self._buffer[:self.num_elements]
def insert(self, candidate):
"""Consider this `candidate` for inclusion in this sampling buffer."""
self._num_candidates += 1
if self.num_elements < self.size:
self._buffer[self.num_elements] = candidate
self.num_elements += 1
return
idx = np.random.choice(self._num_candidates)
if idx < self.size:
self._buffer[idx] = candidate
def insert_all(self, candidates):
"""Consider all `candidates` for inclusion in this sampling buffer."""
for candidate in candidates:
self.insert(candidate)
def num_available_spaces(self):
"""The number of freely available spaces in this buffer."""
return self.size - self.num_elements
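def _example_reservoir_buffer_sketch():
  """Illustrative usage sketch, not part of the original module.

  After more candidates than `size` have been offered, the buffer holds a
  uniform random sample of everything seen so far.
  """
  buf = ReservoirBuffer(size=4)
  buf.insert_all(range(100))  # offer a stream of 100 candidates
  assert len(buf.buffer) == 4  # the buffer is full ...
  assert buf.num_available_spaces() == 0  # ... with no free slots left
  return buf.buffer  # approximately uniform sample of range(100)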
class ReservoirRcfrSolver(_RcfrSolver):
"""RCFR with a reservoir buffer for storing regret data.
The average strategy is updated and stored in a full game-size table.
"""
def __init__(self,
game,
models,
buffer_size,
truncate_negative=False,
session=None):
self._buffer_size = buffer_size
super(ReservoirRcfrSolver, self).__init__(
game, models, truncate_negative=truncate_negative, session=session)
self._reservoirs = [
ReservoirBuffer(self._buffer_size) for _ in range(game.num_players())
]
def evaluate_and_update_policy(self, train_fn):
"""Performs a single step of policy evaluation and policy improvement.
Args:
train_fn: A (model, `tf.data.Dataset`) function that trains the given
regression model to accurately reproduce the x to y mapping given x-y
data.
"""
sequence_weights = self._sequence_weights()
player_seq_features = self._root_wrapper.sequence_features
for regret_player in range(self._game.num_players()):
seq_prob_player = self._average_policy_update_player(regret_player)
regrets, seq_probs = (
self._root_wrapper.counterfactual_regrets_and_reach_weights(
regret_player, seq_prob_player, *sequence_weights))
if self._truncate_negative:
regrets = np.maximum(-relu(sequence_weights[regret_player]), regrets)
next_data = list(
zip(player_seq_features[regret_player], tf.expand_dims(regrets, 1)))
self._reservoirs[regret_player].insert_all(next_data)
self._cumulative_seq_probs[seq_prob_player] += seq_probs
my_buffer = tuple(
tf.stack(a) for a in zip(*self._reservoirs[regret_player].buffer))
data = tf.data.Dataset.from_tensor_slices(my_buffer)
regret_player_model = self._models[regret_player]
train_fn(regret_player_model, data)
sequence_weights[regret_player] = self._sequence_weights(regret_player)
| open_spiel-master | open_spiel/python/algorithms/rcfr.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.cfr."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import outcome_sampling_mccfr
import pyspiel
# Convergence results change depending on the seed used when running the
# tests. For this reason, the test thresholds were chosen by taking the
# maximum NashConv (exploitability) value observed across multiple runs.
# For more details see https://github.com/deepmind/open_spiel/pull/458
SEED = 39823987
class OutcomeSamplingMCCFRTest(absltest.TestCase):
def test_outcome_sampling_leduc_2p(self):
np.random.seed(SEED)
game = pyspiel.load_game("leduc_poker")
os_solver = outcome_sampling_mccfr.OutcomeSamplingSolver(game)
for _ in range(10000):
os_solver.iteration()
conv = exploitability.nash_conv(game, os_solver.average_policy())
print("Leduc2P, conv = {}".format(conv))
self.assertLess(conv, 3.07)
def test_outcome_sampling_kuhn_2p(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker")
os_solver = outcome_sampling_mccfr.OutcomeSamplingSolver(game)
for _ in range(10000):
os_solver.iteration()
conv = exploitability.nash_conv(game, os_solver.average_policy())
print("Kuhn2P, conv = {}".format(conv))
self.assertLess(conv, 0.17)
# ensure that to_tabular() works on the returned policy
# and the tabular policy is equivalent
tabular_policy = os_solver.average_policy().to_tabular()
conv2 = exploitability.nash_conv(game, tabular_policy)
self.assertEqual(conv, conv2)
def test_outcome_sampling_kuhn_3p(self):
np.random.seed(SEED)
game = pyspiel.load_game("kuhn_poker", {"players": 3})
os_solver = outcome_sampling_mccfr.OutcomeSamplingSolver(game)
for _ in range(10000):
os_solver.iteration()
conv = exploitability.nash_conv(game, os_solver.average_policy())
print("Kuhn3P, conv = {}".format(conv))
self.assertLess(conv, 0.22)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/outcome_sampling_mccfr_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.cfr."""
from absl.testing import absltest
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms import expected_game_score
from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import fictitious_play
import pyspiel
class FictitiousPlayTest(absltest.TestCase):
def test_xfp(self):
game = pyspiel.load_game("kuhn_poker")
xfp_solver = fictitious_play.XFPSolver(game)
for _ in range(100):
xfp_solver.iteration()
average_policies = xfp_solver.average_policy_tables()
tabular_policy = policy.TabularPolicy(game)
for player_id in range(2):
for info_state, state_policy in average_policies[player_id].items():
policy_to_update = tabular_policy.policy_for_key(info_state)
for action, probability in state_policy.items():
policy_to_update[action] = probability
average_policy_values = expected_game_score.policy_value(
game.new_initial_state(), [tabular_policy, tabular_policy])
print("Kuhn 2P average values after 10 iterations")
print("P0: {}".format(average_policy_values[0]))
print("P1: {}".format(average_policy_values[1]))
self.assertIsNotNone(average_policy_values)
self.assertTrue(
np.allclose(average_policy_values, [-1 / 18, 1 / 18], atol=1e-3))
def test_meta_game_kuhn2p(self):
print("Kuhn 2p")
game = pyspiel.load_game("kuhn_poker")
xfp_solver = fictitious_play.XFPSolver(game, save_oracles=True)
for _ in range(3):
xfp_solver.iteration()
meta_games = xfp_solver.get_empirical_metagame(10, seed=1)
self.assertIsNotNone(meta_games)
# Metagame utility matrices for each player
for i in range(2):
print("player {}: \n{}".format(i + 1, meta_games[i]))
def test_meta_game_kuhn3p(self):
print("Kuhn 3p")
game = pyspiel.load_game("kuhn_poker", {"players": 3})
xfp_solver = fictitious_play.XFPSolver(game, save_oracles=True)
for _ in range(3):
xfp_solver.iteration()
meta_games = xfp_solver.get_empirical_metagame(10, seed=3)
self.assertIsNotNone(meta_games)
# Metagame utility tensors for each player
for i in range(3):
print("player {}: \n{}".format(i + 1, meta_games[i]))
def test_meta_game_kuhn4p(self):
print("Kuhn 4p")
game = pyspiel.load_game("kuhn_poker", {"players": 4})
xfp_solver = fictitious_play.XFPSolver(game, save_oracles=True)
for _ in range(3):
xfp_solver.iteration()
meta_games = xfp_solver.get_empirical_metagame(10, seed=1)
self.assertIsNotNone(meta_games)
# Metagame utility tensors for each player
for i in range(4):
print("player {}: \n{}".format(i + 1, meta_games[i]))
def test_meta_game_leduc2p(self):
print("Leduc 2p")
game = pyspiel.load_game("leduc_poker")
xfp_solver = fictitious_play.XFPSolver(game, save_oracles=True)
for _ in range(3):
xfp_solver.iteration()
meta_games = xfp_solver.get_empirical_metagame(10, seed=86487)
self.assertIsNotNone(meta_games)
# Metagame utility matrices for each player
for i in range(2):
print("player {}: \n{}".format(i + 1, meta_games[i]))
def test_matching_pennies_3p(self):
game = pyspiel.load_game_as_turn_based("matching_pennies_3p")
xfp_solver = fictitious_play.XFPSolver(game)
for i in range(1000):
xfp_solver.iteration()
if i % 10 == 0:
conv = exploitability.nash_conv(game, xfp_solver.average_policy())
print("FP in Matching Pennies 3p. Iter: {}, NashConv: {}".format(
i, conv))
def test_shapleys_game(self):
game = pyspiel.load_game_as_turn_based("matrix_shapleys_game")
xfp_solver = fictitious_play.XFPSolver(game)
for i in range(1000):
xfp_solver.iteration()
if i % 10 == 0:
conv = exploitability.nash_conv(game, xfp_solver.average_policy())
print("FP in Shapley's Game. Iter: {}, NashConv: {}".format(i, conv))
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/fictitious_play_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Nash averaging.
Based on https://arxiv.org/abs/1806.02643. An axiomatic strategy evaluation
metric for Agent-vs-Agent or Agent-vs-Task two-player zero-sum games.
"""
import cvxpy as cp
import numpy as np
from open_spiel.python.egt.utils import game_payoffs_array
def _max_entropy_symmetric_nash(p_mat, eps=1e-9):
"""Solves for the maxent symmetric nash for symmetric 2P zero-sum games.
Using convex programming:
    min p^T log(p)
  s.t.
    p_mat.dot(p) <= 0, since the game value must be 0
    p >= eps (elementwise; eps >= 0 is a minimum probability threshold)
    1^T * p = 1
Args:
p_mat: an N*N anti-symmetric payoff matrix for the row player
eps: minimum probability threshold
Returns:
p*: a maxent symmetric nash
"""
assert np.array_equal(p_mat, -p_mat.T) and eps >= 0 and eps <= 0.5
n = len(p_mat)
x = cp.Variable(shape=n)
obj = cp.Maximize(cp.sum(cp.entr(x)))
constraints = [p_mat @ x <= 0, x >= eps * np.ones(n)]
constraints.append(cp.sum(x) == 1)
prob = cp.Problem(obj, constraints)
prob.solve()
return x.value.reshape((-1, 1))
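def _example_maxent_nash_rps_sketch():
  """Illustrative sketch, not part of the original module.

  Rock-paper-scissors has an anti-symmetric payoff matrix whose unique (and
  hence maxent) symmetric Nash equilibrium is uniform, so the solver should
  return probabilities close to 1/3 each.
  """
  rps = np.array([[0.0, -1.0, 1.0],
                  [1.0, 0.0, -1.0],
                  [-1.0, 1.0, 0.0]])
  p = _max_entropy_symmetric_nash(rps)
  assert np.allclose(p.flatten(), np.ones(3) / 3, atol=1e-3)
  return p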
def _max_entropy_symmetric_nash_avt(p_mat, num_agents, num_tasks, eps=1e-9):
"""Solves for the maxent symmetric nash for symmetric 2P zero-sum games.
This covers the agent-vs-task cases.
Using convex programming:
    min x^T log(x) + y^T log(y)
  s.t.
    x >= eps (elementwise; eps >= 0 is a minimum probability threshold)
    1^T * x = 1
    y >= eps (elementwise; eps >= 0 is a minimum probability threshold)
    1^T * y = 1
    for every s that places exactly one unit of mass on an agent strategy
    and one unit of mass on a task strategy:
      s^T * p_mat * z <= 0, where z = [x, y], since the game value is 0.
Args:
p_mat: an N*N anti-symmetric payoff matrix for the row player
num_agents: number of agents
num_tasks: number of tasks
eps: minimum probability threshold
Returns:
(x*, y*): a maxent symmetric nash
"""
assert np.array_equal(p_mat, -p_mat.T) and eps >= 0 and eps <= 0.5
n = len(p_mat)
assert n == num_agents + num_tasks
x = cp.Variable(shape=num_agents)
y = cp.Variable(shape=num_tasks)
z = cp.hstack([x, y])
obj = cp.Maximize(cp.sum(cp.entr(z)))
constraints = [
x >= eps * np.ones(num_agents),
cp.sum(x) == 1,
y >= eps * np.ones(num_tasks),
cp.sum(y) == 1,
]
dev_payoffs = p_mat @ z
for a_idx in range(num_agents):
for t_idx in range(num_tasks):
pure_strategy = np.zeros(n)
pure_strategy[a_idx] = 1
pure_strategy[num_agents + t_idx] = 1
pure_strategy = pure_strategy.reshape((1, -1))
constraints.append(pure_strategy @ dev_payoffs <= 0)
prob = cp.Problem(obj, constraints)
prob.solve()
return x.value.reshape((-1, 1)), y.value.reshape((-1, 1))
def nash_averaging_avt_matrix(s_mat, eps=0.0):
"""Apply the agent-vs-task Nash Averaging from Appendix D, from a matrix.
Args:
s_mat: The S matrix from the paper, representing m rows (agents) and n
columns (tasks), with scores for the agent on the task. Note that the
values need not be normalized, but will be normalized across tasks before
being processed.
eps: minimum probability threshold.
Returns:
    maxent_nash: a pair (agent mixture, task mixture) of maxent Nash mixtures
    nash_avg_score: a pair (agent Nash-average scores, task Nash-average
      scores), i.e. the expected payoffs under maxent_nash
"""
m, n = s_mat.shape
min_payoffs = np.min(s_mat, axis=0)
max_payoffs = np.max(s_mat, axis=0)
std_p_mat = (s_mat - min_payoffs) / (max_payoffs - min_payoffs)
a_mat = np.block([
[np.zeros(shape=(m, m)), std_p_mat],
[-std_p_mat.T, np.zeros(shape=(n, n))],
])
pa_sol, pe_sol = _max_entropy_symmetric_nash_avt(
a_mat, num_agents=m, num_tasks=n, eps=eps)
pa, pe = np.asarray(pa_sol), np.asarray(pe_sol)
return (pa, pe), (std_p_mat.dot(pe), -std_p_mat.T.dot(pa))
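def _example_nash_averaging_avt_sketch():
  """Illustrative sketch, not part of the original module.

  Two agents and two tasks where each agent is best on exactly one task.
  After per-task normalization the problem is symmetric, so the maxent Nash
  is (close to) uniform on both the agent side and the task side.
  """
  s_mat = np.array([[1.0, 0.0],
                    [0.0, 1.0]])  # rows: agents, columns: tasks
  (agent_nash, task_nash), (agent_scores, task_scores) = (
      nash_averaging_avt_matrix(s_mat))
  assert np.allclose(agent_nash.flatten(), [0.5, 0.5], atol=1e-3)
  assert np.allclose(task_nash.flatten(), [0.5, 0.5], atol=1e-3)
  return agent_nash, task_nash, agent_scores, task_scores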
def nash_averaging(game, eps=0.0, a_v_a=True):
"""Nash averaging, see https://arxiv.org/abs/1806.02643.
Args:
game: a pyspiel game
eps: minimum probability mass for maxent nash
a_v_a: whether it is Agent-vs-Agent or Agent-vs-Task
Returns:
    maxent_nash: the maxent Nash mixture (Agent-vs-Agent), or a pair of
      mixtures for agents and tasks (Agent-vs-Task)
    nash_avg_score: the expected payoff(s) under maxent_nash
"""
p_mat = game_payoffs_array(game)
if len(p_mat) != 2:
raise ValueError("Nash Averaging works only for two players.")
if np.max(np.abs(p_mat[0] + p_mat[1])) > 0:
raise ValueError("Must be zero-sum")
if a_v_a:
if not np.array_equal(p_mat[0], -p_mat[0].T):
raise ValueError(
"AvA only works for symmetric two-player zero-sum games.")
maxent_nash = np.array(_max_entropy_symmetric_nash(p_mat[0], eps=eps))
return maxent_nash, p_mat[0].dot(maxent_nash)
# For AvT, see appendix D of the paper.
# Here assumes the row player represents agents and the column player
# represents tasks.
# game does not have to be symmetric
return nash_averaging_avt_matrix(p_mat[0], eps=eps)
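def _example_nash_averaging_ava_sketch():
  """Illustrative usage sketch, not part of the original module.

  Agent-vs-Agent Nash averaging on rock-paper-scissors. Assumes the standard
  `matrix_rps` game registered in pyspiel (anti-symmetric 0/+1/-1 payoffs):
  the maxent Nash is uniform and every strategy gets a Nash-average score of
  (approximately) zero.
  """
  import pyspiel  # local import; the rest of this module does not need it

  game = pyspiel.load_game("matrix_rps")
  maxent_nash, nash_avg_score = nash_averaging(game, a_v_a=True)
  assert np.allclose(maxent_nash.flatten(), np.ones(3) / 3, atol=1e-3)
  assert np.allclose(nash_avg_score.flatten(), np.zeros(3), atol=1e-3)
  return maxent_nash, nash_avg_score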
| open_spiel-master | open_spiel/python/algorithms/nash_averaging.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.mcts."""
import math
import random
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import evaluate_bots
from open_spiel.python.algorithms import mcts
import pyspiel
UCT_C = math.sqrt(2)
def _get_action(state, action_str):
for action in state.legal_actions():
if action_str == state.action_to_string(state.current_player(), action):
return action
raise ValueError("invalid action string: {}".format(action_str))
def search_tic_tac_toe_state(initial_actions):
game = pyspiel.load_game("tic_tac_toe")
state = game.new_initial_state()
for action_str in initial_actions.split(" "):
state.apply_action(_get_action(state, action_str))
rng = np.random.RandomState(42)
bot = mcts.MCTSBot(
game,
UCT_C,
max_simulations=10000,
solve=True,
random_state=rng,
evaluator=mcts.RandomRolloutEvaluator(n_rollouts=20, random_state=rng))
return bot.mcts_search(state), state
def make_node(action, player=0, prior=1, **kwargs):
node = mcts.SearchNode(action, player, prior)
for k, v in kwargs.items():
setattr(node, k, v)
return node
class MctsBotTest(absltest.TestCase):
def assertTTTStateStr(self, state, expected):
expected = expected.replace(" ", "").strip()
self.assertEqual(str(state), expected)
def test_can_play_tic_tac_toe(self):
game = pyspiel.load_game("tic_tac_toe")
max_simulations = 100
evaluator = mcts.RandomRolloutEvaluator(n_rollouts=20)
bots = [
mcts.MCTSBot(game, UCT_C, max_simulations, evaluator),
mcts.MCTSBot(game, UCT_C, max_simulations, evaluator),
]
v = evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
self.assertEqual(v[0] + v[1], 0)
def test_can_play_both_sides(self):
game = pyspiel.load_game("tic_tac_toe")
bot = mcts.MCTSBot(game, UCT_C, max_simulations=100,
evaluator=mcts.RandomRolloutEvaluator(n_rollouts=20))
bots = [bot, bot]
v = evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
self.assertEqual(v[0] + v[1], 0)
def test_can_play_single_player(self):
game = pyspiel.load_game("catch")
max_simulations = 100
evaluator = mcts.RandomRolloutEvaluator(n_rollouts=20)
bots = [mcts.MCTSBot(game, UCT_C, max_simulations, evaluator)]
v = evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
self.assertGreater(v[0], 0)
def test_throws_on_simultaneous_game(self):
game = pyspiel.load_game("matrix_mp")
evaluator = mcts.RandomRolloutEvaluator(n_rollouts=20)
with self.assertRaises(ValueError):
mcts.MCTSBot(game, UCT_C, max_simulations=100, evaluator=evaluator)
def test_can_play_three_player_stochastic_games(self):
game = pyspiel.load_game("pig(players=3,winscore=20,horizon=30)")
max_simulations = 100
evaluator = mcts.RandomRolloutEvaluator(n_rollouts=5)
bots = [
mcts.MCTSBot(game, UCT_C, max_simulations, evaluator),
mcts.MCTSBot(game, UCT_C, max_simulations, evaluator),
mcts.MCTSBot(game, UCT_C, max_simulations, evaluator),
]
v = evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
self.assertEqual(sum(v), 0)
def test_solve_draw(self):
root, state = search_tic_tac_toe_state("x(1,1) o(0,0) x(2,2)")
self.assertTTTStateStr(state, """
o..
.x.
..x
""")
self.assertEqual(root.outcome[root.player], 0)
for c in root.children:
self.assertLessEqual(c.outcome[c.player], 0) # No winning moves.
best = root.best_child()
self.assertEqual(best.outcome[best.player], 0)
self.assertIn(
state.action_to_string(best.player, best.action),
("o(0,2)", "o(2,0)")) # All others lose.
def test_solve_loss(self):
root, state = search_tic_tac_toe_state("x(1,1) o(0,0) x(2,2) o(0,1) x(0,2)")
self.assertTTTStateStr(state, """
oox
.x.
..x
""")
self.assertEqual(root.outcome[root.player], -1)
for c in root.children:
self.assertEqual(c.outcome[c.player], -1) # All losses.
def test_solve_win(self):
root, state = search_tic_tac_toe_state("x(0,1) o(2,2)")
self.assertTTTStateStr(state, """
.x.
...
..o
""")
self.assertEqual(root.outcome[root.player], 1)
best = root.best_child()
self.assertEqual(best.outcome[best.player], 1)
self.assertEqual(state.action_to_string(best.player, best.action), "x(0,2)")
def assertBestChild(self, choice, children):
# If this causes flakiness, the key in `SearchNode.best_child` is bad.
random.shuffle(children)
root = make_node(-1, children=children)
self.assertEqual(root.best_child().action, choice)
def test_choose_most_visited_when_not_solved(self):
self.assertBestChild(0, [
make_node(0, explore_count=50, total_reward=30),
make_node(1, explore_count=40, total_reward=40),
])
def test_choose_win_over_most_visited(self):
self.assertBestChild(1, [
make_node(0, explore_count=50, total_reward=30),
make_node(1, explore_count=40, total_reward=40, outcome=[1]),
])
def test_choose_best_over_good(self):
self.assertBestChild(1, [
make_node(0, explore_count=50, total_reward=30, outcome=[0.5]),
make_node(1, explore_count=40, total_reward=40, outcome=[0.8]),
])
def test_choose_bad_over_worst(self):
self.assertBestChild(0, [
make_node(0, explore_count=50, total_reward=30, outcome=[-0.5]),
make_node(1, explore_count=40, total_reward=40, outcome=[-0.8]),
])
def test_choose_positive_reward_over_promising(self):
    self.assertBestChild(1, [
        make_node(0, explore_count=50, total_reward=40),  # more promising
        make_node(1, explore_count=10, total_reward=1,
                  outcome=[0.1]),  # solved
    ])
def test_choose_most_visited_over_loss(self):
self.assertBestChild(0, [
make_node(0, explore_count=50, total_reward=30),
make_node(1, explore_count=40, total_reward=40, outcome=[-1]),
])
def test_choose_most_visited_over_draw(self):
self.assertBestChild(0, [
make_node(0, explore_count=50, total_reward=30),
make_node(1, explore_count=40, total_reward=40, outcome=[0]),
])
def test_choose_uncertainty_over_most_visited_loss(self):
self.assertBestChild(1, [
make_node(0, explore_count=50, total_reward=30, outcome=[-1]),
make_node(1, explore_count=40, total_reward=40),
])
def test_choose_slowest_loss(self):
self.assertBestChild(1, [
make_node(0, explore_count=50, total_reward=10, outcome=[-1]),
make_node(1, explore_count=60, total_reward=15, outcome=[-1]),
])
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/mcts_test.py |