python_code | repo_name | file_path
---|---|---|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monte-Carlo Tree Search algorithm for game play."""
import math
import time
import numpy as np
import pyspiel
class Evaluator(object):
"""Abstract class representing an evaluation function for a game.
The evaluation function takes in an intermediate state in the game and returns
an evaluation of that state, which should correlate with chances of winning
the game. It returns the evaluation from all players' perspectives.
"""
def evaluate(self, state):
"""Returns evaluation on given state."""
raise NotImplementedError
def prior(self, state):
"""Returns a probability for each legal action in the given state."""
raise NotImplementedError
class RandomRolloutEvaluator(Evaluator):
"""A simple evaluator doing random rollouts.
This evaluator returns the average outcome of playing random actions from the
given state until the end of the game. n_rollouts is the number of random
outcomes to be considered.
"""
def __init__(self, n_rollouts=1, random_state=None):
self.n_rollouts = n_rollouts
self._random_state = random_state or np.random.RandomState()
def evaluate(self, state):
"""Returns evaluation on given state."""
result = None
for _ in range(self.n_rollouts):
working_state = state.clone()
while not working_state.is_terminal():
if working_state.is_chance_node():
outcomes = working_state.chance_outcomes()
action_list, prob_list = zip(*outcomes)
action = self._random_state.choice(action_list, p=prob_list)
else:
action = self._random_state.choice(working_state.legal_actions())
working_state.apply_action(action)
returns = np.array(working_state.returns())
result = returns if result is None else result + returns
return result / self.n_rollouts
def prior(self, state):
"""Returns equal probability for all actions."""
if state.is_chance_node():
return state.chance_outcomes()
else:
legal_actions = state.legal_actions(state.current_player())
return [(action, 1.0 / len(legal_actions)) for action in legal_actions]
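# Example (illustrative sketch, not part of the original module): evaluating a
# fresh tic_tac_toe state with random rollouts. The game name and rollout
# count are arbitrary choices; `pyspiel` is imported at the top of this file.
def _example_random_rollout_evaluation():
  game = pyspiel.load_game("tic_tac_toe")
  evaluator = RandomRolloutEvaluator(n_rollouts=10)
  # Returns one value per player; for a zero-sum game they sum to zero.
  return evaluator.evaluate(game.new_initial_state())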
class SearchNode(object):
"""A node in the search tree.
A SearchNode represents a state and possible continuations from it. Each child
represents a possible action, and the expected result from doing so.
Attributes:
action: The action from the parent node's perspective. Not important for the
root node, as the actions that lead to it are in the past.
player: Which player made this action.
prior: A prior probability for how likely this action will be selected.
explore_count: How many times this node was explored.
total_reward: The sum of rewards of rollouts through this node, from the
parent node's perspective. The average reward of this node is
`total_reward / explore_count`
outcome: The rewards for all players if this is a terminal node or the
subtree has been proven, otherwise None.
children: A list of SearchNodes representing the possible actions from this
node, along with their expected rewards.
"""
__slots__ = [
"action",
"player",
"prior",
"explore_count",
"total_reward",
"outcome",
"children",
]
def __init__(self, action, player, prior):
self.action = action
self.player = player
self.prior = prior
self.explore_count = 0
self.total_reward = 0.0
self.outcome = None
self.children = []
def uct_value(self, parent_explore_count, uct_c):
"""Returns the UCT value of child."""
if self.outcome is not None:
return self.outcome[self.player]
if self.explore_count == 0:
return float("inf")
return self.total_reward / self.explore_count + uct_c * math.sqrt(
math.log(parent_explore_count) / self.explore_count)
def puct_value(self, parent_explore_count, uct_c):
"""Returns the PUCT value of child."""
if self.outcome is not None:
return self.outcome[self.player]
return ((self.explore_count and self.total_reward / self.explore_count) +
uct_c * self.prior * math.sqrt(parent_explore_count) /
(self.explore_count + 1))
def sort_key(self):
"""Returns the best action from this node, either proven or most visited.
This ordering leads to choosing:
- Highest proven score > 0 over anything else, including a promising but
unproven action.
- A proven draw only if it has higher exploration than others that are
uncertain, or the others are losses.
- Uncertain action with most exploration over loss of any difficulty
- Hardest loss if everything is a loss
- Highest expected reward if explore counts are equal (unlikely).
- Longest win, if multiple are proven (unlikely due to early stopping).
"""
return (0 if self.outcome is None else self.outcome[self.player],
self.explore_count, self.total_reward)
def best_child(self):
"""Returns the best child in order of the sort key."""
return max(self.children, key=SearchNode.sort_key)
def children_str(self, state=None):
"""Returns the string representation of this node's children.
They are ordered by the sort key, i.e. in the order they would be chosen to play.
Args:
state: A `pyspiel.State` object, to be used to convert the action id into
a human readable format. If None, the action integer id is used.
"""
return "\n".join([
c.to_str(state)
for c in reversed(sorted(self.children, key=SearchNode.sort_key))
])
def to_str(self, state=None):
"""Returns the string representation of this node.
Args:
state: A `pyspiel.State` object, to be used to convert the action id into
a human readable format. If None, the action integer id is used.
"""
action = (
state.action_to_string(state.current_player(), self.action)
if state and self.action is not None else str(self.action))
return ("{:>6}: player: {}, prior: {:5.3f}, value: {:6.3f}, sims: {:5d}, "
"outcome: {}, {:3d} children").format(
action, self.player, self.prior, self.explore_count and
self.total_reward / self.explore_count, self.explore_count,
("{:4.1f}".format(self.outcome[self.player])
if self.outcome else "none"), len(self.children))
def __str__(self):
return self.to_str(None)
class MCTSBot(pyspiel.Bot):
"""Bot that uses Monte-Carlo Tree Search algorithm."""
def __init__(self,
game,
uct_c,
max_simulations,
evaluator,
solve=True,
random_state=None,
child_selection_fn=SearchNode.uct_value,
dirichlet_noise=None,
verbose=False,
dont_return_chance_node=False):
"""Initializes a MCTS Search algorithm in the form of a bot.
In multiplayer games, or non-zero-sum games, the players will play the
greedy strategy.
Args:
game: A pyspiel.Game to play.
uct_c: The exploration constant for UCT.
max_simulations: How many iterations of MCTS to perform, i.e. how many
nodes in the search tree will be evaluated. Each simulation results in one
call to the evaluator. Memory usage should grow linearly with
simulations * branching factor, and the eventual tree depth is correlated
with this value.
evaluator: A `Evaluator` object to use to evaluate a leaf node.
solve: Whether to back up solved states.
random_state: An optional numpy RandomState to make it deterministic.
child_selection_fn: A function to select the child in the descent phase.
The default is UCT.
dirichlet_noise: A tuple of (epsilon, alpha) for adding dirichlet noise to
the policy at the root. This is from the alpha-zero paper.
verbose: Whether to print information about the search tree before
returning the action. Useful for confirming the search is working
sensibly.
dont_return_chance_node: If true, do not stop expanding at chance nodes.
Enabled for AlphaZero.
Raises:
ValueError: if the game type isn't supported.
"""
pyspiel.Bot.__init__(self)
# Check that the game satisfies the conditions for this MCTS implementation.
game_type = game.get_type()
if game_type.reward_model != pyspiel.GameType.RewardModel.TERMINAL:
raise ValueError("Game must have terminal rewards.")
if game_type.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("Game must have sequential turns.")
self._game = game
self.uct_c = uct_c
self.max_simulations = max_simulations
self.evaluator = evaluator
self.verbose = verbose
self.solve = solve
self.max_utility = game.max_utility()
self._dirichlet_noise = dirichlet_noise
self._random_state = random_state or np.random.RandomState()
self._child_selection_fn = child_selection_fn
self.dont_return_chance_node = dont_return_chance_node
def restart_at(self, state):
pass
def step_with_policy(self, state):
"""Returns bot's policy and action at given state."""
t1 = time.time()
root = self.mcts_search(state)
best = root.best_child()
if self.verbose:
seconds = time.time() - t1
print("Finished {} sims in {:.3f} secs, {:.1f} sims/s".format(
root.explore_count, seconds, root.explore_count / seconds))
print("Root:")
print(root.to_str(state))
print("Children:")
print(root.children_str(state))
if best.children:
chosen_state = state.clone()
chosen_state.apply_action(best.action)
print("Children of chosen:")
print(best.children_str(chosen_state))
mcts_action = best.action
policy = [(action, (1.0 if action == mcts_action else 0.0))
for action in state.legal_actions(state.current_player())]
return policy, mcts_action
def step(self, state):
return self.step_with_policy(state)[1]
def _apply_tree_policy(self, root, state):
"""Applies the UCT policy to play the game until reaching a leaf node.
A leaf node is defined as a node that is terminal or has not been evaluated
yet. If it reaches a node that has been evaluated before but hasn't been
expanded, then expand its children and continue.
Args:
root: The root node in the search tree.
state: The state of the game at the root node.
Returns:
visit_path: A list of nodes descending from the root node to a leaf node.
working_state: The state of the game at the leaf node.
"""
visit_path = [root]
working_state = state.clone()
current_node = root
while (not working_state.is_terminal() and
current_node.explore_count > 0) or (
working_state.is_chance_node() and self.dont_return_chance_node):
if not current_node.children:
# For a new node, initialize its state, then choose a child as normal.
legal_actions = self.evaluator.prior(working_state)
if current_node is root and self._dirichlet_noise:
epsilon, alpha = self._dirichlet_noise
noise = self._random_state.dirichlet([alpha] * len(legal_actions))
legal_actions = [(a, (1 - epsilon) * p + epsilon * n)
for (a, p), n in zip(legal_actions, noise)]
# Reduce bias from move generation order.
self._random_state.shuffle(legal_actions)
player = working_state.current_player()
current_node.children = [
SearchNode(action, player, prior) for action, prior in legal_actions
]
if working_state.is_chance_node():
# For chance nodes, rollout according to chance node's probability
# distribution
outcomes = working_state.chance_outcomes()
action_list, prob_list = zip(*outcomes)
action = self._random_state.choice(action_list, p=prob_list)
chosen_child = next(
c for c in current_node.children if c.action == action)
else:
# Otherwise choose node with largest UCT value
chosen_child = max(
current_node.children,
key=lambda c: self._child_selection_fn( # pylint: disable=g-long-lambda
c, current_node.explore_count, self.uct_c))
working_state.apply_action(chosen_child.action)
current_node = chosen_child
visit_path.append(current_node)
return visit_path, working_state
def mcts_search(self, state):
"""A vanilla Monte-Carlo Tree Search algorithm.
This algorithm searches the game tree from the given state.
At the leaf, the evaluator is called if the game state is not terminal.
A total of max_simulations states are explored.
At every node, the algorithm chooses the action with the highest PUCT value,
defined as: `Q/N + c * prior * sqrt(parent_N) / N`, where Q is the total
reward after the action, and N is the number of times the action was
explored in this position. The input parameter c controls the balance
between exploration and exploitation; higher values of c encourage
exploration of under-explored nodes. Unseen actions are always explored
first.
At the end of the search, the chosen action is the action that has been
explored most often. This is the action that is returned.
This implementation supports sequential n-player games, with or without
chance nodes. All players maximize their own reward and ignore the other
players' rewards. This corresponds to max^n for n-player games. It is the
norm for zero-sum games, but doesn't have any special handling for
non-zero-sum games. It doesn't have any special handling for imperfect
information games.
The implementation also supports backing up solved states, i.e. MCTS-Solver.
The implementation is general in that it is based on a max^n backup (each
player greedily chooses their maximum among proven children values, or there
exists one child whose proven value is game.max_utility()), so it will work
for multiplayer, general-sum, and arbitrary payoff games (not just win/loss/
draw games). Also chance nodes are considered proven only if all children
have the same value.
Some references:
- Sturtevant, An Analysis of UCT in Multi-Player Games, 2008,
https://web.cs.du.edu/~sturtevant/papers/multi-player_UCT.pdf
- Nijssen, Monte-Carlo Tree Search for Multi-Player Games, 2013,
https://project.dke.maastrichtuniversity.nl/games/files/phd/Nijssen_thesis.pdf
- Silver, AlphaGo Zero: Starting from scratch, 2017
https://deepmind.com/blog/article/alphago-zero-starting-scratch
- Winands, Bjornsson, and Saito, "Monte-Carlo Tree Search Solver", 2008.
https://dke.maastrichtuniversity.nl/m.winands/documents/uctloa.pdf
Arguments:
state: pyspiel.State object, state to search from
Returns:
The most visited move from the root node.
"""
root = SearchNode(None, state.current_player(), 1)
for _ in range(self.max_simulations):
visit_path, working_state = self._apply_tree_policy(root, state)
if working_state.is_terminal():
returns = working_state.returns()
visit_path[-1].outcome = returns
solved = self.solve
else:
returns = self.evaluator.evaluate(working_state)
solved = False
while visit_path:
# For chance nodes, walk up the tree to find the decision-maker.
decision_node_idx = -1
while visit_path[decision_node_idx].player == pyspiel.PlayerId.CHANCE:
decision_node_idx -= 1
# Chance node targets are for the respective decision-maker.
target_return = returns[visit_path[decision_node_idx].player]
node = visit_path.pop()
node.total_reward += target_return
node.explore_count += 1
if solved and node.children:
player = node.children[0].player
if player == pyspiel.PlayerId.CHANCE:
# Only back up chance nodes if all have the same outcome.
# An alternative would be to back up the weighted average of
# outcomes if all children are solved, but that is less clear.
outcome = node.children[0].outcome
if (outcome is not None and
all(np.array_equal(c.outcome, outcome) for c in node.children)):
node.outcome = outcome
else:
solved = False
else:
# If any have max utility (won?), or all children are solved,
# choose the one best for the player choosing.
best = None
all_solved = True
for child in node.children:
if child.outcome is None:
all_solved = False
elif best is None or child.outcome[player] > best.outcome[player]:
best = child
if (best is not None and
(all_solved or best.outcome[player] == self.max_utility)):
node.outcome = best.outcome
else:
solved = False
if root.outcome is not None:
break
return root
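# Example (illustrative sketch, not part of the original module): self-play on
# tic_tac_toe using MCTSBot with a random-rollout evaluator. The game name,
# exploration constant, and simulation budget are arbitrary choices.
def _example_mcts_self_play():
  game = pyspiel.load_game("tic_tac_toe")
  bot = MCTSBot(
      game,
      uct_c=2,
      max_simulations=100,
      evaluator=RandomRolloutEvaluator(n_rollouts=5),
      random_state=np.random.RandomState(42))
  state = game.new_initial_state()
  while not state.is_terminal():
    # The same bot searches from the perspective of the current player.
    state.apply_action(bot.step(state))
  return state.returns()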
| open_spiel-master | open_spiel/python/algorithms/mcts.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes the value of a given policy."""
from typing import List, Union
import numpy as np
from open_spiel.python import policy
def _transitions(state, policies):
"""Returns iterator over (action, prob) from the given state."""
if state.is_chance_node():
return state.chance_outcomes()
elif state.is_simultaneous_node():
return policy.joint_action_probabilities(state, policies)
else:
player = state.current_player()
return policies[player].action_probabilities(state).items()
def policy_value(state,
policies: Union[List[policy.Policy], policy.Policy],
probability_threshold: float = 0):
"""Returns the expected values for the state for players following `policies`.
Computes the expected value of the `state` for each player, assuming player `i`
follows the policy given in `policies[i]`.
Args:
state: A `pyspiel.State`.
policies: A `list` of `policy.Policy` objects, one per player for sequential
games, or a single policy for simultaneous games.
probability_threshold: only sum over entries with prob greater than this
(default: 0).
Returns:
A `numpy.array` containing the expected value for each player.
"""
if state.is_terminal():
return np.array(state.returns())
else:
return sum(prob * policy_value(policy.child(state, action), policies)
for action, prob in _transitions(state, policies)
if prob > probability_threshold)
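# Example (illustrative sketch, not part of the original module): the expected
# returns of uniform random play in Kuhn poker. `pyspiel` is assumed to be
# installed; the result should be close to [-1/8, 1/8].
def _example_uniform_policy_value():
  import pyspiel
  game = pyspiel.load_game("kuhn_poker")
  uniform = policy.UniformRandomPolicy(game)
  return policy_value(game.new_initial_state(), [uniform, uniform])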
| open_spiel-master | open_spiel/python/algorithms/expected_game_score.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MIP-Nash.
Based on the first formulation of
https://dl.acm.org/doi/10.5555/1619410.1619413.
Compute optimal Nash equilibrium of two-player general-sum games
by solving a mixed-integer programming problem.
"""
import cvxpy as cp
import numpy as np
from open_spiel.python.algorithms.projected_replicator_dynamics import _simplex_projection
from open_spiel.python.egt.utils import game_payoffs_array
def mip_nash(game, objective, solver='GLPK_MI'):
"""Solves for the optimal Nash for two-player general-sum games.
Using mixed-integer programming:
min f(x_0, x_1, p_mat)
s.t.
(u_0, u_1 are the Nash payoff variables of players 0 and 1)
p_mat[0] * x_1 <= u_0
x_0^T*p_mat[1] <= u_1
(if a pure strategy is in the support then its payoff is Nash payoff)
u_0 - p_mat[0] * x_1 <= u_max_0 * b_0
u_1 - x_0^T*p_mat[1] <= u_max_1 * b_1
(if a pure strategy is not in the support its probability mass is 0)
x_0 <= 1 - b_0
x_1 <= 1 - b_1
(probability constraints)
x_0 >= 0
1^T * x_0 = 1
x_1 >= 0
1^T * x_1 = 1
for all n, b_0[n] in {0, 1},
for all m, b_1[m] in {0, 1},
u_max_0, u_max_1 are the maximum payoff differences of player 0 and 1.
Note: this formulation is a basic one that may only work well
for simple objective functions or low-dimensional inputs.
GLPK_MI solver only handles linear objective.
To handle nonlinear and high-dimensional cases,
it is recommended to use advanced solvers such as GUROBI,
or use a piecewise linear approximation of the objective.
Args:
game: a pyspiel matrix game object
objective: a string representing the objective (e.g., MAX_SOCIAL_WELFARE)
solver: the mixed-integer solver used by cvxpy
Returns:
optimal Nash (x_0, x_1)
"""
p_mat = game_payoffs_array(game)
if len(p_mat) != 2:
raise ValueError('MIP-Nash only works for two players.')
assert len(p_mat) == 2
assert p_mat[0].shape == p_mat[1].shape
(m_0, m_1) = p_mat[0].shape
u_max_0 = np.max(p_mat[0]) - np.min(p_mat[0])
u_max_1 = np.max(p_mat[1]) - np.min(p_mat[1])
x_0 = cp.Variable(m_0)
x_1 = cp.Variable(m_1)
u_0 = cp.Variable(1)
u_1 = cp.Variable(1)
b_0 = cp.Variable(m_0, boolean=True)
b_1 = cp.Variable(m_1, boolean=True)
u_m = p_mat[0] @ x_1
u_n = x_0 @ p_mat[1]
# probabilities constraints
constraints = [x_0 >= 0, x_1 >= 0, cp.sum(x_0) == 1, cp.sum(x_1) == 1]
# support constraints
constraints.extend([u_m <= u_0, u_0 - u_m <= u_max_0 * b_0, x_0 <= 1 - b_0])
constraints.extend([u_n <= u_1, u_1 - u_n <= u_max_1 * b_1, x_1 <= 1 - b_1])
variables = {
'x_0': x_0,
'x_1': x_1,
'u_0': u_0,
'u_1': u_1,
'b_0': b_0,
'b_1': b_1,
'p_mat': p_mat,
}
obj = TWO_PLAYER_OBJECTIVE[objective](variables)
prob = cp.Problem(obj, constraints)
prob.solve(solver=solver)
return _simplex_projection(x_0.value.reshape(-1)), _simplex_projection(
x_1.value.reshape(-1)
)
def max_social_welfare_two_player(variables):
"""Max social welfare objective."""
return cp.Maximize(variables['u_0'] + variables['u_1'])
def min_social_welfare_two_player(variables):
"""Min social welfare objective."""
return cp.Minimize(variables['u_0'] + variables['u_1'])
def max_support_two_player(variables):
"""Max support objective."""
return cp.Minimize(cp.sum(variables['b_0']) + cp.sum(variables['b_1']))
def min_support_two_player(variables):
"""Min support objective."""
return cp.Maximize(cp.sum(variables['b_0']) + cp.sum(variables['b_1']))
def max_gini_two_player(variables):
"""Max gini objective."""
return cp.Minimize(
cp.sum(cp.square(variables['x_0'])) + cp.sum(cp.square(variables['x_1']))
)
TWO_PLAYER_OBJECTIVE = {
'MAX_SOCIAL_WELFARE': max_social_welfare_two_player,
'MIN_SOCIAL_WELFARE': min_social_welfare_two_player,
'MAX_SUPPORT': max_support_two_player,
'MIN_SUPPORT': min_support_two_player,
'MAX_GINI': max_gini_two_player,
}
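# Example (illustrative sketch, not part of the original module): solving a
# small matrix game for a max-social-welfare Nash equilibrium. Assumes
# `pyspiel` and a cvxpy mixed-integer backend (GLPK_MI) are installed.
def _example_mip_nash():
  import pyspiel
  game = pyspiel.load_matrix_game('matrix_pd')  # prisoner's dilemma
  x_0, x_1 = mip_nash(game, 'MAX_SOCIAL_WELFARE')
  return x_0, x_1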
| open_spiel-master | open_spiel/python/algorithms/mip_nash.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perturbates any policy with tabular-saved, fixed noise.
The policy's probabilities P' on each state s are computed as
P'(s) = alpha * epsilon + (1-alpha) * P(s),
with P the former policy's probabilities, and epsilon ~ Softmax(beta *
Normal(0, 1)).
"""
import numpy as np
from open_spiel.python import policy as openspiel_policy
class NoisyPolicy(openspiel_policy.Policy):
"""Pyspiel Best Response with added noise.
This policy's probabilities P' on each `player_id` state s is computed as
P'(s) = alpha * epsilon + (1-alpha) * P(s),
with P the former policy's probabilities, and epsilon ~ Softmax(beta *
Normal(0, 1)).
"""
def __init__(self, policy, player_id=None, alpha=0.1, beta=1.0):
"""Initializes the noisy policy.
Note that this noise only affects `player_id`.
Args:
policy: Any OpenSpiel `policy.Policy` object.
player_id: The player id whose policy will be made noisy. If `None`,
noise will be added to the policies of all players.
alpha: Mixing noise factor.
beta: Softmax inverse temperature factor.
"""
self._policy = policy
self.game = policy.game
self.game_type = self.game.get_type()
self.player_id = player_id
self._noise_dict = {}
self._alpha = alpha
self._beta = beta
def _state_key(self, state, player):
"""Returns the key to use to look up this (state, player) pair."""
if self.game_type.provides_information_state_string:
if player is None:
return state.information_state_string()
else:
return state.information_state_string(player)
elif self.game_type.provides_observation_string:
if player is None:
return state.observation_string()
else:
return state.observation_string(player)
else:
return str(state)
def get_or_create_noise(self, state, player_id=None):
"""Get noisy policy or create it and return it.
Args:
state: the state to which the policy will be applied.
player_id: the player id that will apply the noisy policy. Defaults to
the current player. Should be specified for simultaneous games.
Returns:
noise_action_probs: The noisy probability distribution on the set of legal
actions.
"""
if player_id is None:
player_id = state.current_player()
info_state = self._state_key(state, player_id)
if info_state not in self._noise_dict:
action_ids = state.legal_actions(player_id)
noise = self._beta * np.random.normal(size=len(action_ids))
noise = np.exp(noise - noise.max())
noise /= np.sum(noise)
self._noise_dict[info_state] = {
action_ids[i]: noise[i] for i in range(len(noise))
}
return self._noise_dict[info_state]
def mix_probs(self, probs, noise_probs):
return {
i: (1 - self._alpha) * probs[i] + self._alpha * noise_probs[i]
for i in probs
}
@property
def policy(self):
return self._policy
def action_probabilities(self, state, player_id=None):
"""Returns the policy for a player in a state.
Args:
state: A `pyspiel.State` object.
player_id: Optional, the player id for whom we want an action. Required
only when this is a simultaneous state at which multiple players can act.
Returns:
A `dict` of `{action: probability}` for the specified player in the
supplied state.
"""
# If self.player_id is None, or if self.player_id == current_player, add
# noise.
if ((self.player_id is None) or
(state.current_player() == self.player_id) or
(player_id == self.player_id)):
noise_probs = self.get_or_create_noise(state, player_id)
probs = self._policy.action_probabilities(state, player_id)
probs = self.mix_probs(probs, noise_probs)
return probs
# Send the default probabilities for all other players
return self._policy.action_probabilities(state, player_id)
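# Example (illustrative sketch, not part of the original module): wrapping a
# uniform tabular policy for Kuhn poker so that player 0's probabilities are
# perturbed. `pyspiel` is assumed to be installed; alpha and beta are
# arbitrary choices.
def _example_noisy_policy():
  import pyspiel
  game = pyspiel.load_game("kuhn_poker")
  base_policy = openspiel_policy.TabularPolicy(game)
  noisy = NoisyPolicy(base_policy, player_id=0, alpha=0.1, beta=1.0)
  state = game.new_initial_state()
  # Resolve the two card-dealing chance nodes to reach player 0's decision.
  state.apply_action(state.legal_actions()[0])
  state.apply_action(state.legal_actions()[0])
  return noisy.action_probabilities(state)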
| open_spiel-master | open_spiel/python/algorithms/noisy_policy.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural Replicator Dynamics [Omidshafiei et al, 2019].
A policy gradient-like extension to replicator dynamics and the hedge algorithm
that incorporates function approximation.
# References
Shayegan Omidshafiei, Daniel Hennes, Dustin Morrill, Remi Munos,
Julien Perolat, Marc Lanctot, Audrunas Gruslys, Jean-Baptiste Lespiau,
Karl Tuyls. Neural Replicator Dynamics. https://arxiv.org/abs/1906.00190.
2019.
"""
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms import rcfr
# Temporarily disable TF2 behavior while the code is not updated.
tf.disable_v2_behavior()
def thresholded(logits, regrets, threshold=2.0):
"""Zeros out `regrets` where `logits` are too negative or too large."""
can_decrease = tf.cast(tf.greater(logits, -threshold), tf.float32)
can_increase = tf.cast(tf.less(logits, threshold), tf.float32)
regrets_negative = tf.minimum(regrets, 0.0)
regrets_positive = tf.maximum(regrets, 0.0)
return can_decrease * regrets_negative + can_increase * regrets_positive
@tf.function
def train(model,
data,
batch_size,
step_size=1.0,
threshold=2.0,
random_shuffle_size=None,
autoencoder_loss=None):
"""Train NeuRD `model` on `data`."""
if random_shuffle_size is None:
random_shuffle_size = 10 * batch_size
data = data.shuffle(random_shuffle_size)
data = data.batch(batch_size)
data = data.repeat(1)
for x, regrets in data:
with tf.GradientTape() as tape:
output = model(x, training=True)
logits = output[:, :1]
logits = logits - tf.reduce_mean(logits, keepdims=True)
regrets = tf.stop_gradient(
thresholded(logits, regrets, threshold=threshold))
utility = tf.reduce_mean(logits * regrets)
if autoencoder_loss is not None:
utility = utility - autoencoder_loss(x, output[:, 1:])
grad = tape.gradient(utility, model.trainable_variables)
for i, var in enumerate(model.trainable_variables):
var.assign_add(step_size * grad[i])
class DeepNeurdModel(object):
"""A flexible deep feedforward NeuRD model class.
Properties:
layers: The `tf.keras.Layer` layers describing this model.
trainable_variables: The trainable `tf.Variable`s in this model's `layers`.
losses: This model's layer specific losses (e.g. regularizers).
"""
def __init__(self,
game,
num_hidden_units,
num_hidden_layers=1,
num_hidden_factors=0,
hidden_activation=tf.nn.relu,
use_skip_connections=False,
regularizer=None,
autoencode=False):
"""Creates a new `DeepNeurdModel.
Args:
game: The OpenSpiel game being solved.
num_hidden_units: The number of units in each hidden layer.
num_hidden_layers: The number of hidden layers. Defaults to 1.
num_hidden_factors: The number of hidden factors or the matrix rank of the
layer. If greater than zero, hidden layers will be split into two
separate linear transformations, the first with
`num_hidden_factors`-columns and the second with
`num_hidden_units`-columns. The result is that the logical hidden layer
is a rank-`num_hidden_factors` matrix instead of a rank-`num_hidden_units`
matrix. When `num_hidden_factors < num_hidden_units`, this effectively
implements weight sharing. Defaults to 0.
hidden_activation: The activation function to apply over hidden layers.
Defaults to `tf.nn.relu`.
use_skip_connections: Whether or not to apply skip connections (layer
output = layer(x) + x) on hidden layers. Zero padding or truncation is
used to match the number of columns on layer inputs and outputs.
regularizer: A regularizer to apply to each layer. Defaults to `None`.
autoencode: Whether or not to output a reconstruction of the inputs upon
being called. Defaults to `False`.
"""
self._autoencode = autoencode
self._use_skip_connections = use_skip_connections
self._hidden_are_factored = num_hidden_factors > 0
self.layers = []
for _ in range(num_hidden_layers):
if self._hidden_are_factored:
self.layers.append(
tf.keras.layers.Dense(
num_hidden_factors,
use_bias=True,
kernel_regularizer=regularizer))
self.layers.append(
tf.keras.layers.Dense(
num_hidden_units,
use_bias=True,
activation=hidden_activation,
kernel_regularizer=regularizer))
self.layers.append(
tf.keras.layers.Dense(
1 + self._autoencode * rcfr.num_features(game),
use_bias=True,
kernel_regularizer=regularizer))
# Construct variables for all layers by exercising the network.
x = tf.zeros([1, rcfr.num_features(game)])
for layer in self.layers:
x = layer(x)
self.trainable_variables = sum(
[layer.trainable_variables for layer in self.layers], [])
self.losses = sum([layer.losses for layer in self.layers], [])
def __call__(self, x, training=False):
"""Evaluates this model on x.
Args:
x: Model input.
training: Whether or not this is being called during training. If
`training` and the constructor argument `autoencode` was `True`, then
the output will contain the estimated regrets concatenated with a
reconstruction of the input, otherwise only regrets will be returned.
Defaults to `False`.
Returns:
The `tf.Tensor` resulting from evaluating this model on `x`. If
`training` and the constructor argument `autoencode` was `True`, then
it will contain the estimated regrets concatenated with a
reconstruction of the input, otherwise only regrets will be returned.
"""
y = rcfr.feedforward_evaluate(
layers=self.layers,
x=x,
use_skip_connections=self._use_skip_connections,
hidden_are_factored=self._hidden_are_factored)
return y if training else y[:, :1]
class CounterfactualNeurdSolver(object):
"""All-actions, strong NeuRD on counterfactual regrets.
No regularization bonus is applied, so the current policy likely will not
converge. The average policy profile is updated and stored in a full
game-size table and may converge to an approximate Nash equilibrium in
two-player, zero-sum games.
"""
def __init__(self, game, models, session=None):
"""Creates a new `CounterfactualNeurdSolver`.
Args:
game: An OpenSpiel `Game`.
models: Current policy models (optimizable array-like -> `tf.Tensor`
callables) for both players.
session: A TensorFlow `Session` to convert sequence weights from
`tf.Tensor`s produced by `models` to `np.array`s. If `None`, it is
assumed that eager mode is enabled. Defaults to `None`.
"""
self._game = game
self._models = models
self._root_wrapper = rcfr.RootStateWrapper(game.new_initial_state())
self._session = session
self._cumulative_seq_probs = [
np.zeros(n) for n in self._root_wrapper.num_player_sequences
]
def _sequence_weights(self, player=None):
"""Returns exponentiated weights for each sequence as an `np.array`."""
if player is None:
return [
self._sequence_weights(player)
for player in range(self._game.num_players())
]
else:
tensor = tf.squeeze(self._models[player](
self._root_wrapper.sequence_features[player]))
tensor = tensor - tf.reduce_max(tensor, keepdims=True)
tensor = tf.math.exp(tensor)
return tensor.numpy() if self._session is None else self._session(tensor)
def current_policy(self):
"""Returns the current policy profile.
Returns:
A `dict<info state, list<Action, probability>>` that maps info state
strings to `Action`-probability pairs describing each player's policy.
"""
return self._root_wrapper.sequence_weights_to_tabular_profile(
self._sequence_weights())
def average_policy(self):
"""Returns the average of all policies iterated.
The policy is computed using the accumulated policy probabilities computed
using `evaluate_and_update_policy`.
Returns:
A `dict<info state, list<Action, probability>>` that maps info state
strings to (Action, probability) pairs describing each player's policy.
"""
return self._root_wrapper.sequence_weights_to_tabular_profile(
self._cumulative_seq_probs)
def _previous_player(self, player):
"""The previous player in the turn ordering."""
return player - 1 if player > 0 else self._game.num_players() - 1
def _average_policy_update_player(self, regret_player):
"""The player for whom the average policy should be updated."""
return self._previous_player(regret_player)
def evaluate_and_update_policy(self, train_fn):
"""Performs a single step of policy evaluation and policy improvement.
Args:
train_fn: A (model, `tf.data.Dataset`) function that trains the given
regression model to accurately reproduce the x to y mapping given x-y
data.
"""
sequence_weights = self._sequence_weights()
player_seq_features = self._root_wrapper.sequence_features
for regret_player in range(self._game.num_players()):
seq_prob_player = self._average_policy_update_player(regret_player)
regrets, seq_probs = (
self._root_wrapper.counterfactual_regrets_and_reach_weights(
regret_player, seq_prob_player, *sequence_weights))
self._cumulative_seq_probs[seq_prob_player] += seq_probs
targets = tf.expand_dims(regrets.astype('float32'), axis=1)
data = tf.data.Dataset.from_tensor_slices(
(player_seq_features[regret_player], targets))
regret_player_model = self._models[regret_player]
train_fn(regret_player_model, data)
sequence_weights[regret_player] = self._sequence_weights(regret_player)
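# Example (illustrative sketch, not part of the original module): one possible
# training loop for CounterfactualNeurdSolver, loosely following the NeuRD
# example in open_spiel. Assumes eager execution is enabled (e.g. via
# `tf.enable_eager_execution()`) and that `pyspiel` is installed; all
# hyperparameters are arbitrary placeholders.
def _example_neurd_training(num_iterations=10):
  import pyspiel
  game = pyspiel.load_game("kuhn_poker")
  models = [
      DeepNeurdModel(game, num_hidden_units=13, num_hidden_layers=1)
      for _ in range(game.num_players())
  ]
  solver = CounterfactualNeurdSolver(game, models)
  def _train(model, data):
    train(model, data, batch_size=100, step_size=1.0, threshold=2.0)
  for _ in range(num_iterations):
    solver.evaluate_and_update_policy(_train)
  # The average policy may approximate a Nash equilibrium in two-player
  # zero-sum games.
  return solver.average_policy()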
| open_spiel-master | open_spiel/python/algorithms/neurd.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for ResponseGraphUCB."""
import itertools
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
from open_spiel.python.algorithms import fictitious_play
from open_spiel.python.egt import utils as egt_utils
import pyspiel
from open_spiel.python.utils import file_utils
def get_method_tuple_acronym(method_tuple):
"""Returns pretty acronym for specified ResponseGraphUCB method tuple."""
if isinstance(method_tuple, tuple):
acronyms = [get_method_acronym(m) for m in method_tuple]
return ', '.join(acronyms)
else:
return get_method_acronym(method_tuple)
def get_method_tuple_linespecs(method):
"""Gets plot linespecs for the specified ResponseGraphUCB method."""
sampling_strats = [
'uniform-exhaustive', 'uniform', 'valence-weighted', 'count-weighted'
]
conf_methods = ['ucb-standard', 'clopper-pearson-ucb']
method_to_id_map = dict(
(m, i)
for i, m in enumerate(itertools.product(sampling_strats, conf_methods)))
# Create palette
num_colors = len(method_to_id_map.keys())
colors = plt.get_cmap('Set1', num_colors).colors
# Spec out the linestyle
base_method = (method[0], method[1].replace('-relaxed', '')
) # Method name without -relaxed suffix
linespecs = {
'color': colors[method_to_id_map[base_method]]
} # Use base method for color (ignoring relaxed vs non-relaxed)
if 'relaxed' in method[1]: # Use actual method for linestyle
linespecs['linestyle'] = 'dashed'
else:
linespecs['linestyle'] = 'solid'
return linespecs
def get_method_acronym(method):
"""Gets pretty acronym for specified ResponseGraphUCB method."""
if method == 'uniform-exhaustive':
return r'$\mathcal{S}$: UE'
elif method == 'uniform':
return r'$\mathcal{S}$: U'
elif method == 'valence-weighted':
return r'$\mathcal{S}$: VW'
elif method == 'count-weighted':
return r'$\mathcal{S}$: CW'
elif method == 'ucb-standard':
return r'$\mathcal{C}(\delta)$: UCB'
elif method == 'ucb-standard-relaxed':
return r'$\mathcal{C}(\delta)$: R-UCB'
elif method == 'clopper-pearson-ucb':
return r'$\mathcal{C}(\delta)$: CP-UCB'
elif method == 'clopper-pearson-ucb-relaxed':
return r'$\mathcal{C}(\delta)$: R-CP-UCB'
elif method == 'fixedbudget-uniform':
return r'$\mathcal{S}$: U, $\mathcal{C}(\delta)$: FB'
else:
raise ValueError('Unknown sampler method: {}!'.format(method))
def digraph_edge_hamming_dist(g1, g2):
"""Returns number of directed edge mismatches between digraphs g1 and g2."""
dist = 0
for e1 in g1.edges:
if e1 not in g2.edges:
dist += 1
return dist
class BernoulliGameSampler(object):
"""A sampler for a game with Bernoulli-distributed payoffs."""
def __init__(self, strategy_spaces, means, payoff_bounds):
"""Initializes the Bernoulli game sampler.
Payoffs are automatically scaled to lie between 0 and 1.
Args:
strategy_spaces: a list of sizes of player strategy spaces.
means: 1+num_players dimensional array of mean payoffs.
payoff_bounds: min/max observable value of payoffs, necessary since one
may seek Bernoulli-sampling for games with different payoff ranges.
"""
self.strategy_spaces = strategy_spaces
self.n_players = len(strategy_spaces)
self.raw_means = means
self.payoff_bounds = payoff_bounds
self.means = self.rescale_payoff(means)
# Specific to the Bernoulli case. Probabilities in [0, 1], proportional to payoffs.
self.p_max = self.means
def rescale_payoff(self, payoff):
"""Rescales payoffs to be in [0,1]."""
# Assumes payoffs lie within [payoff_bounds[0], payoff_bounds[1]].
return (payoff - self.payoff_bounds[0]) / (
self.payoff_bounds[1] - self.payoff_bounds[0])
def observe_result(self, strat_profile):
"""Returns empirical payoffs for each agent."""
outcomes = np.zeros(self.n_players)
for k in range(self.n_players):
# Sample a Bernoulli outcome using the scaled payoff as the success probability.
outcomes[k] = np.random.choice(
[1, 0],
p=[self.p_max[k][strat_profile], 1. - self.p_max[k][strat_profile]])
return outcomes
class ZeroSumBernoulliGameSampler(BernoulliGameSampler):
"""A sampler for a zero-sum game with Bernoulli-distributed payoffs."""
def __init__(self, strategy_spaces, means, payoff_bounds):
super(ZeroSumBernoulliGameSampler, self).__init__(strategy_spaces, means,
payoff_bounds)
# Verify the game is zero-sum
assert np.allclose(np.sum(self.means, axis=0), 1.)
def observe_result(self, strat_profile):
outcomes = np.zeros(self.n_players)
win_ix = np.random.choice(
self.n_players, p=self.means[(slice(None),) + strat_profile])
outcomes[win_ix] = 1.
return outcomes
def get_payoffs_bernoulli_game(size=(2, 2, 2)):
"""Gets randomly-generated zero-sum symmetric two-player game."""
too_close = True
while too_close:
M = np.random.uniform(-1, 1, size=size) # pylint: disable=invalid-name
M[0, :, :] = 0.5 * (M[0, :, :] - M[0, :, :].T)
M[1, :, :] = -M[0, :, :]
if np.abs(M[0, 0, 1]) < 0.1:
too_close = True
else:
too_close = False
return M
def get_soccer_data():
"""Returns the payoffs and strategy labels for MuJoCo soccer experiments."""
payoff_file = file_utils.find_file(
'open_spiel/data/paper_data/response_graph_ucb/soccer.txt', 2)
payoffs = np.loadtxt(payoff_file)
return payoffs
def get_kuhn_poker_data(num_players=4, iterations=3):
"""Returns the kuhn poker data for the number of players specified."""
game = pyspiel.load_game('kuhn_poker', {'players': num_players})
xfp_solver = fictitious_play.XFPSolver(game, save_oracles=True)
for _ in range(iterations):
xfp_solver.iteration()
# Results are seed-dependent, so show some interesting cases
if num_players == 2:
meta_games = xfp_solver.get_empirical_metagame(100, seed=1)
elif num_players == 3:
meta_games = xfp_solver.get_empirical_metagame(100, seed=5)
elif num_players == 4:
meta_games = xfp_solver.get_empirical_metagame(100, seed=2)
# Metagame utility matrices for each player
payoff_tables = []
for i in range(num_players):
payoff_tables.append(meta_games[i])
return payoff_tables
def get_game_for_sampler(game_name):
"""Returns pre-processed game data for ResponseGraphUCB examples."""
# pylint: disable=invalid-name
if game_name == 'bernoulli':
M = get_payoffs_bernoulli_game()
strategy_spaces = [2, 2]
G = ZeroSumBernoulliGameSampler(
strategy_spaces, means=M, payoff_bounds=[-1., 1.])
elif game_name == 'soccer':
M = get_soccer_data()
M = M * 2. - 1 # Convert to zero-sum
strategy_spaces = np.shape(M)
M = np.asarray([M, M.T])
G = ZeroSumBernoulliGameSampler(strategy_spaces, means=M,
payoff_bounds=[np.min(M), np.max(M)])
elif game_name in ['kuhn_poker_2p', 'kuhn_poker_3p', 'kuhn_poker_4p']:
if '2p' in game_name:
num_players = 2
elif '3p' in game_name:
num_players = 3
elif '4p' in game_name:
num_players = 4
M = get_kuhn_poker_data(num_players, iterations=2) # pylint: disable=invalid-name
strategy_spaces = egt_utils.get_num_strats_per_population(M, False)
G = BernoulliGameSampler(
strategy_spaces, means=M, payoff_bounds=[np.min(M), np.max(M)])
else:
raise ValueError('Game', game_name, 'not implemented!')
# pylint: enable=invalid-name
return G
def plot_timeseries(ax, id_ax, data, xticks, xlabel='', ylabel='',
label='', logx=False, logy=False, zorder=10,
linespecs=None):
"""Plots timeseries data with error bars."""
if logx:
ax[id_ax].set_xscale('log')
if logy:
ax[id_ax].set_yscale('log')
if linespecs:
kwargs = {'color': linespecs['color']}
else:
kwargs = {}
# Seaborn's bootstrapped confidence intervals were used in the original paper
se = scipy.stats.sem(data, axis=0)
ax[id_ax].fill_between(xticks, data.mean(0)-se, data.mean(0)+se,
zorder=zorder, alpha=0.2, **kwargs)
ax[id_ax].plot(xticks, data.mean(0), label=label, zorder=zorder, **kwargs)
# There may be multiple lines on the current axis, some from previous calls to
# plot_timeseries, so reference just the latest
if linespecs:
ax[id_ax].get_lines()[-1].set_dashes([5, 5])
ax[id_ax].get_lines()[-1].set_linestyle(linespecs['linestyle'])
ax[id_ax].set(xlabel=xlabel, ylabel=ylabel)
ax[id_ax].set_axisbelow(True)
ax[id_ax].grid(True)
for _, spine in ax[id_ax].spines.items():
spine.set_zorder(-1)
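# Example (illustrative sketch, not part of the original module): drawing one
# Bernoulli-distributed payoff sample from the randomly-generated zero-sum
# game used in the ResponseGraphUCB experiments.
def _example_bernoulli_game_sample():
  sampler = get_game_for_sampler('bernoulli')
  strat_profile = (0, 1)  # (row player's strategy, column player's strategy)
  # Returns a length-2 array; exactly one entry is 1 in the zero-sum sampler.
  return sampler.observe_result(strat_profile)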
| open_spiel-master | open_spiel/python/algorithms/response_graph_ucb_utils.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds useful functions for working with dictionaries representing policies."""
from open_spiel.python.algorithms import get_all_states
def policy_to_dict(player_policy,
game,
all_states=None,
state_to_information_state=None):
"""Converts a Policy instance into a tabular policy represented as a dict.
This is compatible with the C++ TabularExploitability code (i.e.
pyspiel.exploitability, pyspiel.TabularBestResponse, etc.).
While you do not have to pass the all_states and state_to_information_state
arguments, creating them outside of this function will speed your code up
dramatically.
Args:
player_policy: The policy you want to convert to a dict.
game: The game the policy is for.
all_states: The result of calling get_all_states.get_all_states. Can be
cached for improved performance.
state_to_information_state: A dict mapping str(state) to
state.information_state for every state in the game. Can be cached for
improved performance.
Returns:
A dictionary version of player_policy that can be passed to the C++
TabularBestResponse, Exploitability, and BestResponse functions/classes.
"""
if all_states is None:
all_states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False)
state_to_information_state = {
state: all_states[state].information_state_string()
for state in all_states
}
tabular_policy = dict()
for state in all_states:
information_state = state_to_information_state[state]
tabular_policy[information_state] = list(
player_policy.action_probabilities(all_states[state]).items())
return tabular_policy
def get_best_response_actions_as_string(best_response_actions):
"""Turns a dict<bytes, int> into a bytestring compatible with C++.
i.e. the bytestring can be copy-pasted as the brace initialization for a
{std::unordered_,std::,absl::flat_hash_}map<std::string, int>.
Args:
best_response_actions: A dict mapping bytes to ints.
Returns:
A bytestring that can be copy-pasted to brace-initialize a C++
std::map<std::string, T>.
"""
best_response_keys = sorted(best_response_actions.keys())
best_response_strings = [
"%s: %i" % (k, best_response_actions[k]) for k in best_response_keys
]
return "{%s}" % (", ".join(best_response_strings))
def tabular_policy_to_cpp_map(policy):
"""Turns a policy into a C++ compatible bytestring for brace-initializing.
Args:
policy: A dict representing a tabular policy. The keys are infostate
bytestrings.
Returns:
A bytestring that can be copy-pasted to brace-initialize a C++
std::map<std::string, open_spiel::ActionsAndProbs>.
"""
cpp_entries = []
policy_keys = sorted(policy.keys())
for key in policy_keys:
tuple_strs = ["{%i, %s}" % (p[0], p[1].astype(str)) for p in policy[key]]
value = "{" + ", ".join(tuple_strs) + "}"
cpp_entries.append('{"%s", %s}' % (key, value))
return "{%s}" % (",\n".join(cpp_entries))
| open_spiel-master | open_spiel/python/algorithms/policy_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.tabular_qlearner."""
from absl.testing import absltest
import numpy as np
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import tabular_qlearner
import pyspiel
# Fixed seed to make test non stochastic.
SEED = 10000
# A simple two-action game encoded as an EFG game. Going left gets -1, going
# right gets a +1.
SIMPLE_EFG_DATA = """
EFG 2 R "Simple single-agent problem" { "Player 1" } ""
p "ROOT" 1 1 "ROOT" { "L" "R" } 0
t "L" 1 "Outcome L" { -1.0 }
t "R" 2 "Outcome R" { 1.0 }
"""
class QlearnerTest(absltest.TestCase):
def test_simple_game(self):
game = pyspiel.load_efg_game(SIMPLE_EFG_DATA)
env = rl_environment.Environment(game=game)
agent = tabular_qlearner.QLearner(0, game.num_distinct_actions())
total_reward = 0
for _ in range(100):
total_eval_reward = 0
for _ in range(1000):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step)
time_step = env.step([agent_output.action])
total_reward += time_step.rewards[0]
agent.step(time_step)
self.assertGreaterEqual(total_reward, 75)
for _ in range(1000):
time_step = env.reset()
while not time_step.last():
agent_output = agent.step(time_step, is_evaluation=True)
time_step = env.step([agent_output.action])
total_eval_reward += time_step.rewards[0]
self.assertGreaterEqual(total_eval_reward, 250)
if __name__ == "__main__":
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/tabular_qlearner_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find Nash equilibria for constant- or general-sum 2-player games.
Non-matrix games are handled by computing the normal (bimatrix) form.
The algorithms used are:
* direct computation of pure equilibria.
* linear programming to find equilibria for constant-sum games.
* iterated dominance to reduce the action space.
* reverse search vertex enumeration (if using lrsnash) to find all general-sum
equilibria.
* support enumeration (if using nashpy) to find all general-sum equilibria.
* Lemke-Howson enumeration (if using nashpy) to find one general-sum
equilibrium.
The general-sum mixed-equilibrium algorithms are likely to work well for tens of
actions, but less likely to scale beyond that.
"""
import fractions
import os
import subprocess
import tempfile
import warnings
import nashpy
import numpy as np
@np.vectorize
def to_fraction_str(x, lrsnash_max_denom):
return str(fractions.Fraction(x).limit_denominator(lrsnash_max_denom))
def lrs_solve(row_payoffs, col_payoffs, lrsnash_max_denom, lrsnash_path):
"""Find all Nash equilibria using the lrsnash solver.
`lrsnash` uses reverse search vertex enumeration on rational polytopes.
For more info, see: http://cgm.cs.mcgill.ca/~avis/C/lrslib/USERGUIDE.html#nash
Args:
row_payoffs: payoffs for row player
col_payoffs: payoffs for column player
lrsnash_max_denom: maximum denominator
lrsnash_path: path for temporary files
Yields:
(row_mixture, col_mixture), numpy vectors of float64s.
"""
num_rows, num_cols = row_payoffs.shape
game_file, game_file_path = tempfile.mkstemp()
try:
game_file = os.fdopen(game_file, "w")
# write dimensions
game_file.write("%d %d\n\n" % (num_rows, num_cols))
# write row-player payoff matrix as fractions
for row in range(num_rows):
game_file.write(
" ".join(to_fraction_str(row_payoffs[row], lrsnash_max_denom)) + "\n")
game_file.write("\n")
# write col-player payoff matrix as fractions
for row in range(num_rows):
game_file.write(
" ".join(to_fraction_str(col_payoffs[row], lrsnash_max_denom)) + "\n")
game_file.write("\n")
game_file.close()
lrs = subprocess.Popen([lrsnash_path or "lrsnash", "-s", game_file_path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
col_mixtures = []
for line in lrs.stdout:
if len(line) <= 1 or line[:1] == b"*":
continue
line = np.asfarray([fractions.Fraction(x) for x in line.decode().split()])
if line[0] == 2: # col-player
col_mixtures.append(line[1:-1])
else: # row-player
row_mixture = line[1:-1]
# row-mixture forms a Nash with every col-mixture listed directly above
for col_mixture in col_mixtures:
yield (row_mixture, col_mixture)
col_mixtures = []
finally:
os.remove(game_file_path)
def lemke_howson_solve(row_payoffs, col_payoffs):
"""Find Nash equilibria using the Lemke-Howson algorithm.
The algorithm is not guaranteed to find all equilibria. Also it can yield
wrong answers if the game is degenerate (but raises warnings in that case).
Args:
row_payoffs: payoffs for row player
col_payoffs: payoffs for column player
Yields:
(row_mixture, col_mixture), numpy vectors of float64s.
"""
showwarning = warnings.showwarning
warned_degenerate = [False]
def showwarning_check_degenerate(message, *args, **kwargs):
if "Your game could be degenerate." in str(message):
warned_degenerate[0] = True
showwarning(message, *args, **kwargs)
try:
warnings.showwarning = showwarning_check_degenerate
for row_mixture, col_mixture in nashpy.Game(
row_payoffs, col_payoffs).lemke_howson_enumeration():
if warned_degenerate[0]:
# attempt to discard obviously-wrong results
if (row_mixture.shape != row_payoffs.shape[:1] or
col_mixture.shape != row_payoffs.shape[1:]):
warnings.warn("Discarding ill-shaped solution.")
continue
if (not np.isfinite(row_mixture).all() or
not np.isfinite(col_mixture).all()):
warnings.warn("Discarding non-finite solution.")
continue
yield row_mixture, col_mixture
finally:
warnings.showwarning = showwarning
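# Example (illustrative sketch, not part of the original module): enumerating
# equilibria of matching pennies with the nashpy-based Lemke-Howson solver.
# The unique equilibrium is the 50/50 mixture for both players.
def _example_lemke_howson_matching_pennies():
  row_payoffs = np.array([[1.0, -1.0], [-1.0, 1.0]])
  col_payoffs = -row_payoffs
  return list(lemke_howson_solve(row_payoffs, col_payoffs))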
| open_spiel-master | open_spiel/python/algorithms/matrix_nash.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LP Solver for two-player zero-sum games."""
import cvxopt
import numpy as np
from open_spiel.python.egt import utils
import pyspiel
# Constants that determine the type of objective (max vs. min) and type of
# constraints (<=, >=, =).
OBJ_MAX = 1
OBJ_MIN = 2
CONS_TYPE_LEQ = 3
CONS_TYPE_GEQ = 4
CONS_TYPE_EQ = 5
# Constants that determine the type of dominance to find.
DOMINANCE_STRICT = 1
DOMINANCE_VERY_WEAK = 2
DOMINANCE_WEAK = 3
class _Variable(object):
"""A variable in an LP."""
def __init__(self, vid, lb=None, ub=None):
"""Creates a variable in a linear program.
Args:
vid: (integer) the variable id (should be unique for each variable)
lb: the lower bound on the variable's value (None means no lower bound)
ub: the upper bound on the variable's value (None means no upper bound)
"""
self.vid = vid
self.lb = lb
self.ub = ub
class _Constraint(object):
"""A constraint in an LP."""
def __init__(self, cid, ctype):
"""Creates a constraint in a linear program.
Args:
cid: (integer) the constraint id (should be unique for each constraint)
ctype: the constraint type (CONS_TYPE_{LEQ, GEQ, EQ})
"""
self.cid = cid
self.ctype = ctype
self.coeffs = {} # var label -> value
self.rhs = None
class LinearProgram(object):
"""A object used to provide a user-friendly API for building LPs."""
def __init__(self, objective):
assert objective == OBJ_MIN or objective == OBJ_MAX
self._valid_constraint_types = [CONS_TYPE_EQ, CONS_TYPE_LEQ, CONS_TYPE_GEQ]
self._objective = objective
self._obj_coeffs = {} # var label -> value
self._vars = {} # var label -> var
self._cons = {} # cons label -> constraint
self._var_list = []
self._leq_cons_list = []
self._eq_cons_list = []
def add_or_reuse_variable(self, label, lb=None, ub=None):
"""Adds a variable to this LP, or reuses one if the label exists.
If the variable already exists, simply checks that the upper and lower
bounds are the same as previously specified.
Args:
label: a label to assign to this constraint
lb: a lower-bound value for this variable
ub: an upper-bound value for this variable
"""
var = self._vars.get(label)
if var is not None:
# Do not re-add, but ensure it's the same
assert var.lb == lb and var.ub == ub
return
var = _Variable(len(self._var_list), lb, ub)
self._vars[label] = var
self._var_list.append(var)
def add_or_reuse_constraint(self, label, ctype):
"""Adds a constraint to this LP, or reuses one if the label exists.
If the constraint is already present, simply checks it's the same type as
previously specified.
Args:
label: a label to assign to this constraint
ctype: the constraint type (in CONS_TYPE_{LEQ,GEQ,EQ})
"""
assert ctype in self._valid_constraint_types
cons = self._cons.get(label)
if cons is not None:
# Do not re-add, but ensure it's the same type
assert cons.ctype == ctype
return
if ctype == CONS_TYPE_LEQ or ctype == CONS_TYPE_GEQ:
cons = _Constraint(len(self._leq_cons_list), ctype)
self._cons[label] = cons
self._leq_cons_list.append(cons)
elif ctype == CONS_TYPE_EQ:
cons = _Constraint(len(self._eq_cons_list), ctype)
self._cons[label] = cons
self._eq_cons_list.append(cons)
else:
assert False, "Unknown constraint type"
def set_obj_coeff(self, var_label, coeff):
"""Sets a coefficient of a variable in the objective."""
self._obj_coeffs[var_label] = coeff
def set_cons_coeff(self, cons_label, var_label, coeff):
"""Sets a coefficient of a constraint in the LP."""
self._cons[cons_label].coeffs[var_label] = coeff
def add_to_cons_coeff(self, cons_label, var_label, add_coeff):
"""Sets a coefficient of a constraint in the LP."""
val = self._cons[cons_label].coeffs.get(var_label)
if val is None:
val = 0
self._cons[cons_label].coeffs[var_label] = val + add_coeff
def set_cons_rhs(self, cons_label, value):
"""Sets the right-hand side of a constraint."""
self._cons[cons_label].rhs = value
def get_var_id(self, label):
var = self._vars.get(label)
assert var is not None
return var.vid
def get_num_cons(self):
return len(self._leq_cons_list), len(self._eq_cons_list)
def solve(self, solver=None):
"""Solves the LP.
Args:
solver: the solver to use ('blas', 'lapack', 'glpk'). Defaults to None,
which then uses the cvxopt internal default.
Returns:
      The cvxopt solution vector x, ordered by variable id (None if the
      solver failed to find a solution).
"""
# From http://cvxopt.org/userguide/coneprog.html#linear-programming,
# CVXOPT uses the formulation:
# minimize: c^t x
# s.t. Gx <= h
# Ax = b
#
# Here:
    # - x is the vector of variables
# - c is the vector of objective coefficients
# - G is the matrix of LEQ (and GEQ) constraint coefficients
    # - h is the vector of right-hand side values of the LEQ/GEQ constraints
# - A is the matrix of equality constraint coefficients
# - b is the vector of right-hand side values of the equality constraints
#
# This function builds these sparse matrices from the information it has
# gathered, flipping signs where necessary, and adding equality constraints
# for the upper and lower bounds of variables. It then calls the cvxopt
# solver and maps back the values.
num_vars = len(self._var_list)
num_eq_cons = len(self._eq_cons_list)
num_leq_cons = len(self._leq_cons_list)
for var in self._var_list:
if var.lb is not None:
num_leq_cons += 1
if var.ub is not None:
num_leq_cons += 1
# Make the matrices (some need to be dense).
c = cvxopt.matrix([0.0] * num_vars)
h = cvxopt.matrix([0.0] * num_leq_cons)
g_mat = cvxopt.spmatrix([], [], [], (num_leq_cons, num_vars))
a_mat = None
b = None
if num_eq_cons > 0:
a_mat = cvxopt.spmatrix([], [], [], (num_eq_cons, num_vars))
b = cvxopt.matrix([0.0] * num_eq_cons)
# Objective coefficients: c
for var_label in self._obj_coeffs:
value = self._obj_coeffs[var_label]
vid = self._vars[var_label].vid
if self._objective == OBJ_MAX:
c[vid] = -value # negate the value because it's a max
else:
c[vid] = value # min objective matches cvxopt
# Inequality constraints: G, h
row = 0
for cons in self._leq_cons_list:
# If it's >= then need to negate all coeffs and the rhs
if cons.rhs is not None:
h[row] = cons.rhs if cons.ctype == CONS_TYPE_LEQ else -cons.rhs
for var_label in cons.coeffs:
value = cons.coeffs[var_label]
vid = self._vars[var_label].vid
g_mat[(row, vid)] = value if cons.ctype == CONS_TYPE_LEQ else -value
row += 1
# Inequality constraints: variables upper and lower bounds
for var in self._var_list:
if var.lb is not None: # x_i >= lb has to be -x_i <= -lb
g_mat[(row, var.vid)] = -1.0
h[row] = -var.lb
row += 1
if var.ub is not None: # x_i <= ub
g_mat[(row, var.vid)] = 1.0
h[row] = var.ub
row += 1
# Equality constraints: A, b
if num_eq_cons > 0:
row = 0
for cons in self._eq_cons_list:
b[row] = cons.rhs if cons.rhs is not None else 0.0
for var_label in cons.coeffs:
value = cons.coeffs[var_label]
vid = self._vars[var_label].vid
a_mat[(row, vid)] = value
row += 1
# Solve!
if num_eq_cons > 0:
sol = cvxopt.solvers.lp(c, g_mat, h, a_mat, b, solver=solver)
else:
sol = cvxopt.solvers.lp(c, g_mat, h, solver=solver)
return sol["x"]
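# A minimal usage sketch of the LinearProgram API above (illustrative only, not
# used by the library): maximize x0 + x1 subject to x0 + 2*x1 <= 4 and
# 0 <= x0, x1 <= 3. The optimum is x0 = 3, x1 = 0.5 (objective value 3.5).
def _example_linear_program():
  cvxopt.solvers.options["show_progress"] = False
  lp = LinearProgram(OBJ_MAX)
  lp.add_or_reuse_variable("x0", lb=0, ub=3)
  lp.add_or_reuse_variable("x1", lb=0, ub=3)
  lp.set_obj_coeff("x0", 1.0)
  lp.set_obj_coeff("x1", 1.0)
  lp.add_or_reuse_constraint("capacity", CONS_TYPE_LEQ)
  lp.set_cons_coeff("capacity", "x0", 1.0)
  lp.set_cons_coeff("capacity", "x1", 2.0)
  lp.set_cons_rhs("capacity", 4.0)
  return lp.solve()  # A cvxopt vector ordered by variable id: ~[3.0, 0.5].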
def solve_zero_sum_matrix_game(game):
"""Solves a matrix game by using linear programming.
Args:
game: a pyspiel MatrixGame
Returns:
A 4-tuple containing:
- p0_sol (array-like): probability distribution over row actions
- p1_sol (array-like): probability distribution over column actions
- p0_sol_value, expected value to the first player
- p1_sol_value, expected value to the second player
"""
  # Solving the game for player i (e.g. the row player) requires finding a
  # mixed policy over player i's pure strategies (actions) such that the
  # minimum value of that mixed policy against the opponent's pure strategies
  # is maximized.
#
# For more detail, please refer to Sec 4.1 of Shoham & Leyton-Brown, 2009:
# Multiagent Systems: Algorithmic, Game-Theoretic, and Logical Foundations
# http://www.masfoundations.org/mas.pdf
#
# For the row player the LP looks like:
# max V
# st. sigma_a1 \dot col_0 >= V
# sigma_a2 \dot col_1 >= V
# .
# .
  #     sigma_am \dot col_n >= V
# for all i, sigma_ai >= 0
# sigma \dot 1 = 1
assert isinstance(game, pyspiel.MatrixGame)
assert game.get_type().information == pyspiel.GameType.Information.ONE_SHOT
assert game.get_type().utility == pyspiel.GameType.Utility.ZERO_SUM
num_rows = game.num_rows()
num_cols = game.num_cols()
cvxopt.solvers.options["show_progress"] = False
# First, do the row player (player 0).
lp0 = LinearProgram(OBJ_MAX)
for r in range(num_rows): # one var per action / pure strategy
lp0.add_or_reuse_variable(r, lb=0)
lp0.add_or_reuse_variable(num_rows) # V
lp0.set_obj_coeff(num_rows, 1.0) # max V
for c in range(num_cols):
lp0.add_or_reuse_constraint(c, CONS_TYPE_GEQ)
for r in range(num_rows):
lp0.set_cons_coeff(c, r, game.player_utility(0, r, c))
lp0.set_cons_coeff(c, num_rows, -1.0) # -V >= 0
lp0.add_or_reuse_constraint(num_cols + 1, CONS_TYPE_EQ)
lp0.set_cons_rhs(num_cols + 1, 1.0)
for r in range(num_rows):
lp0.set_cons_coeff(num_cols + 1, r, 1.0)
sol = lp0.solve()
p0_sol = sol[:-1]
p0_sol_val = sol[-1]
# Now, the column player (player 1).
lp1 = LinearProgram(OBJ_MAX)
for c in range(num_cols): # one var per action / pure strategy
lp1.add_or_reuse_variable(c, lb=0)
lp1.add_or_reuse_variable(num_cols) # V
lp1.set_obj_coeff(num_cols, 1) # max V
for r in range(num_rows):
lp1.add_or_reuse_constraint(r, CONS_TYPE_GEQ)
for c in range(num_cols):
lp1.set_cons_coeff(r, c, game.player_utility(1, r, c))
lp1.set_cons_coeff(r, num_cols, -1.0) # -V >= 0
lp1.add_or_reuse_constraint(num_rows + 1, CONS_TYPE_EQ)
lp1.set_cons_rhs(num_rows + 1, 1.0)
for c in range(num_cols):
lp1.set_cons_coeff(num_rows + 1, c, 1.0)
sol = lp1.solve()
p1_sol = sol[:-1]
p1_sol_val = sol[-1]
return p0_sol, p1_sol, p0_sol_val, p1_sol_val
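# Illustrative sketch (not part of the library API): solving matching pennies
# with solve_zero_sum_matrix_game. The payoff matrices below are assumptions
# chosen for the example; both mixtures should come out close to [0.5, 0.5]
# and both values close to 0.
def _example_solve_matching_pennies():
  matching_pennies = pyspiel.create_matrix_game(
      [[1, -1], [-1, 1]], [[-1, 1], [1, -1]])
  p0_sol, p1_sol, p0_val, p1_val = solve_zero_sum_matrix_game(matching_pennies)
  return p0_sol, p1_sol, p0_val, p1_val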
def is_dominated(action,
game_or_payoffs,
player,
mode=DOMINANCE_STRICT,
tol=1e-7,
return_mixture=False):
"""Determines whether a pure strategy is dominated by any mixture strategies.
Args:
action: index of an action for `player`
game_or_payoffs: either a pyspiel matrix- or normal-form game, or a payoff
tensor for `player` with ndim == number of players
player: index of the player (an integer)
mode: dominance criterion: strict, weak, or very weak
tol: tolerance
return_mixture: whether to return the dominating strategy if one exists
Returns:
If `return_mixture`:
a dominating mixture strategy if one exists, or `None`.
the strategy is provided as a 1D numpy array of mixture weights.
Otherwise: True if a dominating strategy exists, False otherwise.
"""
# For more detail, please refer to Sec 4.5.2 of Shoham & Leyton-Brown, 2009:
# Multiagent Systems: Algorithmic, Game-Theoretic, and Logical Foundations
# http://www.masfoundations.org/mas.pdf
assert mode in (DOMINANCE_STRICT, DOMINANCE_VERY_WEAK, DOMINANCE_WEAK)
payoffs = utils.game_payoffs_array(game_or_payoffs)[player] if isinstance(
game_or_payoffs, pyspiel.NormalFormGame) else np.asfarray(game_or_payoffs)
# Reshape payoffs so rows correspond to `player` and cols to the joint action
# of all other players
payoffs = np.moveaxis(payoffs, player, 0)
payoffs = payoffs.reshape((payoffs.shape[0], -1))
num_rows, num_cols = payoffs.shape
cvxopt.solvers.options["show_progress"] = False
cvxopt.solvers.options["maxtol"] = tol
cvxopt.solvers.options["feastol"] = tol
lp = LinearProgram(OBJ_MAX)
# One var for every row probability, fixed to 0 if inactive
for r in range(num_rows):
if r == action:
lp.add_or_reuse_variable(r, lb=0, ub=0)
else:
lp.add_or_reuse_variable(r, lb=0)
  # For the strict LP we normalize the payoffs to be strictly positive
  if mode == DOMINANCE_STRICT:
    to_subtract = payoffs.min() - 1
  else:
    to_subtract = 0
    # For non-strict LPs the probabilities must sum to 1
    lp.add_or_reuse_constraint(num_cols, CONS_TYPE_EQ)
    lp.set_cons_rhs(num_cols, 1)
    for r in range(num_rows):
      if r != action:
        lp.set_cons_coeff(num_cols, r, 1)
# The main dominance constraint
for c in range(num_cols):
lp.add_or_reuse_constraint(c, CONS_TYPE_GEQ)
lp.set_cons_rhs(c, payoffs[action, c] - to_subtract)
for r in range(num_rows):
if r != action:
lp.set_cons_coeff(c, r, payoffs[r, c] - to_subtract)
if mode == DOMINANCE_STRICT:
# Minimize sum of probabilities
for r in range(num_rows):
if r != action:
lp.set_obj_coeff(r, -1)
mixture = lp.solve()
if mixture is not None and np.sum(mixture) < 1 - tol:
mixture = np.squeeze(mixture, 1) / np.sum(mixture)
else:
mixture = None
if mode == DOMINANCE_VERY_WEAK:
# Check feasibility
mixture = lp.solve()
if mixture is not None:
mixture = np.squeeze(mixture, 1)
if mode == DOMINANCE_WEAK:
# Check feasibility and whether there's any advantage
for r in range(num_rows):
lp.set_obj_coeff(r, payoffs[r].sum())
mixture = lp.solve()
if mixture is not None:
mixture = np.squeeze(mixture, 1)
if (np.dot(mixture, payoffs) - payoffs[action]).sum() <= tol:
mixture = None
return mixture if return_mixture else (mixture is not None)
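# Illustrative sketch of is_dominated on a hand-written payoff matrix (an
# assumption made only for this example): rows are player 0's actions, columns
# are the opponent's actions. Action 1 is strictly dominated by an equal mix of
# actions 0 and 2, so the returned mixture should be roughly [0.5, 0.0, 0.5].
def _example_is_dominated():
  payoffs = [[3.0, 0.0], [1.0, 1.0], [0.0, 3.0]]
  return is_dominated(
      1, payoffs, 0, mode=DOMINANCE_STRICT, return_mixture=True)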
def _pure_dominated_from_advantages(advantages, mode, tol=1e-7):
  """Marks actions dominated by a candidate, given its advantage matrix."""
  if mode == DOMINANCE_STRICT:
return (advantages > tol).all(1)
if mode == DOMINANCE_WEAK:
return (advantages >= -tol).all(1) & (advantages.sum(1) > tol)
if mode == DOMINANCE_VERY_WEAK:
return (advantages >= -tol).all(1)
def iterated_dominance(game_or_payoffs, mode, tol=1e-7):
"""Reduces a strategy space using iterated dominance.
See: http://www.smallparty.com/yoram/classes/principles/nash.pdf
Args:
game_or_payoffs: either a pyspiel matrix- or normal-form game, or a payoff
tensor of dimension `num_players` + 1. First dimension is the player,
followed by the actions of all players, e.g. a 3x3 game (2 players) has
dimension [2,3,3].
mode: DOMINANCE_STRICT, DOMINANCE_WEAK, or DOMINANCE_VERY_WEAK
tol: tolerance
Returns:
A tuple (`reduced_game`, `live_actions`).
* if `game_or_payoffs` is an instance of `pyspiel.MatrixGame`, so is
`reduced_game`; otherwise `reduced_game` is a payoff tensor.
* `live_actions` is a tuple of length `num_players`, where
`live_actions[player]` is a boolean vector of shape `num_actions`;
`live_actions[player][action]` is `True` if `action` wasn't dominated for
`player`.
"""
payoffs = utils.game_payoffs_array(game_or_payoffs) if isinstance(
game_or_payoffs, pyspiel.NormalFormGame) else np.asfarray(game_or_payoffs)
live_actions = [
np.ones(num_actions, bool) for num_actions in payoffs.shape[1:]
]
progress = True
while progress:
progress = False
# trying faster method first
for method in ("pure", "mixed"):
if progress:
continue
for player, live in enumerate(live_actions):
if live.sum() == 1:
# one action is dominant
continue
# discarding all dominated opponent actions
payoffs_live = payoffs[player]
for opponent in range(payoffs.shape[0]):
if opponent != player:
payoffs_live = payoffs_live.compress(live_actions[opponent],
opponent)
# reshaping to (player_actions, joint_opponent_actions)
payoffs_live = np.moveaxis(payoffs_live, player, 0)
payoffs_live = payoffs_live.reshape((payoffs_live.shape[0], -1))
for action in range(live.size):
if not live[action]:
continue
if method == "pure":
# mark all actions that `action` dominates
advantage = payoffs_live[action] - payoffs_live
dominated = _pure_dominated_from_advantages(advantage, mode, tol)
dominated[action] = False
dominated &= live
if dominated.any():
progress = True
live &= ~dominated
if live.sum() == 1:
break
if method == "mixed":
# test if `action` is dominated by a mixed policy
mixture = is_dominated(
live[:action].sum(),
payoffs_live[live],
0,
mode,
tol,
return_mixture=True)
if mixture is None:
continue
# if it is, mark any other actions dominated by that policy
progress = True
advantage = mixture.dot(payoffs_live[live]) - payoffs_live[live]
dominated = _pure_dominated_from_advantages(advantage, mode, tol)
dominated[mixture > tol] = False
assert dominated[live[:action].sum()]
live.put(live.nonzero()[0], ~dominated)
if live.sum() == 1:
break
for player, live in enumerate(live_actions):
payoffs = payoffs.compress(live, player + 1)
if isinstance(game_or_payoffs, pyspiel.MatrixGame):
return pyspiel.MatrixGame(game_or_payoffs.get_type(),
game_or_payoffs.get_parameters(), [
game_or_payoffs.row_action_name(action)
for action in live_actions[0].nonzero()[0]
], [
game_or_payoffs.col_action_name(action)
for action in live_actions[1].nonzero()[0]
], *payoffs), live_actions
else:
return payoffs, live_actions
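# Illustrative sketch of iterated_dominance on a prisoner's dilemma payoff
# tensor (an assumption for the example): the first axis is the player, then
# each player's action (0 = cooperate, 1 = defect). Cooperation is strictly
# dominated for both players, so only the (defect, defect) cell survives and
# live_actions becomes ([False, True], [False, True]).
def _example_iterated_dominance():
  pd_payoffs = [[[3.0, 0.0], [5.0, 1.0]], [[3.0, 5.0], [0.0, 1.0]]]
  return iterated_dominance(pd_payoffs, DOMINANCE_STRICT)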
# TODO(author5): add a function for sequential games using sequence-form LPs.
| open_spiel-master | open_spiel/python/algorithms/lp_solver.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/losses/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.losses.rl_losses."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from open_spiel.python.algorithms.losses import rl_losses
# Temporarily disable v2 behavior until code is updated.
tf.disable_v2_behavior()
class RLLossesTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(('no_entropy_cost', 0.),
('with_entropy_cost', 1.))
def test_batch_qpg_loss_with_entropy_cost(self, entropy_cost):
batch_qpg_loss = rl_losses.BatchQPGLoss(entropy_cost=entropy_cost)
q_values = tf.constant([[0., -1., 1.], [1., -1., 0]], dtype=tf.float32)
policy_logits = tf.constant([[1., 1., 1.], [1., 1., 4.]], dtype=tf.float32)
total_loss = batch_qpg_loss.loss(policy_logits, q_values)
# Compute expected quantities.
expected_policy_entropy_loss = -1 * (1.0986 + 0.3665) / 2
# baseline = \sum_a pi_a * Q_a = 0.
# -\sum_a pi_a * (Q_a - baseline)
expected_policy_loss = (0.0 + 0.0) / 2
expected_total_loss = (
expected_policy_loss + entropy_cost * expected_policy_entropy_loss)
with self.session() as sess:
np.testing.assert_allclose(
sess.run(total_loss), expected_total_loss, atol=1e-4)
@parameterized.named_parameters(('no_entropy_cost', 0.),
('with_entropy_cost', 1.))
def test_batch_rm_loss_with_entropy_cost(self, entropy_cost):
batch_rpg_loss = rl_losses.BatchRMLoss(entropy_cost=entropy_cost)
q_values = tf.constant([[0., -1., 1.], [1., -1., 0]], dtype=tf.float32)
policy_logits = tf.constant([[1., 1., 1.], [1., 1., 4.]], dtype=tf.float32)
total_loss = batch_rpg_loss.loss(policy_logits, q_values)
# Compute expected quantities.
expected_policy_entropy_loss = -(1.0986 + 0.3665) / 2
# baseline = \sum_a pi_a * Q_a = 0.
# -\sum_a pi_a * relu(Q_a - baseline)
# negative sign as it's a loss term and loss needs to be minimized.
expected_policy_loss = -(.3333 + .0452) / 2
expected_total_loss = (
expected_policy_loss + entropy_cost * expected_policy_entropy_loss)
with self.session() as sess:
np.testing.assert_allclose(
sess.run(total_loss), expected_total_loss, atol=1e-3)
@parameterized.named_parameters(('no_entropy_cost', 0.),
('with_entropy_cost', 1.))
def test_batch_rpg_loss_with_entropy_cost(self, entropy_cost):
batch_rpg_loss = rl_losses.BatchRPGLoss(entropy_cost=entropy_cost)
q_values = tf.constant([[0., -1., 1.], [1., -1., 0]], dtype=tf.float32)
policy_logits = tf.constant([[1., 1., 1.], [1., 1., 4.]], dtype=tf.float32)
total_loss = batch_rpg_loss.loss(policy_logits, q_values)
# Compute expected quantities.
expected_policy_entropy_loss = -1 * (1.0986 + 0.3665) / 2
# baseline = \sum_a pi_a * Q_a = 0.
# \sum_a relu(Q_a - baseline)
expected_policy_loss = (1.0 + 1.0) / 2
expected_total_loss = (
expected_policy_loss + entropy_cost * expected_policy_entropy_loss)
with self.session() as sess:
np.testing.assert_allclose(
sess.run(total_loss), expected_total_loss, atol=1e-4)
@parameterized.named_parameters(('no_entropy_cost', 0.),
('with_entropy_cost', 1.))
def test_batch_a2c_loss_with_entropy_cost(self, entropy_cost):
batch_a2c_loss = rl_losses.BatchA2CLoss(entropy_cost=entropy_cost)
policy_logits = tf.constant([[1., 1., 1.], [1., 1., 4.]], dtype=tf.float32)
baseline = tf.constant([1. / 3, 0.5], dtype=tf.float32)
actions = tf.constant([1, 2], dtype=tf.int32)
returns = tf.constant([0., 1.], dtype=tf.float32)
total_loss = batch_a2c_loss.loss(policy_logits, baseline, actions, returns)
# Compute expected quantities.
# advantages = returns - baseline = [-1./3, 0.5]
    # cross_entropy = [-log(e^1 / (e^1 + e^1 + e^1)),
    #                  -log(e^4 / (e^4 + e + e))]
    #               = [1.0986, 0.09492]
# policy_loss = cross_entropy * advantages = [-0.3662, 0.04746]
expected_policy_entropy_loss = -1 * (1.0986 + 0.3665) / 2
expected_policy_loss = (-0.3662 + 0.04746) / 2
expected_total_loss = (
expected_policy_loss + entropy_cost * expected_policy_entropy_loss)
with self.session() as sess:
np.testing.assert_allclose(
sess.run(total_loss), expected_total_loss, atol=1e-4)
if __name__ == '__main__':
tf.test.main()
| open_spiel-master | open_spiel/python/algorithms/losses/rl_losses_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reinforcement learning loss functions.
All the loss functions implemented here compute the loss for the policy (actor).
The critic loss functions are typically simple regression losses and are
omitted here for simplicity.
For the batch QPG, RM and RPG loss, please refer to the paper:
https://papers.nips.cc/paper/7602-actor-critic-policy-optimization-in-partially-observable-multiagent-environments.pdf
The BatchA2C loss uses code from the `TRFL` library:
https://github.com/deepmind/trfl/blob/master/trfl/discrete_policy_gradient_ops.py
"""
import tensorflow.compat.v1 as tf
# Temporarily disable v2 behavior until code is updated.
tf.disable_v2_behavior()
def _assert_rank_and_shape_compatibility(tensors, rank):
if not tensors:
raise ValueError("List of tensors cannot be empty")
union_of_shapes = tf.TensorShape(None)
for tensor in tensors:
tensor_shape = tensor.get_shape()
tensor_shape.assert_has_rank(rank)
union_of_shapes = union_of_shapes.merge_with(tensor_shape)
def compute_baseline(policy, action_values):
# V = pi * Q, backprop through pi but not Q.
return tf.reduce_sum(
tf.multiply(policy, tf.stop_gradient(action_values)), axis=1)
def compute_regrets(policy_logits, action_values):
"""Compute regrets using pi and Q."""
# Compute regret.
policy = tf.nn.softmax(policy_logits, axis=1)
# Avoid computing gradients for action_values.
action_values = tf.stop_gradient(action_values)
baseline = compute_baseline(policy, action_values)
regrets = tf.reduce_sum(
tf.nn.relu(action_values - tf.expand_dims(baseline, 1)), axis=1)
return regrets
def compute_advantages(policy_logits, action_values, use_relu=False):
"""Compute advantages using pi and Q."""
# Compute advantage.
policy = tf.nn.softmax(policy_logits, axis=1)
# Avoid computing gradients for action_values.
action_values = tf.stop_gradient(action_values)
baseline = compute_baseline(policy, action_values)
advantages = action_values - tf.expand_dims(baseline, 1)
if use_relu:
advantages = tf.nn.relu(advantages)
# Compute advantage weighted by policy.
policy_advantages = -tf.multiply(policy, tf.stop_gradient(advantages))
return tf.reduce_sum(policy_advantages, axis=1)
def compute_a2c_loss(policy_logits, actions, advantages):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=actions, logits=policy_logits)
advantages = tf.stop_gradient(advantages)
advantages.get_shape().assert_is_compatible_with(cross_entropy.get_shape())
return tf.multiply(cross_entropy, advantages)
def compute_entropy(policy_logits):
return tf.reduce_sum(
-tf.nn.softmax(policy_logits) * tf.nn.log_softmax(policy_logits), axis=-1)
def compute_entropy_loss(policy_logits):
"""Compute an entropy loss.
We want a value that we can minimize along with other losses, and where
minimizing means driving the policy towards a uniform distribution over
the actions. We thus scale it by negative one so that it can be simply
added to other losses (and so it can be considered a bonus for having
entropy).
Args:
policy_logits: the policy logits.
Returns:
entropy loss (negative entropy).
"""
entropy = compute_entropy(policy_logits)
scale = tf.constant(-1.0, dtype=tf.float32)
entropy_loss = tf.multiply(scale, entropy, name="entropy_loss")
return entropy_loss
class BatchQPGLoss(object):
"""Defines the batch QPG loss op."""
def __init__(self, entropy_cost=None, name="batch_qpg_loss"):
self._entropy_cost = entropy_cost
self._name = name
def loss(self, policy_logits, action_values):
"""Constructs a TF graph that computes the QPG loss for batches.
Args:
policy_logits: `B x A` tensor corresponding to policy logits.
action_values: `B x A` tensor corresponding to Q-values.
Returns:
      loss: A 0-D `float` tensor corresponding to the loss.
"""
_assert_rank_and_shape_compatibility([policy_logits, action_values], 2)
advantages = compute_advantages(policy_logits, action_values)
_assert_rank_and_shape_compatibility([advantages], 1)
total_adv = tf.reduce_mean(advantages, axis=0)
total_loss = total_adv
if self._entropy_cost:
entropy_loss = tf.reduce_mean(compute_entropy_loss(policy_logits))
scaled_entropy_loss = tf.multiply(
float(self._entropy_cost), entropy_loss, name="scaled_entropy_loss")
total_loss = tf.add(
total_loss, scaled_entropy_loss, name="total_loss_with_entropy")
return total_loss
class BatchRMLoss(object):
"""Defines the batch RM loss op."""
def __init__(self, entropy_cost=None, name="batch_rm_loss"):
self._entropy_cost = entropy_cost
self._name = name
def loss(self, policy_logits, action_values):
"""Constructs a TF graph that computes the RM loss for batches.
Args:
policy_logits: `B x A` tensor corresponding to policy logits.
action_values: `B x A` tensor corresponding to Q-values.
Returns:
      loss: A 0-D `float` tensor corresponding to the loss.
"""
_assert_rank_and_shape_compatibility([policy_logits, action_values], 2)
advantages = compute_advantages(policy_logits, action_values, use_relu=True)
_assert_rank_and_shape_compatibility([advantages], 1)
total_adv = tf.reduce_mean(advantages, axis=0)
total_loss = total_adv
if self._entropy_cost:
entropy_loss = tf.reduce_mean(compute_entropy_loss(policy_logits))
scaled_entropy_loss = tf.multiply(
float(self._entropy_cost), entropy_loss, name="scaled_entropy_loss")
total_loss = tf.add(
total_loss, scaled_entropy_loss, name="total_loss_with_entropy")
return total_loss
class BatchRPGLoss(object):
"""Defines the batch RPG loss op."""
def __init__(self, entropy_cost=None, name="batch_rpg_loss"):
self._entropy_cost = entropy_cost
self._name = name
def loss(self, policy_logits, action_values):
"""Constructs a TF graph that computes the RPG loss for batches.
Args:
policy_logits: `B x A` tensor corresponding to policy logits.
action_values: `B x A` tensor corresponding to Q-values.
Returns:
      loss: A 0-D `float` tensor corresponding to the loss.
"""
_assert_rank_and_shape_compatibility([policy_logits, action_values], 2)
regrets = compute_regrets(policy_logits, action_values)
_assert_rank_and_shape_compatibility([regrets], 1)
total_regret = tf.reduce_mean(regrets, axis=0)
total_loss = total_regret
if self._entropy_cost:
entropy_loss = tf.reduce_mean(compute_entropy_loss(policy_logits))
scaled_entropy_loss = tf.multiply(
float(self._entropy_cost), entropy_loss, name="scaled_entropy_loss")
total_loss = tf.add(
total_loss, scaled_entropy_loss, name="total_loss_with_entropy")
return total_loss
class BatchA2CLoss(object):
"""Defines the batch A2C loss op."""
def __init__(self, entropy_cost=None, name="batch_a2c_loss"):
self._entropy_cost = entropy_cost
self._name = name
def loss(self, policy_logits, baseline, actions, returns):
"""Constructs a TF graph that computes the A2C loss for batches.
Args:
policy_logits: `B x A` tensor corresponding to policy logits.
baseline: `B` tensor corresponding to baseline (V-values).
actions: `B` tensor corresponding to actions taken.
      returns: `B` tensor corresponding to the accumulated returns.
    Returns:
      loss: A 0-D `float` tensor corresponding to the loss.
"""
_assert_rank_and_shape_compatibility([policy_logits], 2)
_assert_rank_and_shape_compatibility([baseline, actions, returns], 1)
advantages = returns - baseline
policy_loss = compute_a2c_loss(policy_logits, actions, advantages)
total_loss = tf.reduce_mean(policy_loss, axis=0)
if self._entropy_cost:
entropy_loss = tf.reduce_mean(compute_entropy_loss(policy_logits))
scaled_entropy_loss = tf.multiply(
float(self._entropy_cost), entropy_loss, name="scaled_entropy_loss")
total_loss = tf.add(
total_loss, scaled_entropy_loss, name="total_loss_with_entropy")
return total_loss
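# A minimal usage sketch (not exercised by the library): building a QPG loss op
# in a TF1 graph. The placeholder shapes and entropy cost below are
# illustrative assumptions; feed batches of logits and Q-values to evaluate the
# resulting scalar.
def _example_batch_qpg_loss_graph():
  policy_logits = tf.placeholder(tf.float32, shape=[None, 3])
  action_values = tf.placeholder(tf.float32, shape=[None, 3])
  return BatchQPGLoss(entropy_cost=0.01).loss(policy_logits, action_values)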
| open_spiel-master | open_spiel/python/algorithms/losses/rl_losses.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.psro_v2.best_response_oracle."""
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import policy
from open_spiel.python.algorithms import best_response
from open_spiel.python.algorithms.psro_v2 import best_response_oracle
import pyspiel
class BestResponseOracleTest(parameterized.TestCase, absltest.TestCase):
@parameterized.parameters(("kuhn_poker", 2), ("kuhn_poker", 3),
("leduc_poker", 2))
def test_cpp_python_best_response_oracle(self, game_name, num_players):
# Tests that these best responses interface well with Best Response Oracle
game = pyspiel.load_game(game_name, {"players": num_players})
all_states, _ = best_response.compute_states_and_info_states_if_none(
game, all_states=None, state_to_information_state=None)
current_best = [
[policy.TabularPolicy(game).__copy__()] for _ in range(num_players)
]
probabilities_of_playing_policies = [[1.] for _ in range(num_players)]
# Construct the python oracle
py_oracle = best_response_oracle.BestResponseOracle(
best_response_backend="py")
# Construct the cpp oracle. Note that in this regime, BestResponseOracle
# uses base_policy to construct and cache TabularBestResponse internally.
cpp_oracle = best_response_oracle.BestResponseOracle(
game=game, best_response_backend="cpp")
# Prepare the computation of the best responses with each backend
# pylint:disable=g-complex-comprehension
training_params = [[{
"total_policies": current_best,
"current_player": i,
"probabilities_of_playing_policies": probabilities_of_playing_policies
}] for i in range(num_players)]
# pylint:enable=g-complex-comprehension
py_best_rep = py_oracle(game, training_params)
cpp_best_rep = cpp_oracle(game, training_params)
# Compare the policies
for state in all_states.values():
i_player = state.current_player()
py_dict = py_best_rep[i_player][0].action_probabilities(state)
cpp_dict = cpp_best_rep[i_player][0].action_probabilities(state)
for action in py_dict.keys():
self.assertEqual(py_dict.get(action, 0.0), cpp_dict.get(action, 0.0))
for action in cpp_dict.keys():
self.assertEqual(py_dict.get(action, 0.0), cpp_dict.get(action, 0.0))
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/psro_v2/best_response_oracle_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN as a policy.
Treating RL Oracles as policies allows us to streamline their use with tabular
policies and other policies in OpenSpiel, and freely mix populations using
different types of oracles.
"""
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import dqn
from open_spiel.python.algorithms import policy_gradient
def rl_policy_factory(rl_class):
"""Transforms an RL Agent into an OpenSpiel policy.
Args:
rl_class: An OpenSpiel class inheriting from 'rl_agent.AbstractAgent' such
as policy_gradient.PolicyGradient or dqn.DQN.
Returns:
An RLPolicy class that wraps around an instance of rl_class to transform it
into a policy.
"""
class RLPolicy(policy.Policy):
"""A 'policy.Policy' wrapper around an 'rl_agent.AbstractAgent' instance."""
def __init__(self, env, player_id, **kwargs):
"""Constructs an RL Policy.
Args:
env: An OpenSpiel RL Environment instance.
player_id: The ID of the DQN policy's player.
**kwargs: Various kwargs used to initialize rl_class.
"""
game = env.game
super(RLPolicy, self).__init__(game, player_id)
self._policy = rl_class(**{"player_id": player_id, **kwargs})
self._frozen = False
self._rl_class = rl_class
self._env = env
self._obs = {
"info_state": [None] * self.game.num_players(),
"legal_actions": [None] * self.game.num_players()
}
def get_time_step(self):
time_step = self._env.get_time_step()
return time_step
def action_probabilities(self, state, player_id=None):
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
step_type = rl_environment.StepType.LAST if state.is_terminal(
) else rl_environment.StepType.MID
self._obs["current_player"] = cur_player
self._obs["info_state"][cur_player] = (
state.information_state_tensor(cur_player))
self._obs["legal_actions"][cur_player] = legal_actions
# pylint: disable=protected-access
rewards = state.rewards()
if rewards:
time_step = rl_environment.TimeStep(
observations=self._obs, rewards=rewards,
discounts=self._env._discounts, step_type=step_type)
else:
        rewards = [0] * self.game.num_players()
time_step = rl_environment.TimeStep(
observations=self._obs, rewards=rewards,
discounts=self._env._discounts,
step_type=rl_environment.StepType.FIRST)
# pylint: enable=protected-access
p = self._policy.step(time_step, is_evaluation=True).probs
prob_dict = {action: p[action] for action in legal_actions}
return prob_dict
def step(self, time_step, is_evaluation=False):
# The _frozen attribute freezes the weights of the current policy. This
# effect is achieved by considering that we always are evaluating when the
# current policy's weights are frozen. For more details, see the freeze()
# method.
is_evaluation = (is_evaluation) or (self._frozen)
return self._policy.step(time_step, is_evaluation)
def freeze(self):
"""This method freezes the policy's weights.
The weight freezing effect is implemented by preventing any training to
take place through calls to the step function. The weights are therefore
not effectively frozen, and unconventional calls may trigger weights
training.
The weight-freezing effect is especially needed in PSRO, where all
policies that aren't being trained by the oracle must be static. Freezing
trained policies permitted us not to change how 'step' was called when
introducing self-play (By not changing 'is_evaluation' depending on the
current player).
"""
self._frozen = True
def unfreeze(self):
self._frozen = False
def is_frozen(self):
return self._frozen
def get_weights(self):
return self._policy.get_weights()
def copy_with_noise(self, sigma=0.0):
copied_object = RLPolicy.__new__(RLPolicy)
super(RLPolicy, copied_object).__init__(self.game, self.player_ids)
setattr(copied_object, "_rl_class", self._rl_class)
setattr(copied_object, "_obs", self._obs)
setattr(copied_object, "_policy",
self._policy.copy_with_noise(sigma=sigma))
setattr(copied_object, "_env", self._env)
copied_object.unfreeze()
return copied_object
return RLPolicy
# Generating policy classes for Policy Gradient and DQN
# pylint: disable=invalid-name
PGPolicy = rl_policy_factory(policy_gradient.PolicyGradient)
DQNPolicy = rl_policy_factory(dqn.DQN)
# pylint: enable=invalid-name
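# A hedged usage sketch, not exercised by the library itself: wrapping a DQN
# agent as an OpenSpiel policy. The game choice, layer sizes and the DQN
# constructor keywords (session, state_representation_size, num_actions,
# hidden_layers_sizes) are assumptions; adjust them to the installed version.
def _example_dqn_policy_usage():
  import tensorflow.compat.v1 as tf  # Local import: only needed for the sketch.
  env = rl_environment.Environment("kuhn_poker")
  info_state_size = env.observation_spec()["info_state"][0]
  num_actions = env.action_spec()["num_actions"]
  session = tf.Session()
  dqn_policy = DQNPolicy(
      env, player_id=0, session=session,
      state_representation_size=info_state_size,
      num_actions=num_actions,
      hidden_layers_sizes=[32])
  session.run(tf.global_variables_initializer())
  dqn_policy.freeze()  # Keep the policy static, as PSRO does for BR agents.
  return dqn_policy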
| open_spiel-master | open_spiel/python/algorithms/psro_v2/rl_policy.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class of Optimization Oracles generating best response against opponents.
Oracles are as defined in (Lanctot et Al., 2017,
https://arxiv.org/pdf/1711.00832.pdf ), functions generating a best response
against a probabilistic mixture of opponents. This class implements the abstract
class of oracles, and a simple oracle using Evolutionary Strategy as
optimization method.
"""
import numpy as np
def strategy_sampler_fun(total_policies, probabilities_of_playing_policies):
"""Samples strategies according to distribution over them.
Args:
total_policies: List of lists of policies for each player.
probabilities_of_playing_policies: List of numpy arrays representing the
probability of playing a strategy.
Returns:
One sampled joint strategy.
"""
policies_selected = []
for k in range(len(total_policies)):
selected_opponent = np.random.choice(
total_policies[k],
1,
p=probabilities_of_playing_policies[k]).reshape(-1)[0]
policies_selected.append(selected_opponent)
return policies_selected
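# Illustrative sketch: sampling one joint strategy from per-player mixtures.
# Plain strings stand in for policy.Policy objects here; real callers pass
# actual policies and probabilities produced by a meta-strategy solver.
def _example_strategy_sampling():
  total_policies = [["p0_a", "p0_b"], ["p1_a", "p1_b", "p1_c"]]
  probabilities = [np.array([0.75, 0.25]), np.array([0.2, 0.3, 0.5])]
  return strategy_sampler_fun(total_policies, probabilities)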
class AbstractOracle(object):
"""The abstract class representing oracles, a hidden optimization process."""
def __init__(self,
number_policies_sampled=100,
**oracle_specific_kwargs):
"""Initialization method for oracle.
Args:
number_policies_sampled: Number of different opponent policies sampled
during evaluation of policy.
**oracle_specific_kwargs: Oracle specific args, compatibility
purpose. Since oracles can vary so much in their implementation, no
specific argument constraint is put on this function.
"""
self._number_policies_sampled = number_policies_sampled
self._kwargs = oracle_specific_kwargs
def set_iteration_numbers(self, number_policies_sampled):
"""Changes the number of iterations used for computing episode returns.
Args:
number_policies_sampled: Number of different opponent policies sampled
during evaluation of policy.
"""
self._number_policies_sampled = number_policies_sampled
def __call__(self, game, policy, total_policies, current_player,
probabilities_of_playing_policies,
**oracle_specific_execution_kwargs):
"""Call method for oracle, returns best response against a set of policies.
Args:
game: The game on which the optimization process takes place.
policy: The current policy, in policy.Policy, from which we wish to start
optimizing.
total_policies: A list of all policy.Policy strategies used for training,
including the one for the current player.
current_player: Integer representing the current player.
probabilities_of_playing_policies: A list of arrays representing, per
player, the probabilities of playing each policy in total_policies for
the same player.
**oracle_specific_execution_kwargs: Other set of arguments, for
compatibility purposes. Can for example represent whether to Rectify
Training or not.
"""
raise NotImplementedError("Calling Abstract class method.")
def sample_episode(self, game, policies_selected):
raise NotImplementedError("Calling Abstract class method.")
def evaluate_policy(self, game, pol, total_policies, current_player,
probabilities_of_playing_policies,
strategy_sampler=strategy_sampler_fun,
**oracle_specific_execution_kwargs):
"""Evaluates a specific policy against a nash mixture of policies.
Args:
game: The game on which the optimization process takes place.
pol: The current policy, in policy.Policy, from which we wish to start
optimizing.
total_policies: A list of all policy.Policy strategies used for training,
including the one for the current player.
current_player: Integer representing the current player.
probabilities_of_playing_policies: A list of arrays representing, per
player, the probabilities of playing each policy in total_policies for
the same player.
strategy_sampler: callable sampling strategy.
**oracle_specific_execution_kwargs: Other set of arguments, for
compatibility purposes. Can for example represent whether to Rectify
Training or not.
Returns:
Average return for policy when played against policies_played_against.
"""
del oracle_specific_execution_kwargs # Unused.
totals = 0
count = 0
for _ in range(self._number_policies_sampled):
policies_selected = strategy_sampler(total_policies,
probabilities_of_playing_policies)
policies_selected[current_player] = pol
new_return = self.sample_episode(
game,
policies_selected)[current_player]
totals += new_return
count += 1
# Avoid the 0 / 0 case.
return totals / max(1, count)
| open_spiel-master | open_spiel/python/algorithms/psro_v2/optimization_oracle.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Meta-strategy solvers for PSRO."""
import numpy as np
from open_spiel.python.algorithms import lp_solver
from open_spiel.python.algorithms import projected_replicator_dynamics
from open_spiel.python.algorithms import regret_matching
import pyspiel
EPSILON_MIN_POSITIVE_PROBA = 1e-8
def uniform_strategy(solver, return_joint=False):
"""Returns a Random Uniform distribution on policies.
Args:
solver: GenPSROSolver instance.
    return_joint: If True, returns the joint probabilities as well as the
      marginals. Otherwise returns only the marginals.
Returns:
uniform distribution on strategies.
"""
policies = solver.get_policies()
policy_lengths = [len(pol) for pol in policies]
result = [np.ones(pol_len) / pol_len for pol_len in policy_lengths]
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def softmax_on_range(number_policies):
x = np.array(list(range(number_policies)))
x = np.exp(x-x.max())
x /= np.sum(x)
return x
def uniform_biased_strategy(solver, return_joint=False):
"""Returns a Biased Random Uniform distribution on policies.
The uniform distribution is biased to prioritize playing against more recent
policies (Policies that were appended to the policy list later in training)
instead of older ones.
Args:
solver: GenPSROSolver instance.
    return_joint: If True, returns the joint probabilities as well as the
      marginals. Otherwise returns only the marginals.
Returns:
uniform distribution on strategies.
"""
policies = solver.get_policies()
if not isinstance(policies[0], list):
policies = [policies]
policy_lengths = [len(pol) for pol in policies]
result = [softmax_on_range(pol_len) for pol_len in policy_lengths]
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def renormalize(probabilities):
"""Replaces all negative entries with zeroes and normalizes the result.
Args:
probabilities: probability vector to renormalize. Has to be one-dimensional.
Returns:
Renormalized probabilities.
"""
probabilities[probabilities < 0] = 0
probabilities = probabilities / np.sum(probabilities)
return probabilities
def get_joint_strategy_from_marginals(probabilities):
"""Returns a joint strategy matrix from a list of marginals.
Args:
probabilities: list of probabilities.
Returns:
A joint strategy from a list of marginals.
"""
probas = []
for i in range(len(probabilities)):
probas_shapes = [1] * len(probabilities)
probas_shapes[i] = -1
probas.append(probabilities[i].reshape(*probas_shapes))
  # Accumulate the product pairwise so NumPy broadcasting builds the joint
  # tensor (np.prod over a ragged list of differently-shaped arrays is
  # unreliable).
  result = probas[0]
  for player_probas in probas[1:]:
    result = result * player_probas
return result.reshape(-1)
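# Worked sketch (illustration only): marginals [0.5, 0.5] and [0.3, 0.7]
# combine into the joint distribution [0.15, 0.35, 0.15, 0.35], flattened in
# row-major order over the two players' strategy indices.
def _example_joint_from_marginals():
  marginals = [np.array([0.5, 0.5]), np.array([0.3, 0.7])]
  return get_joint_strategy_from_marginals(marginals)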
def nash_strategy(solver, return_joint=False):
"""Returns nash distribution on meta game matrix.
This method only works for two player zero-sum games.
Args:
solver: GenPSROSolver instance.
    return_joint: If True, returns the joint probabilities as well as the
      marginals. Otherwise returns only the marginals.
Returns:
Nash distribution on strategies.
"""
meta_games = solver.get_meta_game()
if not isinstance(meta_games, list):
meta_games = [meta_games, -meta_games]
meta_games = [x.tolist() for x in meta_games]
if len(meta_games) != 2:
raise NotImplementedError(
"nash_strategy solver works only for 2p zero-sum"
"games, but was invoked for a {} player game".format(len(meta_games)))
nash_prob_1, nash_prob_2, _, _ = (
lp_solver.solve_zero_sum_matrix_game(
pyspiel.create_matrix_game(*meta_games)))
result = [
renormalize(np.array(nash_prob_1).reshape(-1)),
renormalize(np.array(nash_prob_2).reshape(-1))
]
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def prd_strategy(solver, return_joint=False):
"""Computes Projected Replicator Dynamics strategies.
Args:
solver: GenPSROSolver instance.
    return_joint: If True, returns the joint probabilities as well as the
      marginals. Otherwise returns only the marginals.
Returns:
PRD-computed strategies.
"""
meta_games = solver.get_meta_game()
if not isinstance(meta_games, list):
meta_games = [meta_games, -meta_games]
kwargs = solver.get_kwargs()
result = projected_replicator_dynamics.projected_replicator_dynamics(
meta_games, **kwargs)
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
def rm_strategy(solver, return_joint=False):
"""Computes regret-matching strategies.
Args:
solver: GenPSROSolver instance.
    return_joint: If True, returns the joint probabilities as well as the
      marginals. Otherwise returns only the marginals.
  Returns:
    Regret-matching-computed strategies.
"""
meta_games = solver.get_meta_game()
if not isinstance(meta_games, list):
meta_games = [meta_games, -meta_games]
kwargs = solver.get_kwargs()
result = regret_matching.regret_matching(meta_games, **kwargs)
if not return_joint:
return result
else:
joint_strategies = get_joint_strategy_from_marginals(result)
return result, joint_strategies
META_STRATEGY_METHODS = {
"uniform_biased": uniform_biased_strategy,
"uniform": uniform_strategy,
"nash": nash_strategy,
"prd": prd_strategy,
"rm": rm_strategy,
}
| open_spiel-master | open_spiel/python/algorithms/psro_v2/meta_strategies.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/psro_v2/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for meta trainers (Generalized PSRO, RNR, ...)
Meta-algorithm with modular behaviour, allowing implementation of PSRO, RNR, and
other variations.
"""
import numpy as np
from open_spiel.python.algorithms.psro_v2 import meta_strategies
from open_spiel.python.algorithms.psro_v2 import strategy_selectors
from open_spiel.python.algorithms.psro_v2 import utils
_DEFAULT_STRATEGY_SELECTION_METHOD = "probabilistic"
_DEFAULT_META_STRATEGY_METHOD = "prd"
def _process_string_or_callable(string_or_callable, dictionary):
"""Process a callable or a string representing a callable.
Args:
string_or_callable: Either a string or a callable
dictionary: Dictionary of shape {string_reference: callable}
Returns:
string_or_callable if string_or_callable is a callable ; otherwise,
dictionary[string_or_callable]
Raises:
NotImplementedError: If string_or_callable is of the wrong type, or has an
unexpected value (Not present in dictionary).
"""
if callable(string_or_callable):
return string_or_callable
try:
return dictionary[string_or_callable]
except KeyError as e:
raise NotImplementedError("Input type / value not supported. Accepted types"
": string, callable. Acceptable string values : "
"{}. Input provided : {}".format(
list(dictionary.keys()),
string_or_callable)) from e
def sample_episode(state, policies):
"""Samples an episode using policies, starting from state.
Args:
state: Pyspiel state representing the current state.
policies: List of policy representing the policy executed by each player.
Returns:
The result of the call to returns() of the final state in the episode.
Meant to be a win/loss integer.
"""
if state.is_terminal():
return np.array(state.returns(), dtype=np.float32)
if state.is_simultaneous_node():
actions = [None] * state.num_players()
for player in range(state.num_players()):
state_policy = policies[player](state, player)
outcomes, probs = zip(*state_policy.items())
actions[player] = utils.random_choice(outcomes, probs)
state.apply_actions(actions)
return sample_episode(state, policies)
if state.is_chance_node():
outcomes, probs = zip(*state.chance_outcomes())
else:
player = state.current_player()
state_policy = policies[player](state)
outcomes, probs = zip(*state_policy.items())
state.apply_action(utils.random_choice(outcomes, probs))
return sample_episode(state, policies)
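# Illustrative sketch: rolling out one episode of Kuhn poker with uniform
# random callables standing in for trained policies. The game choice and the
# inline policy are assumptions made only for this example.
def _example_sample_episode():
  import pyspiel  # Local import: only this sketch needs pyspiel directly.
  game = pyspiel.load_game("kuhn_poker")
  def uniform_random_policy(state, player=None):
    del player  # Passed at simultaneous-move nodes; unused here.
    legal_actions = state.legal_actions(state.current_player())
    return {action: 1.0 / len(legal_actions) for action in legal_actions}
  policies = [uniform_random_policy] * game.num_players()
  return sample_episode(game.new_initial_state(), policies)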
class AbstractMetaTrainer(object):
"""Abstract class implementing meta trainers.
If a trainer is something that computes a best response to given environment &
agents, a meta trainer will compute which best responses to compute (Against
what, how, etc)
This class can support PBT, Hyperparameter Evolution, etc.
"""
# pylint:disable=dangerous-default-value
def __init__(self,
game,
oracle,
initial_policies=None,
meta_strategy_method=_DEFAULT_META_STRATEGY_METHOD,
training_strategy_selector=_DEFAULT_STRATEGY_SELECTION_METHOD,
symmetric_game=False,
number_policies_selected=1,
**kwargs):
"""Abstract Initialization for meta trainers.
Args:
game: A pyspiel game object.
oracle: An oracle object, from an implementation of the AbstractOracle
class.
initial_policies: A list of initial policies, to set up a default for
training. Resorts to tabular policies if not set.
meta_strategy_method: String, or callable taking a MetaTrainer object and
returning a list of meta strategies (One list entry per player).
String value can be:
- "uniform": Uniform distribution on policies.
- "nash": Taking nash distribution. Only works for 2 player, 0-sum
games.
- "prd": Projected Replicator Dynamics, as described in Lanctot et
Al.
training_strategy_selector: A callable or a string. If a callable, takes
as arguments: - An instance of `PSROSolver`, - a
        `number_policies_selected` integer, and returns a list of
`num_players` lists of selected policies to train from.
When a string, supported values are:
- "top_k_probabilites": selects the first
'number_policies_selected' policies with highest selection
probabilities.
- "probabilistic": randomly selects 'number_policies_selected'
with probabilities determined by the meta strategies.
- "exhaustive": selects every policy of every player.
- "rectified": only selects strategies that have nonzero chance of
being selected.
- "uniform": randomly selects 'number_policies_selected' policies
with uniform probabilities.
      symmetric_game: Whether to consider the current game as a symmetric game
        (True) or not (False).
number_policies_selected: Maximum number of new policies to train for each
player at each PSRO iteration.
**kwargs: kwargs for meta strategy computation and training strategy
selection
"""
self._iterations = 0
self._game = game
self._oracle = oracle
self._num_players = self._game.num_players()
self.symmetric_game = symmetric_game
self._game_num_players = self._num_players
self._num_players = 1 if symmetric_game else self._num_players
self._number_policies_selected = number_policies_selected
meta_strategy_method = _process_string_or_callable(
meta_strategy_method, meta_strategies.META_STRATEGY_METHODS)
print("Using {} as strategy method.".format(meta_strategy_method))
self._training_strategy_selector = _process_string_or_callable(
training_strategy_selector,
strategy_selectors.TRAINING_STRATEGY_SELECTORS)
print("Using {} as training strategy selector.".format(
self._training_strategy_selector))
self._meta_strategy_method = meta_strategy_method
self._kwargs = kwargs
self._initialize_policy(initial_policies)
self._initialize_game_state()
self.update_meta_strategies()
def _initialize_policy(self, initial_policies):
return NotImplementedError(
"initialize_policy not implemented. Initial policies passed as"
" arguments : {}".format(initial_policies))
def _initialize_game_state(self):
return NotImplementedError("initialize_game_state not implemented.")
def iteration(self, seed=None):
"""Main trainer loop.
Args:
seed: Seed for random BR noise generation.
"""
self._iterations += 1
self.update_agents() # Generate new, Best Response agents via oracle.
self.update_empirical_gamestate(seed=seed) # Update gamestate matrix.
self.update_meta_strategies() # Compute meta strategy (e.g. Nash)
def update_meta_strategies(self):
self._meta_strategy_probabilities = self._meta_strategy_method(self)
if self.symmetric_game:
self._meta_strategy_probabilities = [self._meta_strategy_probabilities[0]]
def update_agents(self):
return NotImplementedError("update_agents not implemented.")
def update_empirical_gamestate(self, seed=None):
return NotImplementedError("update_empirical_gamestate not implemented."
" Seed passed as argument : {}".format(seed))
def sample_episodes(self, policies, num_episodes):
"""Samples episodes and averages their returns.
Args:
policies: A list of policies representing the policies executed by each
player.
num_episodes: Number of episodes to execute to estimate average return of
policies.
Returns:
Average episode return over num episodes.
"""
totals = np.zeros(self._num_players)
for _ in range(num_episodes):
totals += sample_episode(self._game.new_initial_state(),
policies).reshape(-1)
return totals / num_episodes
def get_meta_strategies(self):
"""Returns the Nash Equilibrium distribution on meta game matrix."""
meta_strategy_probabilities = self._meta_strategy_probabilities
if self.symmetric_game:
meta_strategy_probabilities = (self._game_num_players *
meta_strategy_probabilities)
return [np.copy(a) for a in meta_strategy_probabilities]
def get_meta_game(self):
"""Returns the meta game matrix."""
meta_games = self._meta_games
return [np.copy(a) for a in meta_games]
def get_policies(self):
"""Returns the players' policies."""
policies = self._policies
if self.symmetric_game:
# Notice that the following line returns N references to the same policy
# This might not be correct for certain applications.
# E.g., a DQN BR oracle with player_id information
policies = self._game_num_players * policies
return policies
def get_kwargs(self):
return self._kwargs
| open_spiel-master | open_spiel/python/algorithms/psro_v2/abstract_meta_trainer.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strategy selectors repository."""
import numpy as np
DEFAULT_STRATEGY_SELECTION_METHOD = "probabilistic"
# Constant, specifying the threshold below which probabilities are considered 0.
EPSILON_MIN_POSITIVE_PROBA = 1e-8
def exhaustive(solver, number_policies_selected=1):
"""Returns every player's policies.
Args:
solver: A GenPSROSolver instance.
number_policies_selected: Number of policies to return for each player.
(Compatibility argument)
Returns:
used_policies : List of size 'num_players' of lists of size
min('number_policies_selected', num_policies') containing selected
policies.
used_policies_indexes: List of lists of the same shape as used_policies,
containing the list indexes of selected policies.
"""
del number_policies_selected
policies = solver.get_policies()
indexes = [list(range(len(pol))) for pol in policies]
return policies, indexes
# Factory function for more complex filters.
def filter_function_factory(filter_function):
"""Returns a function filtering players' strategies wrt.
'filter_function'.
This function is used to select which strategy to start training from. As
such, and in the Rectified Nash Response logic, filter_function expects a
certain set of arguments:
- player_policies: The list of policies for the current player.
- player: The current player id.
- effective_number_selected: The effective number of policies to select.
- solver: In case the above arguments weren't enough, the solver instance so
the filter_function can have more complex behavior.
And returns the selected policies and policy indexes for the current player.
Args:
filter_function: A filter function following the specifications above, used
to filter which strategy to start training from for each player.
Returns:
A filter function on all players.
"""
def filter_policies(solver, number_policies_selected=1):
"""Filters each player's policies according to 'filter_function'.
Args:
solver: The PSRO solver.
number_policies_selected: The expected number of policies to select. If
there are fewer policies than 'number_policies_selected', behavior will
saturate at num_policies.
Returns:
used_policies : List of length 'num_players' of lists of length
min('number_policies_selected', num_policies') containing selected
policies.
used_policies_indexes: List of lists of the same shape as used_policies,
containing the list indexes of selected policies.
"""
policies = solver.get_policies()
num_players = len(policies)
meta_strategy_probabilities = solver.get_meta_strategies()
used_policies = []
used_policy_indexes = []
for player in range(num_players):
player_policies = policies[player]
current_selection_probabilities = meta_strategy_probabilities[player]
effective_number = min(number_policies_selected, len(player_policies))
used_policy, used_policy_index = filter_function(
player_policies, current_selection_probabilities, player,
effective_number, solver)
used_policies.append(used_policy)
used_policy_indexes.append(used_policy_index)
return used_policies, used_policy_indexes
# Return the created function.
return filter_policies
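# Example (illustrative sketch, not an official selector): a hypothetical
# custom filter wrapped with 'filter_function_factory'. It always keeps only
# each player's most recently added policy; the names '_latest_policy_filter'
# and '_latest_policy' are made up for illustration.
def _latest_policy_filter(player_policies, selection_probabilities, player,
                          effective_number_to_select, solver):
  del selection_probabilities, player, effective_number_to_select, solver
  return [player_policies[-1]], [len(player_policies) - 1]
_latest_policy = filter_function_factory(_latest_policy_filter)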
def rectified_filter(player_policies, selection_probabilities, player,
effective_number_to_select, solver):
"""Returns every strategy with nonzero selection probability.
Args:
player_policies: A list of policies for the current player.
selection_probabilities: Selection probabilities for 'player_policies'.
player: Player id.
effective_number_to_select: Effective number of policies to select.
solver: PSRO solver instance if kwargs needed.
Returns:
    selected_policies: List of all policies whose selection probability is
      above EPSILON_MIN_POSITIVE_PROBA.
selected_indexes: List of the same shape as selected_policies,
containing the list indexes of selected policies.
"""
del effective_number_to_select, solver, player
selected_indexes = [
i for i in range(len(player_policies))
if selection_probabilities[i] > EPSILON_MIN_POSITIVE_PROBA
]
selected_policies = [player_policies[i] for i in selected_indexes]
return selected_policies, selected_indexes
def probabilistic_filter(player_policies, selection_probabilities, player,
effective_number_to_select, solver):
"""Returns every strategy with nonzero selection probability.
Args:
player_policies: A list of policies for the current player.
selection_probabilities: Selection probabilities for 'player_policies'.
player: Player id.
effective_number_to_select: Effective number of policies to select.
solver: PSRO solver instance if kwargs needed.
Returns:
selected_policies : List of size 'effective_number_to_select'
containing selected policies.
selected_indexes: List of the same shape as selected_policies,
containing the list indexes of selected policies.
"""
del solver, player
selected_indexes = list(
np.random.choice(
list(range(len(player_policies))),
effective_number_to_select,
replace=False,
p=selection_probabilities))
selected_policies = [player_policies[i] for i in selected_indexes]
return selected_policies, selected_indexes
def top_k_probabilities_filter(player_policies, selection_probabilities, player,
effective_number_to_select, solver):
"""Returns top 'effective_number_to_select' highest probability policies.
Args:
player_policies: A list of policies for the current player.
selection_probabilities: Selection probabilities for 'player_policies'.
player: Player id.
effective_number_to_select: Effective number of policies to select.
solver: PSRO solver instance if kwargs needed.
Returns:
selected_policies : List of size 'effective_number_to_select'
containing selected policies.
selected_indexes: List of the same shape as selected_policies,
containing the list indexes of selected policies.
"""
del player, solver
  # Sort in decreasing probability order and keep the top indices.
  selected_indexes = [
      index for _, index in sorted(
          zip(selection_probabilities, list(range(len(player_policies)))),
          reverse=True,
          key=lambda pair: pair[0])
  ][:effective_number_to_select]
selected_policies = [player_policies[i] for i in selected_indexes]
return selected_policies, selected_indexes
def uniform_filter(player_policies, selection_probabilities, player,
effective_number_to_select, solver):
"""Returns 'effective_number_to_select' uniform-randomly selected policies.
Args:
player_policies: A list of policies for the current player.
selection_probabilities: Selection probabilities for 'player_policies'.
player: Player id.
effective_number_to_select: Effective number of policies to select.
solver: PSRO solver instance if kwargs needed.
Returns:
selected_policies : List of size 'effective_number_to_select'
containing selected policies.
selected_indexes: List of the same shape as selected_policies,
containing the list indexes of selected policies.
"""
del solver, selection_probabilities, player
selected_indexes = list(
np.random.choice(
list(range(len(player_policies))),
effective_number_to_select,
replace=False,
p=np.ones(len(player_policies)) / len(player_policies)))
selected_policies = [player_policies[i] for i in selected_indexes]
return selected_policies, selected_indexes
def functional_probabilistic_filter(player_policies, selection_probabilities,
player, effective_number_to_select, solver):
"""Returns effective_number_to_select randomly selected policies by function.
Args:
player_policies: A list of policies for the current player.
    selection_probabilities: Unused; probabilities are recomputed via the
      solver's kwargs.
player: Player id.
effective_number_to_select: Effective number of policies to select.
solver: PSRO solver instance if kwargs needed.
Returns:
selected_policies : List of size 'effective_number_to_select'
containing selected policies.
selected_indexes: List of the same shape as selected_policies,
containing the list indexes of selected policies.
"""
kwargs = solver.get_kwargs()
# By default, use meta strategies.
probability_computation_function = kwargs.get(
"selection_probability_function") or (lambda x: x.get_meta_strategies())
selection_probabilities = probability_computation_function(solver)[player]
selected_indexes = list(
np.random.choice(
list(range(len(player_policies))),
effective_number_to_select,
replace=False,
p=selection_probabilities))
selected_policies = [player_policies[i] for i in selected_indexes]
return selected_policies, selected_indexes
# Introducing aliases:
uniform = filter_function_factory(uniform_filter)
rectified = filter_function_factory(rectified_filter)
probabilistic = filter_function_factory(probabilistic_filter)
top_k_probabilities = filter_function_factory(top_k_probabilities_filter)
functional_probabilistic = filter_function_factory(
functional_probabilistic_filter)
# pylint:disable=pointless-string-statement
# ---
"""Selectors below are used to rectify probabilities.
"""
# ---
# pylint:enable=pointless-string-statement
def get_current_and_average_payoffs(ps2ro_trainer, current_player,
current_strategy):
"""Returns the current player's and average players' payoffs.
  The payoffs are those obtained when current_player plays the strategy with
  index 'current_strategy'.
Args:
    ps2ro_trainer: A PSRO solver instance.
current_player: Integer, current player index.
current_strategy: Integer, current player's strategy index.
Returns:
Payoff tensor for current player, Average payoff tensor over all players.
"""
  # Get the vector of payoffs associated with current_player's strategy index.
meta_games = ps2ro_trainer.meta_games
current_payoff = meta_games[current_player]
current_payoff = np.take(
current_payoff, current_strategy, axis=current_player)
# Get average per-player payoff matrix.
average_payoffs = np.mean(meta_games, axis=0)
average_payoffs = np.take(
average_payoffs, current_strategy, axis=current_player)
return current_payoff, average_payoffs
def rectified_selector(ps2ro_trainer, current_player, current_strategy):
current_payoff, average_payoffs = get_current_and_average_payoffs(
ps2ro_trainer, current_player, current_strategy)
# Rectified Nash condition : select those strategies where we do better
# than others.
res = current_payoff >= average_payoffs
return np.expand_dims(res, axis=current_player)
# pylint:disable=pointless-string-statement
# ---
"""When using joint strategies, use the selectors below.
"""
# ---
# pylint:enable=pointless-string-statement
def empty_list_generator(number_dimensions):
result = []
for _ in range(number_dimensions - 1):
result = [result]
return result
def get_indices_from_non_marginalized(policies):
"""Get a list of lists of indices from joint policies.
  These are the indices used by the training strategy selectors.
Args:
policies: a list of joint policies.
Returns:
A list of lists of indices.
"""
num_players = len(policies[0])
num_strategies = len(policies)
return [list(range(num_strategies)) for _ in range(num_players)]
# In case we want to select strategies to train based on
# non-marginalized probabilities.
def rectified_non_marginalized(solver):
"""Returns every strategy with nonzero selection probability.
Args:
solver: A GenPSROSolver instance.
"""
used_policies = []
policies = solver.get_policies()
num_players = len(policies)
meta_strategy_probabilities = (
solver.get_and_update_non_marginalized_meta_strategies(update=False))
for k in range(num_players):
current_policies = policies[k]
current_probabilities = meta_strategy_probabilities[k]
current_policies = [
current_policies[i]
for i in range(len(current_policies))
if current_probabilities[i] > EPSILON_MIN_POSITIVE_PROBA
]
used_policies.append(current_policies)
return used_policies, get_indices_from_non_marginalized(used_policies)
def exhaustive_non_marginalized(solver):
"""Returns every player's policies.
Args:
solver: A GenPSROSolver instance.
"""
used_policies = solver.get_policies()
return used_policies, get_indices_from_non_marginalized(used_policies)
def probabilistic_non_marginalized(solver):
"""Returns [kwargs] policies randomly, proportionally with selection probas.
Args:
solver: A GenPSROSolver instance.
"""
kwargs = solver.get_kwargs()
# By default, select only 1 new policy to optimize from.
number_policies_to_select = kwargs.get("number_policies_selected") or 1
# Get integer IDs and probabilities of meta-strategies
ids = solver.get_joint_policy_ids()
joint_strategy_probabilities = (
solver.get_and_update_non_marginalized_meta_strategies(update=False))
effective_number = min(number_policies_to_select, len(ids))
selected_policy_ids = list(
np.random.choice(
ids, effective_number, replace=False, p=joint_strategy_probabilities))
used_policies = solver.get_joint_policies_from_id_list(selected_policy_ids)
return used_policies, get_indices_from_non_marginalized(used_policies)
def top_k_probabilites_non_marginalized(solver):
"""Returns [kwargs] policies with highest selection probabilities.
Args:
solver: A GenPSROSolver instance.
"""
kwargs = solver.get_kwargs()
# By default, select only 1 new policy to optimize from.
number_policies_to_select = kwargs.get("number_policies_selected") or 1
ids = solver.get_joint_policy_ids()
effective_number = min(number_policies_to_select, len(ids))
joint_strategy_probabilities = (
solver.get_and_update_non_marginalized_meta_strategies(update=False))
sorted_list = sorted(
zip(joint_strategy_probabilities, ids),
reverse=True,
key=lambda pair: pair[0])
selected_policy_ids = [id_selected for _, id_selected in sorted_list
][:effective_number]
used_policies = solver.get_joint_policies_from_id_list(selected_policy_ids)
return used_policies, get_indices_from_non_marginalized(used_policies)
def uniform_non_marginalized(solver):
"""Returns [kwargs] randomly selected policies (Uniform probability).
Args:
solver: A GenPSROSolver instance.
"""
kwargs = solver.get_kwargs()
# By default, select only 1 new policy to optimize from.
number_policies_to_select = kwargs.get("number_policies_selected") or 1
ids = solver.get_joint_policy_ids()
effective_number = min(number_policies_to_select, len(ids))
selected_policy_ids = list(
np.random.choice(
ids, effective_number, replace=False, p=np.ones(len(ids)) / len(ids)))
used_policies = solver.get_joint_policies_from_id_list(selected_policy_ids)
return used_policies, get_indices_from_non_marginalized(used_policies)
def compressed_lambda(x):
return x.get_and_update_non_marginalized_meta_strategies(update=False)
def functional_probabilistic_non_marginalized(solver):
"""Returns [kwargs] randomly selected policies with generated probabilities.
Args:
solver: A GenPSROSolver instance.
"""
kwargs = solver.get_kwargs()
# By default, select only 1 new policy to optimize from.
number_policies_to_select = kwargs.get("number_policies_selected") or 1
# By default, use meta strategies.
probability_computation_function = kwargs.get(
"selection_probability_function") or compressed_lambda
ids = solver.get_joint_policy_ids()
joint_strategy_probabilities = probability_computation_function(solver)
effective_number = min(number_policies_to_select, len(ids))
selected_policies = list(
np.random.choice(
ids, effective_number, replace=False, p=joint_strategy_probabilities))
used_policies = solver.get_joint_policies_from_id_list(selected_policies)
return used_policies, get_indices_from_non_marginalized(used_policies)
TRAINING_STRATEGY_SELECTORS = {
"functional_probabilistic": functional_probabilistic,
"top_k_probabilities": top_k_probabilities,
"probabilistic": probabilistic,
"exhaustive": exhaustive,
"rectified": rectified,
"uniform": uniform,
"functional_probabilistic_non_marginalized": (
functional_probabilistic_non_marginalized
),
"top_k_probabilites_non_marginalized": top_k_probabilites_non_marginalized,
"probabilistic_non_marginalized": probabilistic_non_marginalized,
"exhaustive_non_marginalized": exhaustive_non_marginalized,
"rectified_non_marginalized": rectified_non_marginalized,
"uniform_non_marginalized": uniform_non_marginalized,
}
| open_spiel-master | open_spiel/python/algorithms/psro_v2/strategy_selectors.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various general utility functions."""
import random
import numpy as np
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.algorithms import policy_aggregator
from open_spiel.python.algorithms import policy_aggregator_joint
from open_spiel.python.egt import alpharank
from open_spiel.python.egt import utils as alpharank_utils
def empty_list_generator(number_dimensions):
result = []
for _ in range(number_dimensions - 1):
result = [result]
return result
def random_choice(outcomes, probabilities):
"""Samples from discrete probability distribution.
  `numpy.random.choice` does not seem optimized for repeated calls; this code
  has higher performance.
Args:
outcomes: List of categorical outcomes.
    probabilities: Discrete probability distribution as a list of floats.
Returns:
Entry of `outcomes` sampled according to the distribution.
"""
cumsum = np.cumsum(probabilities)
return outcomes[np.searchsorted(cumsum/cumsum[-1], random.random())]
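# Example (illustrative sketch): drawing one outcome with 'random_choice'.
# The outcome labels and probabilities below are arbitrary illustration values.
def _random_choice_example():
  outcomes = ["rock", "paper", "scissors"]
  probabilities = [0.2, 0.5, 0.3]
  # Over many calls, "paper" is returned roughly half of the time.
  return random_choice(outcomes, probabilities)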
def sample_strategy(total_policies,
probabilities_of_playing_policies,
probs_are_marginal=True):
"""Samples strategies given probabilities.
Uses independent sampling if probs_are_marginal, and joint sampling otherwise.
Args:
total_policies: if probs_are_marginal, this is a list, each element a list
of each player's policies. If not, this is a list of joint policies. In
both cases the policy orders must match that of
probabilities_of_playing_policies.
probabilities_of_playing_policies: if probs_are_marginal, this is a list,
with the k-th element also a list specifying the play probabilities of the
k-th player's policies. If not, this is a list of play probabilities of
the joint policies specified by total_policies.
probs_are_marginal: a boolean indicating if player-wise marginal
probabilities are provided in probabilities_of_playing_policies. If False,
then play_probabilities is assumed to specify joint distribution.
Returns:
sampled_policies: A list specifying a single sampled joint strategy.
"""
if probs_are_marginal:
return sample_strategy_marginal(total_policies,
probabilities_of_playing_policies)
else:
return sample_strategy_joint(total_policies,
probabilities_of_playing_policies)
def sample_strategy_marginal(total_policies, probabilities_of_playing_policies):
"""Samples strategies given marginal probabilities.
  Each player's policy is sampled independently from its marginal distribution.
Args:
total_policies: A list, each element a list of each player's policies.
probabilities_of_playing_policies: This is a list, with the k-th element
also a list specifying the play probabilities of the k-th player's
policies.
Returns:
sampled_policies: A list specifying a single sampled joint strategy.
"""
num_players = len(total_policies)
sampled_policies = []
for k in range(num_players):
current_policies = total_policies[k]
current_probabilities = probabilities_of_playing_policies[k]
sampled_policy_k = random_choice(current_policies, current_probabilities)
sampled_policies.append(sampled_policy_k)
return sampled_policies
def sample_random_tensor_index(probabilities_of_index_tensor):
  """Samples a multi-dimensional index from a tensor of probabilities."""
  shape = probabilities_of_index_tensor.shape
reshaped_probas = probabilities_of_index_tensor.reshape(-1)
strat_list = list(range(len(reshaped_probas)))
chosen_index = random_choice(strat_list, reshaped_probas)
return np.unravel_index(chosen_index, shape)
def sample_strategy_joint(total_policies, probabilities_of_playing_policies):
"""Samples strategies given joint probabilities.
  A single joint profile is sampled from the joint probability tensor.
Args:
total_policies: A list, each element a list of each player's policies.
probabilities_of_playing_policies: This is a list of play probabilities of
the joint policies specified by total_policies.
Returns:
sampled_policies: A list specifying a single sampled joint strategy.
"""
sampled_index = sample_random_tensor_index(probabilities_of_playing_policies)
sampled_policies = []
for player in range(len(sampled_index)):
ind = sampled_index[player]
sampled_policies.append(total_policies[player][ind])
return sampled_policies
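# Example (illustrative sketch): joint sampling over hypothetical policies
# represented here by plain strings; the probability tensor has one axis per
# player and sums to one.
def _sample_strategy_joint_example():
  total_policies = [["a0", "a1"], ["b0", "b1"]]
  joint_probabilities = np.array([[0.1, 0.2],
                                  [0.3, 0.4]])
  # Returns, e.g., ["a1", "b1"] with probability 0.4.
  return sample_strategy_joint(total_policies, joint_probabilities)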
def softmax(x):
  # Shift by the max for numerical stability; the result is mathematically
  # unchanged.
  exps = np.exp(x - np.max(x))
  return exps / np.sum(exps)
def round_maintain_sum(x):
"""Returns element-wise rounded version y of a vector x, with sum(x)==sum(y).
E.g., if x = array([3.37625333, 2.27920304, 4.34454364]), note sum(x) == 10.
However, naively doing y = np.round(x) yields sum(y) == 9. In this function,
however, the rounded counterpart y will have sum(y) == 10.
Args:
x: a vector.
"""
y = np.floor(x)
sum_diff = round(sum(x)) - sum(y) # Difference of original vs. floored sum
indices = np.argsort(y - x)[:int(sum_diff)] # Indices with highest difference
y[indices] += 1 # Add the missing mass to the elements with the most missing
return y
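# Example (illustrative sketch): the docstring's own values, run through
# 'round_maintain_sum'; unlike np.round, the rounded total stays at 10.
def _round_maintain_sum_example():
  x = np.array([3.37625333, 2.27920304, 4.34454364])  # sum(x) == 10
  y = round_maintain_sum(x)                            # array([4., 2., 4.])
  assert sum(y) == 10                                  # np.round(x) sums to 9
  return y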
def get_alpharank_marginals(payoff_tables, pi):
"""Returns marginal strategy rankings for each player given joint rankings pi.
Args:
payoff_tables: List of meta-game payoff tables for a K-player game, where
each table has dim [n_strategies_player_1 x ... x n_strategies_player_K].
These payoff tables may be asymmetric.
pi: The vector of joint rankings as computed by alpharank. Each element i
corresponds to a unique integer ID representing a given strategy profile,
with profile_to_id mappings provided by
alpharank_utils.get_id_from_strat_profile().
Returns:
pi_marginals: List of np.arrays of player-wise marginal strategy masses,
where the k-th player's np.array has shape [n_strategies_player_k].
"""
num_populations = len(payoff_tables)
if num_populations == 1:
return pi
else:
num_strats_per_population = alpharank_utils.get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format=False)
num_profiles = alpharank_utils.get_num_profiles(num_strats_per_population)
pi_marginals = [np.zeros(n) for n in num_strats_per_population]
for i_strat in range(num_profiles):
strat_profile = (
alpharank_utils.get_strat_profile_from_id(num_strats_per_population,
i_strat))
for i_player in range(num_populations):
pi_marginals[i_player][strat_profile[i_player]] += pi[i_strat]
return pi_marginals
def remove_epsilon_negative_probs(probs, epsilon=1e-9):
"""Removes negative probabilities that occur due to precision errors."""
if len(probs[probs < 0]) > 0: # pylint: disable=g-explicit-length-test
# Ensures these negative probabilities aren't large in magnitude, as that is
# unexpected and likely not due to numerical precision issues
print("Probabilities received were: {}".format(probs[probs < 0]))
assert np.all(np.min(probs[probs < 0]) > -1.*epsilon), (
"Negative Probabilities received were: {}".format(probs[probs < 0]))
probs[probs < 0] = 0
probs = probs / np.sum(probs)
return probs
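# Example (illustrative sketch): tiny negative entries produced by numerical
# precision issues are clipped to zero and the distribution is re-normalized.
def _remove_epsilon_negative_probs_example():
  probs = np.array([0.5, 0.5, -1e-12])
  return remove_epsilon_negative_probs(probs)  # -> array([0.5, 0.5, 0.])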
def get_joint_strategy_from_marginals(probabilities):
"""Returns a joint strategy tensor from a list of marginals.
Args:
probabilities: list of list of probabilities, one for each player.
Returns:
A joint strategy from a list of marginals.
"""
probas = []
for i in range(len(probabilities)):
probas_shapes = [1] * len(probabilities)
probas_shapes[i] = -1
probas.append(np.array(probabilities[i]).reshape(probas_shapes))
  # Broadcast-multiply the reshaped marginals into the joint strategy tensor.
  # (np.prod over a list of differently-shaped arrays depends on ragged array
  # creation, which newer numpy versions reject.)
  joint = probas[0]
  for proba in probas[1:]:
    joint = joint * proba
  return joint
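# Example (illustrative sketch): with the hypothetical two-player marginals
# below, entry (i, j) of the joint strategy equals p0[i] * p1[j].
def _get_joint_strategy_from_marginals_example():
  p0 = [0.5, 0.5]
  p1 = [0.9, 0.1]
  joint = get_joint_strategy_from_marginals([p0, p1])
  return joint  # shape (2, 2), entries [[0.45, 0.05], [0.45, 0.05]], sum 1.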
def alpharank_strategy(solver, return_joint=False, **unused_kwargs):
"""Returns AlphaRank distribution on meta game matrix.
This method works for general games.
Args:
solver: GenPSROSolver instance.
    return_joint: a boolean; if True, returns both the player-wise marginals
      and the joint distribution, otherwise only the joint distribution.
  Returns:
    marginals: a list specifying, for each player, the alpharank marginal
      distribution over their strategies (only returned if return_joint).
    joint_distr: the joint alpharank distribution over all strategy profiles.
"""
meta_games = solver.get_meta_game()
meta_games = [np.asarray(x) for x in meta_games]
if solver.symmetric_game:
meta_games = [meta_games[0]]
# Get alpharank distribution via alpha-sweep
joint_distr = alpharank.sweep_pi_vs_epsilon(
meta_games)
joint_distr = remove_epsilon_negative_probs(joint_distr)
marginals = 2 * [joint_distr]
joint_distr = get_joint_strategy_from_marginals(marginals)
if return_joint:
return marginals, joint_distr
else:
return joint_distr
else:
joint_distr = alpharank.sweep_pi_vs_epsilon(meta_games)
joint_distr = remove_epsilon_negative_probs(joint_distr)
if return_joint:
marginals = get_alpharank_marginals(meta_games, joint_distr)
return marginals, joint_distr
else:
return joint_distr
def get_strategy_profile_ids(payoff_tables):
  """Returns a range over the integer IDs of all strategy profiles."""
num_strats_per_population = (
alpharank_utils.get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format=False))
return range(alpharank_utils.get_num_profiles(num_strats_per_population))
def get_joint_policies_from_id_list(payoff_tables, policies, profile_id_list):
"""Returns a list of joint policies, given a list of integer IDs.
Args:
payoff_tables: List of payoff tables, one per player.
policies: A list of policies, one per player.
profile_id_list: list of integer IDs, each corresponding to a joint policy.
These integers correspond to those in get_strategy_profile_ids().
Returns:
selected_joint_policies: A list, with each element being a joint policy
instance (i.e., a list of policies, one per player).
"""
num_strats_per_population = (
alpharank_utils.get_num_strats_per_population(
payoff_tables, payoffs_are_hpt_format=False))
np.testing.assert_array_equal(num_strats_per_population,
[len(p) for p in policies])
num_players = len(policies)
selected_joint_policies = []
for profile_id in profile_id_list:
# Compute the profile associated with the integer profile_id
policy_profile = alpharank_utils.get_strat_profile_from_id(
num_strats_per_population, profile_id)
# Append the joint policy corresponding to policy_profile
selected_joint_policies.append(
[policies[k][policy_profile[k]] for k in range(num_players)])
return selected_joint_policies
def compute_states_and_info_states_if_none(game,
all_states=None,
state_to_information_state=None):
"""Returns all_states and/or state_to_information_state for the game.
To recompute everything, pass in None for both all_states and
state_to_information_state. Otherwise, this function will use the passed in
values to reconstruct either of them.
Args:
game: The open_spiel game.
all_states: The result of calling get_all_states.get_all_states. Cached for
improved performance.
state_to_information_state: A dict mapping str(state) to
state.information_state for every state in the game. Cached for improved
performance.
"""
if all_states is None:
all_states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False)
if state_to_information_state is None:
state_to_information_state = {
state: all_states[state].information_state_string()
for state in all_states
}
return all_states, state_to_information_state
def aggregate_policies(game, total_policies, probabilities_of_playing_policies):
"""Aggregate the players' policies.
Specifically, returns a single callable policy object that is
realization-equivalent to playing total_policies with
probabilities_of_playing_policies. I.e., aggr_policy is a joint policy that
can be called at any information state [via
action_probabilities(state, player_id)].
Args:
game: The open_spiel game.
total_policies: A list of list of all policy.Policy strategies used for
training, where the n-th entry of the main list is a list of policies
available to the n-th player.
probabilities_of_playing_policies: A list of arrays representing, per
player, the probabilities of playing each policy in total_policies for the
same player.
Returns:
A callable object representing the policy.
"""
aggregator = policy_aggregator.PolicyAggregator(game)
return aggregator.aggregate(
range(len(probabilities_of_playing_policies)), total_policies,
probabilities_of_playing_policies)
def marginal_to_joint(policies):
"""Marginal policies to joint policies.
Args:
policies: List of list of policies, one list per player.
Returns:
Joint policies in the right order (np.reshape compatible).
"""
shape = tuple([len(a) for a in policies])
num_players = len(shape)
total_length = np.prod(shape)
indexes = np.array(list(range(total_length)))
joint_indexes = np.unravel_index(indexes, shape)
joint_policies = []
for joint_index in zip(*joint_indexes):
joint_policies.append([
policies[player][joint_index[player]] for player in range(num_players)
])
return joint_policies
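# Example (illustrative sketch): enumeration order of joint policies, using
# strings as stand-ins for policy objects; the order is np.reshape-compatible
# (row-major), matching the flattening used elsewhere in this module.
def _marginal_to_joint_example():
  policies = [["a0", "a1"], ["b0", "b1"]]
  # -> [["a0", "b0"], ["a0", "b1"], ["a1", "b0"], ["a1", "b1"]]
  return marginal_to_joint(policies)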
def aggregate_joint_policies(game, total_policies,
probabilities_of_playing_policies):
"""Aggregate the players' joint policies.
Specifically, returns a single callable policy object that is
realization-equivalent to playing total_policies with
probabilities_of_playing_policies. I.e., aggr_policy is a joint policy that
can be called at any information state [via
action_probabilities(state, player_id)].
Args:
game: The open_spiel game.
total_policies: A list of list of all policy.Policy strategies used for
training, where the n-th entry of the main list is a list of policies, one
entry for each player.
probabilities_of_playing_policies: A list of floats representing the
probabilities of playing each joint strategy in total_policies.
Returns:
A callable object representing the policy.
"""
aggregator = policy_aggregator_joint.JointPolicyAggregator(game)
return aggregator.aggregate(
range(len(total_policies[0])), total_policies,
probabilities_of_playing_policies)
| open_spiel-master | open_spiel/python/algorithms/psro_v2/utils.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.psro_v2.strategy_selectors."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms.psro_v2 import strategy_selectors
class FakeSolver(object):
def __init__(self, strategies, policies):
self.strategies = strategies
self.policies = policies
def get_policies(self):
return self.policies
def get_meta_strategies(self):
return self.strategies
def equal_to_transposition_lists(a, b):
return [set(x) for x in a] == [set(x) for x in b]
EPSILON_MIN_POSITIVE_PROBA = 1e-8
def rectified_alias(solver, number_policies_to_select):
"""Returns every strategy with nonzero selection probability.
Args:
solver: A GenPSROSolver instance.
number_policies_to_select: Number policies to select
Returns:
used_policies: A list, each element a list of the policies used per player.
"""
del number_policies_to_select
used_policies = []
used_policy_indexes = []
policies = solver.get_policies()
num_players = len(policies)
meta_strategy_probabilities = solver.get_meta_strategies()
for k in range(num_players):
current_policies = policies[k]
current_probabilities = meta_strategy_probabilities[k]
current_indexes = [
i for i in range(len(current_policies))
if current_probabilities[i] > EPSILON_MIN_POSITIVE_PROBA
]
current_policies = [
current_policies[i]
for i in current_indexes
]
used_policy_indexes.append(current_indexes)
used_policies.append(current_policies)
return used_policies, used_policy_indexes
def probabilistic_alias(solver, number_policies_to_select):
"""Returns [kwargs] policies randomly, proportionally with selection probas.
Args:
solver: A GenPSROSolver instance.
number_policies_to_select: Number policies to select
"""
policies = solver.get_policies()
num_players = len(policies)
meta_strategy_probabilities = solver.get_meta_strategies()
print(policies, meta_strategy_probabilities)
used_policies = []
used_policy_indexes = []
for k in range(num_players):
current_policies = policies[k]
current_selection_probabilities = meta_strategy_probabilities[k]
effective_number = min(number_policies_to_select, len(current_policies))
selected_indexes = list(
np.random.choice(
list(range(len(current_policies))),
effective_number,
replace=False,
p=current_selection_probabilities))
selected_policies = [current_policies[i] for i in selected_indexes]
used_policies.append(selected_policies)
used_policy_indexes.append(selected_indexes)
return used_policies, used_policy_indexes
def top_k_probabilities_alias(solver, number_policies_to_select):
"""Returns [kwargs] policies with highest selection probabilities.
Args:
solver: A GenPSROSolver instance.
number_policies_to_select: Number policies to select
"""
policies = solver.get_policies()
num_players = len(policies)
meta_strategy_probabilities = solver.get_meta_strategies()
used_policies = []
used_policy_indexes = []
for k in range(num_players):
current_policies = policies[k]
current_selection_probabilities = meta_strategy_probabilities[k]
effective_number = min(number_policies_to_select, len(current_policies))
# pylint: disable=g-complex-comprehension
      selected_indexes = [
          index for _, index in sorted(
              zip(current_selection_probabilities,
                  list(range(len(current_policies)))),
              reverse=True,
              key=lambda pair: pair[0])
      ][:effective_number]
selected_policies = [current_policies[i] for i in selected_indexes]
used_policies.append(selected_policies)
used_policy_indexes.append(selected_indexes)
return used_policies, used_policy_indexes
class StrategySelectorsTest(absltest.TestCase):
def test_vital(self):
n_tests = 1000
number_strategies = 50
number_players = 3
for i in range(n_tests):
probabilities = np.random.uniform(size=(number_players,
number_strategies))
probabilities /= np.sum(probabilities, axis=1).reshape(-1, 1)
probabilities = list(probabilities)
policies = [list(range(number_strategies)) for _ in range(number_players)]
solver = FakeSolver(probabilities, policies)
# To see how rectified reacts to 0 probabilities.
probabilities[0][0] = 0
probabilities[-1][-1] = 0
a, b = strategy_selectors.rectified(solver, 1)
c, d = rectified_alias(solver, 1)
self.assertEqual(a, c, "Rectified failed.")
self.assertEqual(b, d, "Rectified failed.")
a, b = strategy_selectors.top_k_probabilities(solver, 3)
c, d = top_k_probabilities_alias(solver, 3)
self.assertEqual(a, c, "Top k failed.")
self.assertEqual(b, d, "Top k failed.")
n_nonzero_policies = 2
probabilities = [np.zeros(number_strategies) for _ in range(
number_players)]
for player in range(number_players):
for _ in range(n_nonzero_policies):
i = np.random.randint(0, high=number_strategies)
while probabilities[player][i] > 1e-12:
i = np.random.randint(0, high=number_strategies)
probabilities[player][i] = 1.0 / n_nonzero_policies
probabilities[player] /= np.sum(probabilities[player])
solver = FakeSolver(probabilities, policies)
a, b = strategy_selectors.probabilistic(solver, n_nonzero_policies)
c, d = probabilistic_alias(solver, n_nonzero_policies)
self.assertTrue(equal_to_transposition_lists(a, c),
"Probabilistic failed.")
self.assertTrue(equal_to_transposition_lists(b, d),
"Probabilistic failed.")
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/psro_v2/strategy_selectors_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An Oracle for any RL algorithm.
An Oracle for any RL algorithm following the OpenSpiel Policy API.
"""
import numpy as np
from open_spiel.python.algorithms.psro_v2 import optimization_oracle
from open_spiel.python.algorithms.psro_v2 import utils
def update_episodes_per_oracles(episodes_per_oracle, played_policies_indexes):
"""Updates the current episode count per policy.
Args:
episodes_per_oracle: List of list of number of episodes played per policy.
One list per player.
played_policies_indexes: List with structure (player_index, policy_index) of
played policies whose count needs updating.
Returns:
Updated count.
"""
for player_index, policy_index in played_policies_indexes:
episodes_per_oracle[player_index][policy_index] += 1
return episodes_per_oracle
def freeze_all(policies_per_player):
"""Freezes all policies within policy_per_player.
Args:
    policies_per_player: List of lists of policies, one list per player.
"""
for policies in policies_per_player:
for pol in policies:
pol.freeze()
def random_count_weighted_choice(count_weight):
"""Returns a randomly sampled index i with P ~ 1 / (count_weight[i] + 1).
Allows random sampling to prioritize indexes that haven't been sampled as many
times as others.
Args:
count_weight: A list of counts to sample an index from.
Returns:
Randomly-sampled index.
"""
indexes = list(range(len(count_weight)))
p = np.array([1 / (weight + 1) for weight in count_weight])
p /= np.sum(p)
chosen_index = np.random.choice(indexes, p=p)
return chosen_index
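# Example (illustrative sketch): policies with fewer recorded training
# episodes are sampled more often. The counts below are arbitrary.
def _random_count_weighted_choice_example():
  episode_counts = [9, 0, 4]
  # Sampling weights are proportional to [1/10, 1/1, 1/5], so index 1 (the
  # least-trained policy) is chosen most often.
  return random_count_weighted_choice(episode_counts)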
class RLOracle(optimization_oracle.AbstractOracle):
"""Oracle handling Approximate Best Responses computation."""
def __init__(self,
env,
best_response_class,
best_response_kwargs,
number_training_episodes=1e3,
self_play_proportion=0.0,
**kwargs):
"""Init function for the RLOracle.
Args:
env: rl_environment instance.
best_response_class: class of the best response.
best_response_kwargs: kwargs of the best response.
number_training_episodes: (Minimal) number of training episodes to run
each best response through. May be higher for some policies.
self_play_proportion: Float, between 0 and 1. Defines the probability that
a non-currently-training player will actually play (one of) its
        currently-training strategies (which will be trained as well).
**kwargs: kwargs
"""
self._env = env
self._best_response_class = best_response_class
self._best_response_kwargs = best_response_kwargs
self._self_play_proportion = self_play_proportion
self._number_training_episodes = number_training_episodes
super(RLOracle, self).__init__(**kwargs)
def sample_episode(self, unused_time_step, agents, is_evaluation=False):
time_step = self._env.reset()
cumulative_rewards = 0.0
while not time_step.last():
if time_step.is_simultaneous_move():
action_list = []
for agent in agents:
output = agent.step(time_step, is_evaluation=is_evaluation)
action_list.append(output.action)
time_step = self._env.step(action_list)
cumulative_rewards += np.array(time_step.rewards)
else:
player_id = time_step.observations["current_player"]
# is_evaluation is a boolean that, when False, lets policies train. The
# setting of PSRO requires that all policies be static aside from those
# being trained by the oracle. is_evaluation could be used to prevent
# policies from training, yet we have opted for adding frozen attributes
# that prevents policies from training, for all values of is_evaluation.
# Since all policies returned by the oracle are frozen before being
# returned, only currently-trained policies can effectively learn.
agent_output = agents[player_id].step(
time_step, is_evaluation=is_evaluation)
action_list = [agent_output.action]
time_step = self._env.step(action_list)
cumulative_rewards += np.array(time_step.rewards)
if not is_evaluation:
for agent in agents:
agent.step(time_step)
return cumulative_rewards
def _has_terminated(self, episodes_per_oracle):
# The oracle has terminated when all policies have at least trained for
# self._number_training_episodes. Given the stochastic nature of our
# training, some policies may have more training episodes than that value.
return np.all(
episodes_per_oracle.reshape(-1) > self._number_training_episodes)
def sample_policies_for_episode(self, new_policies, training_parameters,
episodes_per_oracle, strategy_sampler):
"""Randomly samples a set of policies to run during the next episode.
Note : sampling is biased to select players & strategies that haven't
trained as much as the others.
Args:
new_policies: The currently training policies, list of list, one per
player.
training_parameters: List of list of training parameters dictionaries, one
list per player, one dictionary per training policy.
episodes_per_oracle: List of list of integers, computing the number of
episodes trained on by each policy. Used to weight the strategy
sampling.
strategy_sampler: Sampling function that samples a joint strategy given
probabilities.
Returns:
Sampled list of policies (One policy per player), index of currently
training policies in the list.
"""
num_players = len(training_parameters)
# Prioritizing players that haven't had as much training as the others.
episodes_per_player = [sum(episodes) for episodes in episodes_per_oracle]
chosen_player = random_count_weighted_choice(episodes_per_player)
    # Uniformly choose one of the chosen player's currently-training policies.
agent_chosen_ind = np.random.randint(
0, len(training_parameters[chosen_player]))
agent_chosen_dict = training_parameters[chosen_player][agent_chosen_ind]
new_policy = new_policies[chosen_player][agent_chosen_ind]
# Sample other players' policies.
total_policies = agent_chosen_dict["total_policies"]
probabilities_of_playing_policies = agent_chosen_dict[
"probabilities_of_playing_policies"]
episode_policies = strategy_sampler(total_policies,
probabilities_of_playing_policies)
live_agents_player_index = [(chosen_player, agent_chosen_ind)]
for player in range(num_players):
if player == chosen_player:
episode_policies[player] = new_policy
assert not new_policy.is_frozen()
else:
# Sample a bernoulli with parameter 'self_play_proportion' to determine
# whether we do self-play with 'player'.
if np.random.binomial(1, self._self_play_proportion):
# If we are indeed doing self-play on that round, sample among the
# trained strategies of current_player, with priority given to less-
# selected agents.
agent_index = random_count_weighted_choice(
episodes_per_oracle[player])
self_play_agent = new_policies[player][agent_index]
episode_policies[player] = self_play_agent
live_agents_player_index.append((player, agent_index))
else:
assert episode_policies[player].is_frozen()
return episode_policies, live_agents_player_index
def _rollout(self, game, agents, **oracle_specific_execution_kwargs):
self.sample_episode(None, agents, is_evaluation=False)
def generate_new_policies(self, training_parameters):
"""Generates new policies to be trained into best responses.
Args:
training_parameters: list of list of training parameter dictionaries, one
list per player.
Returns:
List of list of the new policies, following the same structure as
training_parameters.
"""
new_policies = []
for player in range(len(training_parameters)):
player_parameters = training_parameters[player]
new_pols = []
for param in player_parameters:
current_pol = param["policy"]
if isinstance(current_pol, self._best_response_class):
new_pol = current_pol.copy_with_noise(self._kwargs.get("sigma", 0.0))
else:
new_pol = self._best_response_class(self._env, player,
**self._best_response_kwargs)
new_pol.unfreeze()
new_pols.append(new_pol)
new_policies.append(new_pols)
return new_policies
def __call__(self,
game,
training_parameters,
strategy_sampler=utils.sample_strategy,
**oracle_specific_execution_kwargs):
"""Call method for oracle, returns best responses against a set of policies.
Args:
game: The game on which the optimization process takes place.
training_parameters: A list of list of dictionaries (One list per player),
each dictionary containing the following fields :
- policy : the policy from which to start training.
- total_policies: A list of all policy.Policy strategies used for
training, including the one for the current player.
- current_player: Integer representing the current player.
- probabilities_of_playing_policies: A list of arrays representing, per
player, the probabilities of playing each policy in total_policies for
the same player.
strategy_sampler: Callable that samples strategies from total_policies
using probabilities_of_playing_policies. It only samples one joint
set of policies for all players. Implemented to be able to take into
account joint probabilities of action (For Alpharank)
**oracle_specific_execution_kwargs: Other set of arguments, for
compatibility purposes. Can for example represent whether to Rectify
Training or not.
Returns:
A list of list, one for each member of training_parameters, of (epsilon)
best responses.
"""
episodes_per_oracle = [[0
for _ in range(len(player_params))]
for player_params in training_parameters]
episodes_per_oracle = np.array(episodes_per_oracle)
new_policies = self.generate_new_policies(training_parameters)
# TODO(author4): Look into multithreading.
while not self._has_terminated(episodes_per_oracle):
agents, indexes = self.sample_policies_for_episode(
new_policies, training_parameters, episodes_per_oracle,
strategy_sampler)
self._rollout(game, agents, **oracle_specific_execution_kwargs)
episodes_per_oracle = update_episodes_per_oracles(episodes_per_oracle,
indexes)
# Freeze the new policies to keep their weights static. This allows us to
# later not have to make the distinction between static and training
# policies in training iterations.
freeze_all(new_policies)
return new_policies
| open_spiel-master | open_spiel/python/algorithms/psro_v2/rl_oracle.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An Oracle for Exact Best Responses.
This class computes the best responses against sets of policies.
"""
from open_spiel.python import policy as openspiel_policy
from open_spiel.python.algorithms import best_response
from open_spiel.python.algorithms import policy_utils
from open_spiel.python.algorithms.psro_v2 import optimization_oracle
from open_spiel.python.algorithms.psro_v2 import utils
import pyspiel
class BestResponseOracle(optimization_oracle.AbstractOracle):
"""Oracle using exact best responses to compute BR to policies."""
def __init__(self,
best_response_backend='cpp',
game=None,
all_states=None,
state_to_information_state=None,
prob_cut_threshold=-1.0,
action_value_tolerance=-1.0,
**kwargs):
"""Init function for the RLOracle.
Args:
best_response_backend: A string (either 'cpp' or 'py'), specifying the
best response backend to use (C++ or python, respectively). The cpp
backend should be preferred, generally, as it is significantly faster.
game: The game on which the optimization process takes place.
all_states: The result of calling get_all_states.get_all_states. Cached
for improved performance.
state_to_information_state: A dict mapping str(state) to
state.information_state for every state in the game. Cached for improved
performance.
prob_cut_threshold: For cpp backend, a partially computed best-response
can be computed when using a prob_cut_threshold >= 0.
action_value_tolerance: For cpp backend, the max-entropy best-response
policy is computed if a non-negative `action_value_tolerance` is used.
**kwargs: kwargs
"""
super(BestResponseOracle, self).__init__(**kwargs)
self.best_response_backend = best_response_backend
if self.best_response_backend == 'cpp':
# Should compute all_states and state_to_information_state only once in
# the program, as caching them speeds up TabularBestResponse tremendously.
self.all_states, self.state_to_information_state = (
utils.compute_states_and_info_states_if_none(
game, all_states, state_to_information_state))
policy = openspiel_policy.UniformRandomPolicy(game)
policy_to_dict = policy_utils.policy_to_dict(
policy, game, self.all_states, self.state_to_information_state)
# pylint: disable=g-complex-comprehension
# Cache TabularBestResponse for players, due to their costly construction
# TODO(b/140426861): Use a single best-responder once the code supports
# multiple player ids.
self.best_response_processors = [
pyspiel.TabularBestResponse(game, best_responder_id, policy_to_dict,
prob_cut_threshold,
action_value_tolerance)
for best_responder_id in range(game.num_players())
]
self.best_responders = [
best_response.CPPBestResponsePolicy(
game, i_player, policy, self.all_states,
self.state_to_information_state,
self.best_response_processors[i_player]
)
for i_player in range(game.num_players())
]
# pylint: enable=g-complex-comprehension
def __call__(self,
game,
training_parameters,
strategy_sampler=utils.sample_strategy,
using_joint_strategies=False,
**oracle_specific_execution_kwargs):
"""Call method for oracle, returns best responses for training_parameters.
Args:
game: The game on which the optimization process takes place.
training_parameters: List of list of dicts: one list per player, one dict
per selected agent in the pool for each player,
each dictionary containing the following fields:
- policy: the policy from which to start training.
- total_policies: A list of all policy.Policy strategies used for
training, including the one for the current player. Either
marginalized or joint strategies are accepted.
- current_player: Integer representing the current player.
- probabilities_of_playing_policies: A list of arrays representing, per
player, the probabilities of playing each policy in total_policies for
the same player.
strategy_sampler: Callable that samples strategies from `total_policies`
using `probabilities_of_playing_policies`. It only samples one joint
"action" for all players. Implemented to be able to take into account
joint probabilities of action.
using_joint_strategies: Whether the meta-strategies sent are joint (True)
or marginalized.
**oracle_specific_execution_kwargs: Other set of arguments, for
compatibility purposes. Can for example represent whether to Rectify
Training or not.
Returns:
A list of list of OpenSpiel Policy objects representing the expected
best response, following the same structure as training_parameters.
"""
new_policies = []
for player_parameters in training_parameters:
player_policies = []
for params in player_parameters:
current_player = params['current_player']
total_policies = params['total_policies']
probabilities_of_playing_policies = params[
'probabilities_of_playing_policies']
if using_joint_strategies:
aggr_policy = utils.aggregate_joint_policies(
game, utils.marginal_to_joint(total_policies),
probabilities_of_playing_policies.reshape(-1))
else:
aggr_policy = utils.aggregate_policies(
game, total_policies, probabilities_of_playing_policies)
# This takes as input an aggregate policy, and computes a best response
# for current_player at the applicable information states by recursing
# through the game tree. At information states involving other players
# or chance, the aggr_policy is used to compute the expected value, such
# that a best response for current_player can be computed.
if self.best_response_backend == 'py':
best_resp = best_response.BestResponsePolicy(game, current_player,
aggr_policy)
else:
self.best_response_processors[current_player].set_policy(
policy_utils.policy_to_dict(aggr_policy, game, self.all_states,
self.state_to_information_state))
self.best_responders[current_player] = (
best_response.CPPBestResponsePolicy(
game, current_player, aggr_policy, self.all_states,
self.state_to_information_state,
self.best_response_processors[current_player]))
best_resp = self.best_responders[current_player]
player_policies.append(best_resp)
new_policies.append(player_policies)
return new_policies
| open_spiel-master | open_spiel/python/algorithms/psro_v2/best_response_oracle.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modular implementations of the PSRO meta algorithm.
Allows the use of Restricted Nash Response, Nash Response, Uniform Response,
and other modular matchmaking selection components users can add.
This version works for N player, general sum games.
One iteration of the algorithm consists of:
1) Computing the selection probability vector (or meta-strategy) for current
strategies of each player, given their payoff.
2) [optional] Generating a mask over joint policies that restricts which policy
to train against, ie. rectify the set of policies trained against. (This
operation is designated by "rectify" in the code)
3) From every strategy used, generating a new best response strategy against the
meta-strategy-weighted, potentially rectified, mixture of strategies using an
oracle.
4) Updating meta game matrix with new game results.
"""
import itertools
import numpy as np
from open_spiel.python import policy
from open_spiel.python.algorithms.psro_v2 import abstract_meta_trainer
from open_spiel.python.algorithms.psro_v2 import strategy_selectors
from open_spiel.python.algorithms.psro_v2 import utils
TRAIN_TARGET_SELECTORS = {
"": None,
"rectified": strategy_selectors.rectified_selector,
}
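# Example (illustrative sketch, not part of this module's API): a minimal PSRO
# loop on kuhn_poker with the exact best-response oracle. The parameter values
# are arbitrary, and the loop assumes the 'iteration()' method provided by the
# abstract meta trainer base class.
def _psro_example(num_iterations=2):
  # pylint: disable=g-import-not-at-top
  import pyspiel
  from open_spiel.python.algorithms.psro_v2 import best_response_oracle
  # pylint: enable=g-import-not-at-top
  game = pyspiel.load_game("kuhn_poker")
  oracle = best_response_oracle.BestResponseOracle(game=game)
  solver = PSROSolver(game, oracle, sims_per_entry=10,
                      meta_strategy_method="uniform")
  for _ in range(num_iterations):
    solver.iteration()
  return solver.get_meta_strategies()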
class PSROSolver(abstract_meta_trainer.AbstractMetaTrainer):
"""A general implementation PSRO.
PSRO is the algorithm described in (Lanctot et Al., 2017,
https://arxiv.org/pdf/1711.00832.pdf ).
Subsequent work regarding PSRO's matchmaking and training has been performed
by David Balduzzi, who introduced Restricted Nash Response (RNR), Nash
Response (NR) and Uniform Response (UR).
RNR is Algorithm 4 in (Balduzzi, 2019, "Open-ended Learning in Symmetric
Zero-sum Games"). NR, Nash response, is algorithm 3.
Balduzzi et Al., 2019, https://arxiv.org/pdf/1901.08106.pdf
This implementation allows one to modularly choose different meta strategy
computation methods, or other user-written ones.
"""
def __init__(self,
game,
oracle,
sims_per_entry,
initial_policies=None,
rectifier="",
training_strategy_selector=None,
meta_strategy_method="alpharank",
sample_from_marginals=False,
number_policies_selected=1,
n_noisy_copies=0,
alpha_noise=0.0,
beta_noise=0.0,
**kwargs):
"""Initialize the PSRO solver.
Arguments:
game: The open_spiel game object.
      oracle: Callable that takes as input the game, a policy, the policies
        played, an array representing the probability of playing each policy,
        and other kwargs, and returns a new best response.
sims_per_entry: Number of simulations to run to estimate each element of
the game outcome matrix.
initial_policies: A list of initial policies for each player, from which
the optimization process will start.
rectifier: A string indicating the rectifying method. Can be :
- "" or None: Train against potentially all strategies.
- "rectified": Train only against strategies beaten by current
strategy.
training_strategy_selector: Callable taking (PSROSolver,
'number_policies_selected') and returning a list of list of selected
strategies to train from - this usually means copying weights and
rectifying with respect to the selected strategy's performance (One list
entry per player), or string selecting pre-implemented methods.
String value can be:
- "top_k_probabilites": selects the first
'number_policies_selected' policies with highest selection
probabilities.
- "probabilistic": randomly selects 'number_policies_selected'
with probabilities determined by the meta strategies.
- "exhaustive": selects every policy of every player.
- "rectified": only selects strategies that have nonzero chance of
being selected.
- "uniform": randomly selects 'number_policies_selected'
policies with uniform probabilities.
meta_strategy_method: String or callable taking a GenPSROSolver object and
        returning two lists: one list of meta strategies (one entry per
        player), and one list of joint strategies.
String value can be:
- alpharank: AlphaRank distribution on policies.
- "uniform": Uniform distribution on policies.
- "nash": Taking nash distribution. Only works for 2 player, 0-sum
games.
- "prd": Projected Replicator Dynamics, as described in Lanctot et
Al.
sample_from_marginals: A boolean, specifying whether to sample from
marginal (True) or joint (False) meta-strategy distributions.
number_policies_selected: Number of policies to return for each player.
n_noisy_copies: Number of noisy copies of each agent after training. 0 to
ignore this.
alpha_noise: lower bound on alpha noise value (Mixture amplitude.)
beta_noise: lower bound on beta noise value (Softmax temperature.)
**kwargs: kwargs for meta strategy computation and training strategy
selection.
"""
self._sims_per_entry = sims_per_entry
print("Using {} sims per entry.".format(sims_per_entry))
self._rectifier = TRAIN_TARGET_SELECTORS.get(
rectifier, None)
self._rectify_training = self._rectifier
print("Rectifier : {}".format(rectifier))
self._meta_strategy_probabilities = np.array([])
self._non_marginalized_probabilities = np.array([])
print("Perturbating oracle outputs : {}".format(n_noisy_copies > 0))
self._n_noisy_copies = n_noisy_copies
self._alpha_noise = alpha_noise
self._beta_noise = beta_noise
self._policies = [] # A list of size `num_players` of lists containing the
# strategies of each player.
self._new_policies = []
# Alpharank is a special case here, as it's not supported by the abstract
# meta trainer api, so has to be passed as a function instead of a string.
if not meta_strategy_method or meta_strategy_method == "alpharank":
meta_strategy_method = utils.alpharank_strategy
print("Sampling from marginals : {}".format(sample_from_marginals))
self.sample_from_marginals = sample_from_marginals
super(PSROSolver, self).__init__(
game,
oracle,
initial_policies,
meta_strategy_method,
training_strategy_selector,
number_policies_selected=number_policies_selected,
**kwargs)
def _initialize_policy(self, initial_policies):
if self.symmetric_game:
self._policies = [[]]
# Notice that the following line returns N references to the same policy
# This might not be correct for certain applications.
# E.g., a DQN BR oracle with player_id information
self._new_policies = [
(
[initial_policies[0]]
if initial_policies
else [policy.UniformRandomPolicy(self._game)]
)
]
else:
self._policies = [[] for _ in range(self._num_players)]
self._new_policies = [
(
[initial_policies[k]]
if initial_policies
else [policy.UniformRandomPolicy(self._game)]
)
for k in range(self._num_players)
]
def _initialize_game_state(self):
effective_payoff_size = self._game_num_players
self._meta_games = [
np.array(utils.empty_list_generator(effective_payoff_size))
for _ in range(effective_payoff_size)
]
self.update_empirical_gamestate(seed=None)
def get_joint_policy_ids(self):
"""Returns a list of integers enumerating all joint meta strategies."""
return utils.get_strategy_profile_ids(self._meta_games)
def get_joint_policies_from_id_list(self, selected_policy_ids):
"""Returns a list of joint policies from a list of integer IDs.
Args:
selected_policy_ids: A list of integer IDs corresponding to the
meta-strategies, with duplicate entries allowed.
Returns:
selected_joint_policies: A list, with each element being a joint policy
instance (i.e., a list of policies, one per player).
"""
policies = self.get_policies()
selected_joint_policies = utils.get_joint_policies_from_id_list(
self._meta_games, policies, selected_policy_ids)
return selected_joint_policies
def update_meta_strategies(self):
"""Recomputes the current meta strategy of each player.
Given new payoff tables, we call self._meta_strategy_method to update the
meta-probabilities.
"""
if self.symmetric_game:
# Notice that the following line returns N references to the same policy
# This might not be correct for certain applications.
# E.g., a DQN BR oracle with player_id information
self._policies = self._policies * self._game_num_players
self._meta_strategy_probabilities, self._non_marginalized_probabilities = (
self._meta_strategy_method(solver=self, return_joint=True))
if self.symmetric_game:
self._policies = [self._policies[0]]
self._meta_strategy_probabilities = [self._meta_strategy_probabilities[0]]
def get_policies_and_strategies(self):
"""Returns current policy sampler, policies and meta-strategies of the game.
If strategies are rectified, we automatically switch to returning joint
strategies.
Returns:
sample_strategy: A strategy sampling function
total_policies: A list of list of policies, one list per player.
probabilities_of_playing_policies: the meta strategies, either joint or
marginalized.
"""
sample_strategy = utils.sample_strategy_marginal
probabilities_of_playing_policies = self.get_meta_strategies()
if self._rectify_training or not self.sample_from_marginals:
sample_strategy = utils.sample_strategy_joint
probabilities_of_playing_policies = self._non_marginalized_probabilities
total_policies = self.get_policies()
return sample_strategy, total_policies, probabilities_of_playing_policies
def _restrict_target_training(self,
current_player,
ind,
total_policies,
probabilities_of_playing_policies,
restrict_target_training_bool,
epsilon=1e-12):
"""Rectifies training.
Args:
current_player: the current player.
ind: Current strategy index of the player.
total_policies: all policies available to all players.
probabilities_of_playing_policies: meta strategies.
restrict_target_training_bool: Boolean specifying whether to restrict
training. If False, standard meta strategies are returned. Otherwise,
restricted joint strategies are returned.
epsilon: threshold below which we consider 0 sum of probabilities.
Returns:
Probabilities of playing each joint strategy (If rectifying) / probability
of each player playing each strategy (Otherwise - marginal probabilities)
"""
true_shape = tuple([len(a) for a in total_policies])
if not restrict_target_training_bool:
return probabilities_of_playing_policies
else:
kept_probas = self._rectifier(
self, current_player, ind)
# Ensure probabilities_of_playing_policies has same shape as kept_probas.
probability = probabilities_of_playing_policies.reshape(true_shape)
probability = probability * kept_probas
prob_sum = np.sum(probability)
# If the rectified probabilities are too low / 0, we play against the
# non-rectified probabilities.
if prob_sum <= epsilon:
probability = probabilities_of_playing_policies
else:
probability /= prob_sum
return probability
def update_agents(self):
"""Updates policies for each player at the same time by calling the oracle.
The resulting policies are appended to self._new_policies.
"""
used_policies, used_indexes = self._training_strategy_selector(
self, self._number_policies_selected)
(sample_strategy,
total_policies,
probabilities_of_playing_policies) = self.get_policies_and_strategies()
# Contains the training parameters of all trained oracles.
    # This is a list (size num_players) of lists, one per player (size
    # num_new_policies[player]), where each dict contains the information
    # needed to train a new best response.
training_parameters = [[] for _ in range(self._num_players)]
for current_player in range(self._num_players):
if self.sample_from_marginals:
currently_used_policies = used_policies[current_player]
current_indexes = used_indexes[current_player]
else:
currently_used_policies = [
joint_policy[current_player] for joint_policy in used_policies
]
current_indexes = used_indexes[current_player]
for i in range(len(currently_used_policies)):
pol = currently_used_policies[i]
ind = current_indexes[i]
new_probabilities = self._restrict_target_training(
current_player, ind, total_policies,
probabilities_of_playing_policies,
self._rectify_training)
new_parameter = {
"policy": pol,
"total_policies": total_policies,
"current_player": current_player,
"probabilities_of_playing_policies": new_probabilities
}
training_parameters[current_player].append(new_parameter)
if self.symmetric_game:
# Notice that the following line returns N references to the same policy
# This might not be correct for certain applications.
# E.g., a DQN BR oracle with player_id information
self._policies = self._game_num_players * self._policies
self._num_players = self._game_num_players
training_parameters = [training_parameters[0]]
# List of List of new policies (One list per player)
self._new_policies = self._oracle(
self._game,
training_parameters,
strategy_sampler=sample_strategy,
using_joint_strategies=self._rectify_training or
not self.sample_from_marginals)
if self.symmetric_game:
# In a symmetric game, only one population is kept. The below lines
# therefore make PSRO consider only the first player during training,
# since both players are identical.
self._policies = [self._policies[0]]
self._num_players = 1
def update_empirical_gamestate(self, seed=None):
"""Given new agents in _new_policies, update meta_games through simulations.
Args:
seed: Seed for environment generation.
Returns:
Meta game payoff matrix.
"""
if seed is not None:
np.random.seed(seed=seed)
assert self._oracle is not None
if self.symmetric_game:
# Switch to considering the game as a symmetric game where players have
# the same policies & new policies. This allows the empirical gamestate
# update to function normally.
# Notice that the following line returns N references to the same policy
# This might not be correct for certain applications.
# E.g., a DQN BR oracle with player_id information
self._policies = self._game_num_players * self._policies
self._new_policies = self._game_num_players * self._new_policies
self._num_players = self._game_num_players
# Concatenate both lists.
updated_policies = [
self._policies[k] + self._new_policies[k]
for k in range(self._num_players)
]
# Each metagame will be (num_strategies)^self._num_players.
    # There are self._num_players metagames, one per player.
total_number_policies = [
len(updated_policies[k]) for k in range(self._num_players)
]
number_older_policies = [
len(self._policies[k]) for k in range(self._num_players)
]
number_new_policies = [
len(self._new_policies[k]) for k in range(self._num_players)
]
# Initializing the matrix with nans to recognize unestimated states.
meta_games = [
np.full(tuple(total_number_policies), np.nan)
for k in range(self._num_players)
]
# Filling the matrix with already-known values.
older_policies_slice = tuple(
[slice(len(self._policies[k])) for k in range(self._num_players)])
for k in range(self._num_players):
meta_games[k][older_policies_slice] = self._meta_games[k]
# Filling the matrix for newly added policies.
for current_player in range(self._num_players):
# Only iterate over new policies for current player ; compute on every
# policy for the other players.
range_iterators = [
range(total_number_policies[k]) for k in range(current_player)
] + [range(number_new_policies[current_player])] + [
range(total_number_policies[k])
for k in range(current_player + 1, self._num_players)
]
for current_index in itertools.product(*range_iterators):
used_index = list(current_index)
used_index[current_player] += number_older_policies[current_player]
if np.isnan(meta_games[current_player][tuple(used_index)]):
estimated_policies = [
updated_policies[k][current_index[k]]
for k in range(current_player)
] + [
self._new_policies[current_player][current_index[current_player]]
] + [
updated_policies[k][current_index[k]]
for k in range(current_player + 1, self._num_players)
]
if self.symmetric_game:
# TODO(author4): This update uses ~2**(n_players-1) * sims_per_entry
# samples to estimate each payoff table entry. This should be
# brought to sims_per_entry to coincide with expected behavior.
utility_estimates = self.sample_episodes(estimated_policies,
self._sims_per_entry)
player_permutations = list(itertools.permutations(list(range(
self._num_players))))
for permutation in player_permutations:
used_tuple = tuple([used_index[i] for i in permutation])
for player in range(self._num_players):
if np.isnan(meta_games[player][used_tuple]):
meta_games[player][used_tuple] = 0.0
meta_games[player][used_tuple] += utility_estimates[
permutation[player]] / len(player_permutations)
else:
utility_estimates = self.sample_episodes(estimated_policies,
self._sims_per_entry)
for k in range(self._num_players):
meta_games[k][tuple(used_index)] = utility_estimates[k]
if self.symmetric_game:
# Make PSRO consider that we only have one population again, as we
# consider that we are in a symmetric game (No difference between players)
self._policies = [self._policies[0]]
self._new_policies = [self._new_policies[0]]
updated_policies = [updated_policies[0]]
self._num_players = 1
self._meta_games = meta_games
self._policies = updated_policies
return meta_games
def get_meta_game(self):
"""Returns the meta game matrix."""
return self._meta_games
@property
def meta_games(self):
return self._meta_games
def get_policies(self):
"""Returns a list, each element being a list of each player's policies."""
policies = self._policies
if self.symmetric_game:
# For compatibility reasons, return list of expected length.
# Notice that the following line returns N references to the same policy
# This might not be correct for certain applications.
# E.g., a DQN BR oracle with player_id information
policies = self._game_num_players * self._policies
return policies
def get_and_update_non_marginalized_meta_strategies(self, update=True):
"""Returns the Nash Equilibrium distribution on meta game matrix."""
if update:
self.update_meta_strategies()
return self._non_marginalized_probabilities
def get_strategy_computation_and_selection_kwargs(self):
return self._strategy_computation_and_selection_kwargs
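# A minimal usage sketch of this solver, added for illustration; it is not
# part of the original module. The oracle construction is elided because it
# depends on the chosen best-response backend. `iteration()` is inherited
# from AbstractMetaTrainer and runs one PSRO step (train best responses,
# update the empirical game, recompute meta strategies).
#
#   game = pyspiel.load_game("kuhn_poker")
#   oracle = ...  # e.g. a best-response oracle from psro_v2.
#   solver = PSROSolver(game, oracle, sims_per_entry=100,
#                       meta_strategy_method="uniform")
#   for _ in range(3):
#     solver.iteration()
#   meta_games = solver.get_meta_game()
#   meta_probabilities = solver.get_meta_strategies()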
| open_spiel-master | open_spiel/python/algorithms/psro_v2/psro_v2.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for RNaD algorithm under open_spiel."""
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import jax
import numpy as np
from open_spiel.python.algorithms.rnad import rnad
# TODO(author18): test the losses and jax ops
class RNADTest(parameterized.TestCase):
def test_run_kuhn(self):
solver = rnad.RNaDSolver(rnad.RNaDConfig(game_name="kuhn_poker"))
for _ in range(10):
solver.step()
def test_serialization(self):
solver = rnad.RNaDSolver(rnad.RNaDConfig(game_name="kuhn_poker"))
solver.step()
state_bytes = pickle.dumps(solver)
solver2 = pickle.loads(state_bytes)
self.assertEqual(solver.config, solver2.config)
np.testing.assert_equal(
jax.device_get(solver.params), jax.device_get(solver2.params))
# TODO(author16): figure out the last bits of the non-determinism
# and reenable the checks below.
# Now run both solvers for the same number of steps and verify
# they behave in exactly the same way.
# for _ in range(10):
# solver.step()
# solver2.step()
# np.testing.assert_equal(
# jax.device_get(solver.params), jax.device_get(solver2.params))
@parameterized.named_parameters(
dict(
testcase_name="3x2_5x1_6",
sizes=[3, 5, 6],
repeats=[2, 1, 1],
cover_steps=24,
expected=[
(0, False),
(2 / 3, False),
(1, True), # 3
(0, False),
(2 / 3, False),
(1, True), # 3 x 2
(0, False),
(0.4, False),
(0.8, False),
(1, False),
(1, True), # 5
(0, False),
(1 / 3, False),
(2 / 3, False),
(1, False),
(1, False),
(1, True), # 6
(0, False),
(1 / 3, False),
(2 / 3, False),
(1, False),
(1, False),
(1, True), # 6 x 2
(0, False),
],
),
)
def test_entropy_schedule(self, sizes, repeats, cover_steps, expected):
schedule = rnad.EntropySchedule(sizes=sizes, repeats=repeats)
computed = [schedule(i) for i in range(cover_steps)]
np.testing.assert_almost_equal(computed, expected)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/rnad/rnad_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/rnad/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python implementation of R-NaD (https://arxiv.org/pdf/2206.15378.pdf)."""
import enum
import functools
from typing import Any, Callable, Sequence, Tuple
import chex
import haiku as hk
import jax
from jax import lax
from jax import numpy as jnp
from jax import tree_util as tree
import numpy as np
import optax
from open_spiel.python import policy as policy_lib
import pyspiel
# Some handy aliases.
# Since most of these are just aliases for a "bag of tensors", the goal
# is to improve the documentation, and not to actually enforce correctness
# through pytype.
Params = chex.ArrayTree
class EntropySchedule:
"""An increasing list of steps where the regularisation network is updated.
  Example:
    EntropySchedule(sizes=[3, 5, 10], repeats=[2, 4, 1])
=> [0, 3, 6, 11, 16, 21, 26, 36]
| 3 x2 | 5 x4 | 10 x1
"""
def __init__(self, *, sizes: Sequence[int], repeats: Sequence[int]):
"""Constructs a schedule of entropy iterations.
Args:
sizes: the list of iteration sizes.
repeats: the list, parallel to sizes, with the number of times for each
size from `sizes` to repeat.
"""
try:
if len(repeats) != len(sizes):
raise ValueError("`repeats` must be parallel to `sizes`.")
if not sizes:
raise ValueError("`sizes` and `repeats` must not be empty.")
if any([(repeat <= 0) for repeat in repeats]):
raise ValueError("All repeat values must be strictly positive")
if repeats[-1] != 1:
raise ValueError("The last value in `repeats` must be equal to 1, "
"ince the last iteration size is repeated forever.")
except ValueError as e:
raise ValueError(
f"Entropy iteration schedule: repeats ({repeats}) and sizes"
f" ({sizes})."
) from e
schedule = [0]
for size, repeat in zip(sizes, repeats):
schedule.extend([schedule[-1] + (i + 1) * size for i in range(repeat)])
self.schedule = np.array(schedule, dtype=np.int32)
def __call__(self, learner_step: int) -> Tuple[float, bool]:
"""Entropy scheduling parameters for a given `learner_step`.
Args:
learner_step: The current learning step.
Returns:
alpha: The mixing weight (from [0, 1]) of the previous policy with
the one before for computing the intrinsic reward.
update_target_net: A boolean indicator for updating the target network
with the current network.
"""
# The complexity below is because at some point we might go past
# the explicit schedule, and then we'd need to just use the last step
# in the schedule and apply the logic of
# ((learner_step - last_step) % last_iteration) == 0)
# The schedule might look like this:
# X----X-------X--X--X--X--------X
# learner_step | might be here ^ |
# or there ^ |
# or even past the schedule ^
# We need to deal with two cases below.
# Instead of going for the complicated conditional, let's just
# compute both and then do the A * s + B * (1 - s) with s being a bool
# selector between A and B.
# 1. assume learner_step is past the schedule,
# ie schedule[-1] <= learner_step.
last_size = self.schedule[-1] - self.schedule[-2]
last_start = self.schedule[-1] + (
learner_step - self.schedule[-1]) // last_size * last_size
# 2. assume learner_step is within the schedule.
start = jnp.amax(self.schedule * (self.schedule <= learner_step))
finish = jnp.amin(
self.schedule * (learner_step < self.schedule),
initial=self.schedule[-1],
where=(learner_step < self.schedule))
size = finish - start
# Now select between the two.
beyond = (self.schedule[-1] <= learner_step) # Are we past the schedule?
iteration_start = (last_start * beyond + start * (1 - beyond))
iteration_size = (last_size * beyond + size * (1 - beyond))
update_target_net = jnp.logical_and(
learner_step > 0,
jnp.sum(learner_step == iteration_start + iteration_size - 1),
)
alpha = jnp.minimum(
(2.0 * (learner_step - iteration_start)) / iteration_size, 1.0)
return alpha, update_target_net # pytype: disable=bad-return-type # jax-types
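# A small illustrative check of the schedule above; added for clarity, not
# part of the original module. With sizes=[3, 5] and repeats=[2, 1], the
# target network is updated at learner steps 2, 5, 10, 15, 20, ... and alpha
# ramps from 0 to 1 inside each iteration:
#
#   schedule = EntropySchedule(sizes=[3, 5], repeats=[2, 1])
#   alpha, update = schedule(1)   # alpha == 2/3, update == False
#   alpha, update = schedule(2)   # alpha == 1,   update == True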
@chex.dataclass(frozen=True)
class FineTuning:
"""Fine tuning options, aka policy post-processing.
Even when fully trained, the resulting softmax-based policy may put
a small probability mass on bad actions. This results in an agent
waiting for the opponent (itself in self-play) to commit an error.
To address that the policy is post-processed using:
  - thresholding: any action with probability smaller than
    self.policy_threshold is simply removed from the policy.
  - discretization: the probability values are rounded to the closest
    multiple of 1/self.policy_discretization.
The post-processing is used on the learner, and thus must be jit-friendly.
"""
# The learner step after which the policy post processing (aka finetuning)
# will be enabled when learning. A strictly negative value is equivalent
# to infinity, ie disables finetuning completely.
from_learner_steps: int = -1
# All policy probabilities below `threshold` are zeroed out. Thresholding
# is disabled if this value is non-positive.
policy_threshold: float = 0.03
# Rounds the policy probabilities to the "closest"
  # multiple of 1/`self.policy_discretization`.
# Discretization is disabled for non-positive values.
policy_discretization: int = 32
def __call__(self, policy: chex.Array, mask: chex.Array,
learner_steps: int) -> chex.Array:
"""A configurable fine tuning of a policy."""
chex.assert_equal_shape((policy, mask))
do_finetune = jnp.logical_and(self.from_learner_steps >= 0,
learner_steps > self.from_learner_steps)
return jnp.where(do_finetune, self.post_process_policy(policy, mask),
policy)
def post_process_policy(
self,
policy: chex.Array,
mask: chex.Array,
) -> chex.Array:
"""Unconditionally post process a given masked policy."""
chex.assert_equal_shape((policy, mask))
policy = self._threshold(policy, mask)
policy = self._discretize(policy)
return policy
def _threshold(self, policy: chex.Array, mask: chex.Array) -> chex.Array:
"""Remove from the support the actions 'a' where policy(a) < threshold."""
chex.assert_equal_shape((policy, mask))
if self.policy_threshold <= 0:
return policy
mask = mask * (
# Values over the threshold.
(policy >= self.policy_threshold) +
# Degenerate case is when policy is less than threshold *everywhere*.
# In that case we just keep the policy as-is.
(jnp.max(policy, axis=-1, keepdims=True) < self.policy_threshold))
return mask * policy / jnp.sum(mask * policy, axis=-1, keepdims=True)
def _discretize(self, policy: chex.Array) -> chex.Array:
"""Round all action probabilities to a multiple of 1/self.discretize."""
if self.policy_discretization <= 0:
return policy
# The unbatched/single policy case:
if len(policy.shape) == 1:
return self._discretize_single(policy)
# policy may be [B, A] or [T, B, A], etc. Thus add hk.BatchApply.
dims = len(policy.shape) - 1
# TODO(author18): avoid mixing vmap and BatchApply since the two could
# be folded into either a single BatchApply or a sequence of vmaps, but
# not the mix.
vmapped = jax.vmap(self._discretize_single)
policy = hk.BatchApply(vmapped, num_dims=dims)(policy)
return policy
def _discretize_single(self, mu: chex.Array) -> chex.Array:
"""A version of self._discretize but for the unbatched data."""
# TODO(author18): try to merge _discretize and _discretize_single
# into one function that handles both batched and unbatched cases.
if len(mu.shape) == 2:
mu_ = jnp.squeeze(mu, axis=0)
else:
mu_ = mu
n_actions = mu_.shape[-1]
roundup = jnp.ceil(mu_ * self.policy_discretization).astype(jnp.int32)
result = jnp.zeros_like(mu_)
order = jnp.argsort(-mu_) # Indices of descending order.
weight_left = self.policy_discretization
def f_disc(i, order, roundup, weight_left, result):
x = jnp.minimum(roundup[order[i]], weight_left)
result = jax.numpy.where(weight_left >= 0, result.at[order[i]].add(x),
result)
weight_left -= x
return i + 1, order, roundup, weight_left, result
def f_scan_scan(carry, x):
i, order, roundup, weight_left, result = carry
i_next, order_next, roundup_next, weight_left_next, result_next = f_disc(
i, order, roundup, weight_left, result)
carry_next = (i_next, order_next, roundup_next, weight_left_next,
result_next)
return carry_next, x
(_, _, _, weight_left_next, result_next), _ = jax.lax.scan(
f_scan_scan,
init=(jnp.asarray(0), order, roundup, weight_left, result),
xs=None,
length=n_actions)
result_next = jnp.where(weight_left_next > 0,
result_next.at[order[0]].add(weight_left_next),
result_next)
if len(mu.shape) == 2:
result_next = jnp.expand_dims(result_next, axis=0)
return result_next / self.policy_discretization
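# An illustrative sketch of the post-processing above; added for clarity, not
# part of the original module. With the default policy_threshold=0.03 and
# policy_discretization=32, a tiny probability is dropped and the remaining
# mass is renormalised to multiples of 1/32:
#
#   ft = FineTuning()
#   policy = jnp.array([0.02, 0.58, 0.40])
#   mask = jnp.array([1.0, 1.0, 1.0])
#   ft.post_process_policy(policy, mask)
#   # -> approximately [0., 0.594, 0.406], i.e. [0, 19, 13] / 32.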
def _legal_policy(logits: chex.Array, legal_actions: chex.Array) -> chex.Array:
"""A soft-max policy that respects legal_actions."""
chex.assert_equal_shape((logits, legal_actions))
# Fiddle a bit to make sure we don't generate NaNs or Inf in the middle.
l_min = logits.min(axis=-1, keepdims=True)
logits = jnp.where(legal_actions, logits, l_min)
logits -= logits.max(axis=-1, keepdims=True)
logits *= legal_actions
exp_logits = jnp.where(legal_actions, jnp.exp(logits),
0) # Illegal actions become 0.
exp_logits_sum = jnp.sum(exp_logits, axis=-1, keepdims=True)
return exp_logits / exp_logits_sum
def legal_log_policy(logits: chex.Array,
legal_actions: chex.Array) -> chex.Array:
"""Return the log of the policy on legal action, 0 on illegal action."""
chex.assert_equal_shape((logits, legal_actions))
# logits_masked has illegal actions set to -inf.
logits_masked = logits + jnp.log(legal_actions)
max_legal_logit = logits_masked.max(axis=-1, keepdims=True)
logits_masked = logits_masked - max_legal_logit
# exp_logits_masked is 0 for illegal actions.
exp_logits_masked = jnp.exp(logits_masked)
baseline = jnp.log(jnp.sum(exp_logits_masked, axis=-1, keepdims=True))
# Subtract baseline from logits. We do not simply return
# logits_masked - baseline
# because that has -inf for illegal actions, or
# legal_actions * (logits_masked - baseline)
# because that leads to 0 * -inf == nan for illegal actions.
log_policy = jnp.multiply(legal_actions,
(logits - max_legal_logit - baseline))
return log_policy
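# A small illustrative example of the two helpers above; added for clarity,
# not part of the original module. Illegal actions get zero probability and a
# zero log-policy entry:
#
#   logits = jnp.array([1.0, 2.0, 3.0])
#   legal = jnp.array([1, 0, 1])
#   _legal_policy(logits, legal)     # -> approx. [0.12, 0., 0.88]
#   legal_log_policy(logits, legal)  # -> approx. [-2.13, 0., -0.13]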
def _player_others(player_ids: chex.Array, valid: chex.Array,
player: int) -> chex.Array:
"""A vector of 1 for the current player and -1 for others.
Args:
player_ids: Tensor [...] containing player ids (0 <= player_id < N).
valid: Tensor [...] containing whether these states are valid.
player: The player id as int.
Returns:
player_other: is 1 for the current player and -1 for others [..., 1].
"""
chex.assert_equal_shape((player_ids, valid))
current_player_tensor = (player_ids == player).astype(jnp.int32) # pytype: disable=attribute-error # numpy-scalars
res = 2 * current_player_tensor - 1
res = res * valid
return jnp.expand_dims(res, axis=-1)
def _policy_ratio(pi: chex.Array, mu: chex.Array, actions_oh: chex.Array,
valid: chex.Array) -> chex.Array:
"""Returns a ratio of policy pi/mu when selecting action a.
  By convention, this ratio is 1 on non-valid states.
Args:
pi: the policy of shape [..., A].
mu: the sampling policy of shape [..., A].
actions_oh: a one-hot encoding of the current actions of shape [..., A].
valid: 0 if the state is not valid and else 1 of shape [...].
Returns:
pi/mu on valid states and 1 otherwise. The shape is the same
as pi, mu or actions_oh but without the last dimension A.
"""
chex.assert_equal_shape((pi, mu, actions_oh))
chex.assert_shape((valid,), actions_oh.shape[:-1])
def _select_action_prob(pi):
return (jnp.sum(actions_oh * pi, axis=-1, keepdims=False) * valid +
(1 - valid))
pi_actions_prob = _select_action_prob(pi)
mu_actions_prob = _select_action_prob(mu)
return pi_actions_prob / mu_actions_prob
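# An illustrative example of _policy_ratio; added for clarity, not part of
# the original module. The ratio is taken on the selected action only and is
# forced to 1 on invalid steps:
#
#   pi = jnp.array([[0.6, 0.4], [0.5, 0.5]])
#   mu = jnp.array([[0.3, 0.7], [0.5, 0.5]])
#   actions_oh = jnp.array([[1.0, 0.0], [0.0, 1.0]])
#   valid = jnp.array([1.0, 0.0])
#   _policy_ratio(pi, mu, actions_oh, valid)  # -> [2.0, 1.0]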
def _where(pred: chex.Array, true_data: chex.ArrayTree,
false_data: chex.ArrayTree) -> chex.ArrayTree:
"""Similar to jax.where but treats `pred` as a broadcastable prefix."""
def _where_one(t, f):
chex.assert_equal_rank((t, f))
# Expand the dimensions of pred if true_data and false_data are higher rank.
p = jnp.reshape(pred, pred.shape + (1,) * (len(t.shape) - len(pred.shape)))
return jnp.where(p, t, f)
return tree.tree_map(_where_one, true_data, false_data)
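# An illustrative example of _where; added for clarity, not part of the
# original module. A [T]-shaped predicate broadcasts against [T, A] leaves:
#
#   pred = jnp.array([1.0, 0.0])
#   _where(pred, jnp.ones((2, 3)), jnp.zeros((2, 3)))
#   # -> first row of ones, second row of zeros.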
def _has_played(valid: chex.Array, player_id: chex.Array,
player: int) -> chex.Array:
"""Compute a mask of states which have a next state in the sequence."""
chex.assert_equal_shape((valid, player_id))
def _loop_has_played(carry, x):
valid, player_id = x
chex.assert_equal_shape((valid, player_id))
our_res = jnp.ones_like(player_id)
opp_res = carry
reset_res = jnp.zeros_like(carry)
our_carry = carry
opp_carry = carry
reset_carry = jnp.zeros_like(player_id)
# pyformat: disable
return _where(valid, _where((player_id == player),
(our_carry, our_res),
(opp_carry, opp_res)),
(reset_carry, reset_res))
# pyformat: enable
_, result = lax.scan(
f=_loop_has_played,
init=jnp.zeros_like(player_id[-1]),
xs=(valid, player_id),
reverse=True)
return result
# V-Trace
#
# Custom implementation of VTrace to handle trajectories having a mix of
# different player steps. The standard rlax.vtrace can't be applied here
# out of the box because a trajectory could look like '121211221122'.
def v_trace(
v: chex.Array,
valid: chex.Array,
player_id: chex.Array,
acting_policy: chex.Array,
merged_policy: chex.Array,
merged_log_policy: chex.Array,
player_others: chex.Array,
actions_oh: chex.Array,
reward: chex.Array,
player: int,
# Scalars below.
eta: float,
lambda_: float,
c: float,
rho: float,
) -> Tuple[Any, Any, Any]:
"""Custom VTrace for trajectories with a mix of different player steps."""
gamma = 1.0
has_played = _has_played(valid, player_id, player)
policy_ratio = _policy_ratio(merged_policy, acting_policy, actions_oh, valid)
inv_mu = _policy_ratio(
jnp.ones_like(merged_policy), acting_policy, actions_oh, valid)
eta_reg_entropy = (-eta *
jnp.sum(merged_policy * merged_log_policy, axis=-1) *
jnp.squeeze(player_others, axis=-1))
eta_log_policy = -eta * merged_log_policy * player_others
@chex.dataclass(frozen=True)
class LoopVTraceCarry:
"""The carry of the v-trace scan loop."""
reward: chex.Array
# The cumulated reward until the end of the episode. Uncorrected (v-trace).
# Gamma discounted and includes eta_reg_entropy.
reward_uncorrected: chex.Array
next_value: chex.Array
next_v_target: chex.Array
importance_sampling: chex.Array
init_state_v_trace = LoopVTraceCarry(
reward=jnp.zeros_like(reward[-1]),
reward_uncorrected=jnp.zeros_like(reward[-1]),
next_value=jnp.zeros_like(v[-1]),
next_v_target=jnp.zeros_like(v[-1]),
importance_sampling=jnp.ones_like(policy_ratio[-1]))
def _loop_v_trace(carry: LoopVTraceCarry, x) -> Tuple[LoopVTraceCarry, Any]:
(cs, player_id, v, reward, eta_reg_entropy, valid, inv_mu, actions_oh,
eta_log_policy) = x
reward_uncorrected = (
reward + gamma * carry.reward_uncorrected + eta_reg_entropy)
discounted_reward = reward + gamma * carry.reward
# V-target:
our_v_target = (
v + jnp.expand_dims(
jnp.minimum(rho, cs * carry.importance_sampling), axis=-1) *
(jnp.expand_dims(reward_uncorrected, axis=-1) +
gamma * carry.next_value - v) + lambda_ * jnp.expand_dims(
jnp.minimum(c, cs * carry.importance_sampling), axis=-1) * gamma *
(carry.next_v_target - carry.next_value))
opp_v_target = jnp.zeros_like(our_v_target)
reset_v_target = jnp.zeros_like(our_v_target)
# Learning output:
our_learning_output = (
v + # value
eta_log_policy + # regularisation
actions_oh * jnp.expand_dims(inv_mu, axis=-1) *
(jnp.expand_dims(discounted_reward, axis=-1) + gamma * jnp.expand_dims(
carry.importance_sampling, axis=-1) * carry.next_v_target - v))
opp_learning_output = jnp.zeros_like(our_learning_output)
reset_learning_output = jnp.zeros_like(our_learning_output)
# State carry:
our_carry = LoopVTraceCarry(
reward=jnp.zeros_like(carry.reward),
next_value=v,
next_v_target=our_v_target,
reward_uncorrected=jnp.zeros_like(carry.reward_uncorrected),
importance_sampling=jnp.ones_like(carry.importance_sampling))
opp_carry = LoopVTraceCarry(
reward=eta_reg_entropy + cs * discounted_reward,
reward_uncorrected=reward_uncorrected,
next_value=gamma * carry.next_value,
next_v_target=gamma * carry.next_v_target,
importance_sampling=cs * carry.importance_sampling)
reset_carry = init_state_v_trace
# Invalid turn: init_state_v_trace and (zero target, learning_output)
# pyformat: disable
return _where(valid, # pytype: disable=bad-return-type # numpy-scalars
_where((player_id == player),
(our_carry, (our_v_target, our_learning_output)),
(opp_carry, (opp_v_target, opp_learning_output))),
(reset_carry, (reset_v_target, reset_learning_output)))
# pyformat: enable
_, (v_target, learning_output) = lax.scan(
f=_loop_v_trace,
init=init_state_v_trace,
xs=(policy_ratio, player_id, v, reward, eta_reg_entropy, valid, inv_mu,
actions_oh, eta_log_policy),
reverse=True)
return v_target, has_played, learning_output
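# Shape conventions for v_trace, added for clarity (not part of the original
# module): all inputs are time-major [T, B, ...]; `v` carries a trailing value
# dimension, the policies and `actions_oh` a trailing action dimension, while
# `valid`, `player_id` and `reward` are [T, B]. A hypothetical call for
# player 0 mirrors the one made in RNaDSolver.loss below:
#
#   v_target, has_played, policy_target = v_trace(
#       v, valid, player_id, acting_policy, merged_policy, merged_log_policy,
#       _player_others(player_id, valid, 0), actions_oh, reward, 0,
#       eta=0.2, lambda_=1.0, c=1.0, rho=np.inf)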
def get_loss_v(v_list: Sequence[chex.Array],
v_target_list: Sequence[chex.Array],
mask_list: Sequence[chex.Array]) -> chex.Array:
"""Define the loss function for the critic."""
chex.assert_trees_all_equal_shapes(v_list, v_target_list)
# v_list and v_target_list come with a degenerate trailing dimension,
# which mask_list tensors do not have.
chex.assert_shape(mask_list, v_list[0].shape[:-1])
loss_v_list = []
for (v_n, v_target, mask) in zip(v_list, v_target_list, mask_list):
assert v_n.shape[0] == v_target.shape[0]
loss_v = jnp.expand_dims(
mask, axis=-1) * (v_n - lax.stop_gradient(v_target))**2
normalization = jnp.sum(mask)
loss_v = jnp.sum(loss_v) / (normalization + (normalization == 0.0))
loss_v_list.append(loss_v)
return sum(loss_v_list)
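# A small illustrative call of get_loss_v; added for clarity, not part of the
# original module. One player, two timesteps, batch of one; only the valid
# step contributes to the mean squared error:
#
#   v = [jnp.array([[[1.0]], [[2.0]]])]         # [T=2, B=1, 1]
#   v_target = [jnp.array([[[0.0]], [[5.0]]])]
#   mask = [jnp.array([[1.0], [0.0]])]          # [T=2, B=1]
#   get_loss_v(v, v_target, mask)               # -> (1 - 0)**2 / 1 == 1.0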
def apply_force_with_threshold(decision_outputs: chex.Array, force: chex.Array,
threshold: float,
threshold_center: chex.Array) -> chex.Array:
"""Apply the force with below a given threshold."""
chex.assert_equal_shape((decision_outputs, force, threshold_center))
can_decrease = decision_outputs - threshold_center > -threshold
can_increase = decision_outputs - threshold_center < threshold
force_negative = jnp.minimum(force, 0.0)
force_positive = jnp.maximum(force, 0.0)
clipped_force = can_decrease * force_negative + can_increase * force_positive
return decision_outputs * lax.stop_gradient(clipped_force)
def renormalize(loss: chex.Array, mask: chex.Array) -> chex.Array:
"""The `normalization` is the number of steps over which loss is computed."""
chex.assert_equal_shape((loss, mask))
loss = jnp.sum(loss * mask)
normalization = jnp.sum(mask)
return loss / (normalization + (normalization == 0.0))
def get_loss_nerd(logit_list: Sequence[chex.Array],
policy_list: Sequence[chex.Array],
q_vr_list: Sequence[chex.Array],
valid: chex.Array,
player_ids: Sequence[chex.Array],
legal_actions: chex.Array,
importance_sampling_correction: Sequence[chex.Array],
clip: float = 100,
threshold: float = 2) -> chex.Array:
"""Define the nerd loss."""
assert isinstance(importance_sampling_correction, list)
loss_pi_list = []
for k, (logit_pi, pi, q_vr, is_c) in enumerate(
zip(logit_list, policy_list, q_vr_list, importance_sampling_correction)):
assert logit_pi.shape[0] == q_vr.shape[0]
# loss policy
adv_pi = q_vr - jnp.sum(pi * q_vr, axis=-1, keepdims=True)
adv_pi = is_c * adv_pi # importance sampling correction
adv_pi = jnp.clip(adv_pi, a_min=-clip, a_max=clip)
adv_pi = lax.stop_gradient(adv_pi)
logits = logit_pi - jnp.mean(
logit_pi * legal_actions, axis=-1, keepdims=True)
threshold_center = jnp.zeros_like(logits)
nerd_loss = jnp.sum(
legal_actions *
apply_force_with_threshold(logits, adv_pi, threshold, threshold_center),
axis=-1)
nerd_loss = -renormalize(nerd_loss, valid * (player_ids == k))
loss_pi_list.append(nerd_loss)
return sum(loss_pi_list)
@chex.dataclass(frozen=True)
class AdamConfig:
"""Adam optimizer related params."""
b1: float = 0.0
b2: float = 0.999
eps: float = 10e-8
@chex.dataclass(frozen=True)
class NerdConfig:
"""Nerd related params."""
beta: float = 2.0
clip: float = 10_000
class StateRepresentation(str, enum.Enum):
INFO_SET = "info_set"
OBSERVATION = "observation"
@chex.dataclass(frozen=True)
class RNaDConfig:
"""Configuration parameters for the RNaDSolver."""
# The game parameter string including its name and parameters.
game_name: str
  # Games longer than this value are truncated. Must be strictly positive.
trajectory_max: int = 10
# The content of the EnvStep.obs tensor.
state_representation: StateRepresentation = StateRepresentation.INFO_SET
# Network configuration.
policy_network_layers: Sequence[int] = (256, 256)
# The batch size to use when learning/improving parameters.
batch_size: int = 256
# The learning rate for `params`.
learning_rate: float = 0.00005
# The config related to the ADAM optimizer used for updating `params`.
adam: AdamConfig = AdamConfig()
# All gradients values are clipped to [-clip_gradient, clip_gradient].
clip_gradient: float = 10_000
# The "speed" at which `params_target` is following `params`.
target_network_avg: float = 0.001
# RNaD algorithm configuration.
# Entropy schedule configuration. See EntropySchedule class documentation.
entropy_schedule_repeats: Sequence[int] = (1,)
entropy_schedule_size: Sequence[int] = (20_000,)
# The weight of the reward regularisation term in RNaD.
eta_reward_transform: float = 0.2
nerd: NerdConfig = NerdConfig()
c_vtrace: float = 1.0
# Options related to fine tuning of the agent.
finetune: FineTuning = FineTuning()
# The seed that fully controls the randomness.
seed: int = 42
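# An illustrative configuration; added for clarity, not part of the original
# module. Only `game_name` is required, everything else has defaults:
#
#   config = RNaDConfig(
#       game_name="kuhn_poker",
#       batch_size=32,
#       entropy_schedule_size=(1_000,),
#   )
#   # RNaDConfig is a frozen chex dataclass, so two configs with the same
#   # field values compare equal (see the serialization test for this module).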
@chex.dataclass(frozen=True)
class EnvStep:
"""Holds the tensor data representing the current game state."""
# Indicates whether the state is a valid one or just a padding. Shape: [...]
  # The terminal state is the first one to be marked !valid.
# All other tensors in EnvStep contain data, but only for valid timesteps.
# Once !valid the data needs to be ignored, since it's a duplicate of
# some other previous state.
  # The rewards field is the only exception: it contains reward values
  # in the terminal state, which is marked !valid.
# TODO(author16): This is a confusion point and would need to be clarified.
valid: chex.Array = () # pytype: disable=annotation-type-mismatch # numpy-scalars
# The single tensor representing the state observation. Shape: [..., ??]
obs: chex.Array = () # pytype: disable=annotation-type-mismatch # numpy-scalars
# The legal actions mask for the current player. Shape: [..., A]
legal: chex.Array = () # pytype: disable=annotation-type-mismatch # numpy-scalars
# The current player id as an int. Shape: [...]
player_id: chex.Array = () # pytype: disable=annotation-type-mismatch # numpy-scalars
# The rewards of all the players. Shape: [..., P]
rewards: chex.Array = () # pytype: disable=annotation-type-mismatch # numpy-scalars
@chex.dataclass(frozen=True)
class ActorStep:
"""The actor step tensor summary."""
# The action (as one-hot) of the current player. Shape: [..., A]
action_oh: chex.Array = () # pytype: disable=annotation-type-mismatch # numpy-scalars
# The policy of the current player. Shape: [..., A]
policy: chex.Array = () # pytype: disable=annotation-type-mismatch # numpy-scalars
# The rewards of all the players. Shape: [..., P]
# Note - these are rewards obtained *after* the actor step, and thus
# these are the same as EnvStep.rewards visible before the *next* step.
rewards: chex.Array = () # pytype: disable=annotation-type-mismatch # numpy-scalars
@chex.dataclass(frozen=True)
class TimeStep:
"""The tensor data for one game transition (env_step, actor_step)."""
env: EnvStep = EnvStep()
actor: ActorStep = ActorStep()
Optimizer = Callable[[Params, Params], Params] # (params, grads) -> params
def optax_optimizer(
params: chex.ArrayTree,
init_and_update: optax.GradientTransformation) -> Optimizer:
"""Creates a parameterized function that represents an optimizer."""
init_fn, update_fn = init_and_update
@chex.dataclass
class OptaxOptimizer:
"""A jax-friendly representation of an optimizer state with the update."""
state: chex.Array
def __call__(self, params: Params, grads: Params) -> Params:
updates, self.state = update_fn(grads, self.state) # pytype: disable=annotation-type-mismatch # numpy-scalars
return optax.apply_updates(params, updates)
return OptaxOptimizer(state=init_fn(params))
class RNaDSolver(policy_lib.Policy):
"""Implements a solver for the R-NaD Algorithm.
See https://arxiv.org/abs/2206.15378.
Define all networks. Derive losses & learning steps. Initialize the game
state and algorithmic variables.
"""
def __init__(self, config: RNaDConfig):
self.config = config
# Learner and actor step counters.
self.learner_steps = 0
self.actor_steps = 0
self.init()
def init(self):
"""Initialize the network and losses."""
# The random facilities for jax and numpy.
self._rngkey = jax.random.PRNGKey(self.config.seed)
self._np_rng = np.random.RandomState(self.config.seed)
# TODO(author16): serialize both above to get the fully deterministic behaviour.
# Create a game and an example of a state.
self._game = pyspiel.load_game(self.config.game_name)
self._ex_state = self._play_chance(self._game.new_initial_state())
# The network.
def network(
env_step: EnvStep
) -> Tuple[chex.Array, chex.Array, chex.Array, chex.Array]:
mlp_torso = hk.nets.MLP(
self.config.policy_network_layers, activate_final=True
)
torso = mlp_torso(env_step.obs)
mlp_policy_head = hk.nets.MLP([self._game.num_distinct_actions()])
logit = mlp_policy_head(torso)
mlp_policy_value = hk.nets.MLP([1])
v = mlp_policy_value(torso)
pi = _legal_policy(logit, env_step.legal)
log_pi = legal_log_policy(logit, env_step.legal)
return pi, v, log_pi, logit
self.network = hk.without_apply_rng(hk.transform(network))
# The machinery related to updating parameters/learner.
self._entropy_schedule = EntropySchedule(
sizes=self.config.entropy_schedule_size,
repeats=self.config.entropy_schedule_repeats)
self._loss_and_grad = jax.value_and_grad(self.loss, has_aux=False)
# Create initial parameters.
env_step = self._state_as_env_step(self._ex_state)
key = self._next_rng_key() # Make sure to use the same key for all.
self.params = self.network.init(key, env_step)
self.params_target = self.network.init(key, env_step)
self.params_prev = self.network.init(key, env_step)
self.params_prev_ = self.network.init(key, env_step)
# Parameter optimizers.
self.optimizer = optax_optimizer(
self.params,
optax.chain(
optax.scale_by_adam(
eps_root=0.0,
**self.config.adam,
), optax.scale(-self.config.learning_rate),
optax.clip(self.config.clip_gradient)))
self.optimizer_target = optax_optimizer(
self.params_target, optax.sgd(self.config.target_network_avg))
def loss(self, params: Params, params_target: Params, params_prev: Params,
params_prev_: Params, ts: TimeStep, alpha: float,
learner_steps: int) -> float:
rollout = jax.vmap(self.network.apply, (None, 0), 0)
pi, v, log_pi, logit = rollout(params, ts.env)
policy_pprocessed = self.config.finetune(pi, ts.env.legal, learner_steps)
_, v_target, _, _ = rollout(params_target, ts.env)
_, _, log_pi_prev, _ = rollout(params_prev, ts.env)
_, _, log_pi_prev_, _ = rollout(params_prev_, ts.env)
# This line creates the reward transform log(pi(a|x)/pi_reg(a|x)).
    # For stability reasons, the reward changes smoothly between iterations.
# The mixing between old and new reward transform is a convex combination
# parametrised by alpha.
log_policy_reg = log_pi - (alpha * log_pi_prev + (1 - alpha) * log_pi_prev_)
v_target_list, has_played_list, v_trace_policy_target_list = [], [], []
for player in range(self._game.num_players()):
      reward = ts.actor.rewards[:, :, player]  # [T, B]
v_target_, has_played, policy_target_ = v_trace(
v_target,
ts.env.valid,
ts.env.player_id,
ts.actor.policy,
policy_pprocessed,
log_policy_reg,
_player_others(ts.env.player_id, ts.env.valid, player),
ts.actor.action_oh,
reward,
player,
lambda_=1.0,
c=self.config.c_vtrace,
rho=np.inf,
eta=self.config.eta_reward_transform)
v_target_list.append(v_target_)
has_played_list.append(has_played)
v_trace_policy_target_list.append(policy_target_)
loss_v = get_loss_v([v] * self._game.num_players(), v_target_list,
has_played_list)
is_vector = jnp.expand_dims(jnp.ones_like(ts.env.valid), axis=-1)
importance_sampling_correction = [is_vector] * self._game.num_players()
# Uses v-trace to define q-values for Nerd
loss_nerd = get_loss_nerd(
[logit] * self._game.num_players(), [pi] * self._game.num_players(),
v_trace_policy_target_list,
ts.env.valid,
ts.env.player_id,
ts.env.legal,
importance_sampling_correction,
clip=self.config.nerd.clip,
threshold=self.config.nerd.beta)
return loss_v + loss_nerd # pytype: disable=bad-return-type # numpy-scalars
@functools.partial(jax.jit, static_argnums=(0,))
def update_parameters(
self,
params: Params,
params_target: Params,
params_prev: Params,
params_prev_: Params,
optimizer: Optimizer,
optimizer_target: Optimizer,
timestep: TimeStep,
alpha: float,
learner_steps: int,
update_target_net: bool):
"""A jitted pure-functional part of the `step`."""
loss_val, grad = self._loss_and_grad(params, params_target, params_prev,
params_prev_, timestep, alpha,
learner_steps)
    # Update `params` using the computed gradient.
params = optimizer(params, grad)
# Update `params_target` towards `params`.
params_target = optimizer_target(
params_target, tree.tree_map(lambda a, b: a - b, params_target, params))
# Rolls forward the prev and prev_ params if update_target_net is 1.
# pyformat: disable
params_prev, params_prev_ = jax.lax.cond(
update_target_net,
lambda: (params_target, params_prev),
lambda: (params_prev, params_prev_))
# pyformat: enable
logs = {
"loss": loss_val,
}
return (params, params_target, params_prev, params_prev_, optimizer,
optimizer_target), logs
def __getstate__(self):
"""To serialize the agent."""
return dict(
# RNaD config.
config=self.config,
# Learner and actor step counters.
learner_steps=self.learner_steps,
actor_steps=self.actor_steps,
# The randomness keys.
np_rng=self._np_rng.get_state(),
rngkey=self._rngkey,
# Network params.
params=self.params,
params_target=self.params_target,
params_prev=self.params_prev,
params_prev_=self.params_prev_,
# Optimizer state.
optimizer=self.optimizer.state, # pytype: disable=attribute-error # always-use-return-annotations
optimizer_target=self.optimizer_target.state, # pytype: disable=attribute-error # always-use-return-annotations
)
def __setstate__(self, state):
"""To deserialize the agent."""
# RNaD config.
self.config = state["config"]
self.init()
# Learner and actor step counters.
self.learner_steps = state["learner_steps"]
self.actor_steps = state["actor_steps"]
# The randomness keys.
self._np_rng.set_state(state["np_rng"])
self._rngkey = state["rngkey"]
# Network params.
self.params = state["params"]
self.params_target = state["params_target"]
self.params_prev = state["params_prev"]
self.params_prev_ = state["params_prev_"]
# Optimizer state.
self.optimizer.state = state["optimizer"]
self.optimizer_target.state = state["optimizer_target"]
def step(self):
"""One step of the algorithm, that plays the game and improves params."""
timestep = self.collect_batch_trajectory()
alpha, update_target_net = self._entropy_schedule(self.learner_steps)
(self.params, self.params_target, self.params_prev, self.params_prev_,
self.optimizer, self.optimizer_target), logs = self.update_parameters(
self.params, self.params_target, self.params_prev, self.params_prev_,
self.optimizer, self.optimizer_target, timestep, alpha,
self.learner_steps, update_target_net)
self.learner_steps += 1
logs.update({
"actor_steps": self.actor_steps,
"learner_steps": self.learner_steps,
})
return logs
def _next_rng_key(self) -> chex.PRNGKey:
"""Get the next rng subkey from class rngkey.
Must *not* be called from under a jitted function!
Returns:
A fresh rng_key.
"""
self._rngkey, subkey = jax.random.split(self._rngkey)
return subkey
def _state_as_env_step(self, state: pyspiel.State) -> EnvStep:
# A terminal state must be communicated to players, however since
# it's a terminal state things like the state_representation or
# the set of legal actions are meaningless and only needed
    # for the sake of creating a well-defined trajectory tensor.
# Therefore the code below:
# - extracts the rewards
# - if the state is terminal, uses a dummy other state for other fields.
rewards = np.array(state.returns(), dtype=np.float64)
valid = not state.is_terminal()
if not valid:
state = self._ex_state
if self.config.state_representation == StateRepresentation.OBSERVATION:
obs = state.observation_tensor()
elif self.config.state_representation == StateRepresentation.INFO_SET:
obs = state.information_state_tensor()
else:
raise ValueError(
f"Invalid StateRepresentation: {self.config.state_representation}.")
# TODO(author16): clarify the story around rewards and valid.
return EnvStep(
obs=np.array(obs, dtype=np.float64),
legal=np.array(state.legal_actions_mask(), dtype=np.int8),
player_id=np.array(state.current_player(), dtype=np.float64),
valid=np.array(valid, dtype=np.float64),
rewards=rewards)
def action_probabilities(self,
state: pyspiel.State,
player_id: Any = None):
"""Returns action probabilities dict for a single batch."""
env_step = self._batch_of_states_as_env_step([state])
probs = self._network_jit_apply_and_post_process(
self.params_target, env_step)
probs = jax.device_get(probs[0]) # Squeeze out the 1-element batch.
return {
action: probs[action]
for action, valid in enumerate(jax.device_get(env_step.legal[0]))
if valid
}
@functools.partial(jax.jit, static_argnums=(0,))
def _network_jit_apply_and_post_process(
self, params: Params, env_step: EnvStep) -> chex.Array:
pi, _, _, _ = self.network.apply(params, env_step)
pi = self.config.finetune.post_process_policy(pi, env_step.legal)
return pi
@functools.partial(jax.jit, static_argnums=(0,))
def _network_jit_apply(self, params: Params, env_step: EnvStep) -> chex.Array:
pi, _, _, _ = self.network.apply(params, env_step)
return pi
def actor_step(self, env_step: EnvStep):
pi = self._network_jit_apply(self.params, env_step)
pi = np.asarray(pi).astype("float64")
# TODO(author18): is this policy normalization really needed?
pi = pi / np.sum(pi, axis=-1, keepdims=True)
action = np.apply_along_axis(
lambda x: self._np_rng.choice(range(pi.shape[1]), p=x), axis=-1, arr=pi)
# TODO(author16): reapply the legal actions mask to bullet-proof sampling.
action_oh = np.zeros(pi.shape, dtype="float64")
action_oh[range(pi.shape[0]), action] = 1.0
actor_step = ActorStep(policy=pi, action_oh=action_oh, rewards=()) # pytype: disable=wrong-arg-types # numpy-scalars
return action, actor_step
def collect_batch_trajectory(self) -> TimeStep:
states = [
self._play_chance(self._game.new_initial_state())
for _ in range(self.config.batch_size)
]
timesteps = []
env_step = self._batch_of_states_as_env_step(states)
for _ in range(self.config.trajectory_max):
prev_env_step = env_step
a, actor_step = self.actor_step(env_step)
states = self._batch_of_states_apply_action(states, a)
env_step = self._batch_of_states_as_env_step(states)
timesteps.append(
TimeStep(
env=prev_env_step,
actor=ActorStep(
action_oh=actor_step.action_oh,
policy=actor_step.policy,
rewards=env_step.rewards),
))
# Concatenate all the timesteps together to form a single rollout [T, B, ..]
return jax.tree_util.tree_map(lambda *xs: np.stack(xs, axis=0), *timesteps)
def _batch_of_states_as_env_step(self,
states: Sequence[pyspiel.State]) -> EnvStep:
envs = [self._state_as_env_step(state) for state in states]
return jax.tree_util.tree_map(lambda *e: np.stack(e, axis=0), *envs)
def _batch_of_states_apply_action(
self, states: Sequence[pyspiel.State],
actions: chex.Array) -> Sequence[pyspiel.State]:
"""Apply a batch of `actions` to a parallel list of `states`."""
for state, action in zip(states, list(actions)):
if not state.is_terminal():
self.actor_steps += 1
state.apply_action(action)
self._play_chance(state)
return states
def _play_chance(self, state: pyspiel.State) -> pyspiel.State:
"""Plays the chance nodes until we end up at another type of node.
Args:
state: to be updated until it does not correspond to a chance node.
Returns:
The same input state object, but updated. The state is returned
only for convenience, to allow chaining function calls.
"""
while state.is_chance_node():
chance_outcome, chance_proba = zip(*state.chance_outcomes())
action = self._np_rng.choice(chance_outcome, p=chance_proba)
state.apply_action(action)
return state
| open_spiel-master | open_spiel/python/algorithms/rnad/rnad.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy Gradient (PG)."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import updates
class Solver(updates.Solver):
"""PG Solver."""
def __init__(self, proj_grad=True, euclidean=False, lrs=(1e-1,),
rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
super().__init__(proj_grad, euclidean, rnd_init, seed)
self.lrs = lrs
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients for all parameters.
Args:
params: tuple of params (dist,), see pg.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist,), see pg.gradients
unregularized exploitability (stochastic estimate)
unregularized exploitability (stochastic estimate) *duplicate
"""
return gradients(*params, payoff_matrices, self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Policy gradient does not minimize any exploitability so return NaN.
Args:
params: tuple of params (dist,)
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
np.NaN
"""
return np.NaN
def gradients(dist, payoff_matrices, proj_grad=True):
"""Computes exploitablity gradient.
Args:
dist: 1-d np.array, current estimate of nash distribution
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of payoff w.r.t. (dist) as tuple
unregularized exploitability (stochastic estimate)
unregularized exploitability (stochastic estimate) *duplicate
"""
nabla = payoff_matrices[0].dot(dist)
unreg_exp = np.max(nabla) - nabla.dot(dist)
grad_dist = -nabla
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
return (grad_dist,), unreg_exp, unreg_exp
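# An illustrative call for a symmetric matrix game; added for clarity, not
# part of the original module. For rock-paper-scissors the uniform
# distribution is a Nash equilibrium, so the exploitability estimate is 0:
#
#   payoff_matrices = np.array(
#       [[[0., -1., 1.], [1., 0., -1.], [-1., 1., 0.]],
#        [[0., 1., -1.], [-1., 0., 1.], [1., -1., 0.]]])
#   dist = np.ones(3) / 3.
#   (grad_dist,), unreg_exp, _ = gradients(dist, payoff_matrices)
#   # unreg_exp == max(nabla) - nabla.dot(dist) == 0 at the uniform dist.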
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/pg.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptive Tsallis Entropy (ATE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import misc
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import exploitability as exp
class Solver(object):
"""ATE Solver."""
def __init__(self, p=1., proj_grad=True, euclidean=False, cheap=False,
lrs=(1e-2, 1e-1), vr=True, rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
if (p < 0.) or (p > 1.):
raise ValueError("p must be in [0, 1]")
self.num_players = None
self.p = p
self.proj_grad = proj_grad
self.cheap = cheap
self.vr = vr
self.pm_vr = None
self.rnd_init = rnd_init
self.lrs = lrs
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if self.rnd_init:
init_dist = self.random.rand(num_strats)
else:
init_dist = np.ones(num_strats)
init_dist /= init_dist.sum()
init_y = np.zeros(num_strats)
if self.cheap and self.vr:
self.pm_vr = np.zeros((num_strats, num_strats))
return (init_dist, init_y)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
self.aux_errors.append([np.linalg.norm(grad_y)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y), see ate.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist, grad_y), see ate.gradients
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
if self.cheap and self.vr:
grads, pm_vr, exp_sto, exp_solver_sto = cheap_gradients_vr(
self.random, *params, payoff_matrices, self.num_players, self.pm_vr,
self.p, self.proj_grad,)
self.pm_vr = pm_vr
return grads, exp_sto, exp_solver_sto
elif self.cheap and not self.vr:
return cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.p, self.proj_grad)
else:
return gradients(*params, payoff_matrices, self.num_players, self.p,
self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return tsallis entropy regularized exploitability.
Args:
params: tuple of params (dist, y), see ate.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
float, exploitability of current dist
"""
return exp.ate_exploitability(params, payoff_matrices, self.p)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_params = [params[0] - lr_dist * grads[0]]
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_params += [params[1] - lr_y * grads[1]]
new_params = euc_project(*new_params)
return new_params
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_params = [np.log(np.clip(params[0], 0, np.inf)) - lr_dist * grads[0]]
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_params += [params[1] - lr_y * grads[1]]
new_params = mirror_project(*new_params)
return new_params
def gradients(dist, y, payoff_matrices, num_players, p=1, proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients.
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is abbreviated
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
nabla = payoff_matrices[0].dot(dist)
if p > 0:
power = 1. / float(p)
s = np.linalg.norm(y, ord=power)
if s == 0:
br = misc.uniform_dist(y)
else:
br = (y / s)**power
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
unreg_exp = np.max(y) - y.dot(dist)
br_inv_sparse = 1 - np.sum(br**(p + 1))
dist_inv_sparse = 1 - np.sum(dist**(p + 1))
entr_br = s / (p + 1) * br_inv_sparse
entr_dist = s / (p + 1) * dist_inv_sparse
reg_exp = y.dot(br - dist) + entr_br - entr_dist
entr_br_vec = br_inv_sparse * br**(1 - p)
entr_dist_vec = dist_inv_sparse * dist**(1 - p)
policy_gradient = nabla - s * dist**p
other_player_fx = (br - dist) + 1 / (p + 1) * (entr_br_vec - entr_dist_vec)
other_player_fx_translated = payoff_matrices[1].dot(other_player_fx)
grad_dist = -policy_gradient + (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
return (grad_dist, grad_y), unreg_exp, reg_exp
def cheap_gradients(random, dist, y, payoff_matrices, num_players, p=1,
proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input, so it technically uses
O(d^2) compute, but only a single column of payoff_matrices is used to perform
the update, so it can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is abbreviated
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
action_1 = random.choice(dist.size, p=dist)
nabla = payoff_matrices[0][:, action_1]
if p > 0:
power = 1. / float(p)
s = np.linalg.norm(y, ord=power)
if s == 0:
br = misc.uniform_dist(y)
else:
br = (y / s)**power
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
unreg_exp = np.max(y) - y.dot(dist)
br_inv_sparse = 1 - np.sum(br**(p + 1))
dist_inv_sparse = 1 - np.sum(dist**(p + 1))
entr_br = s / (p + 1) * br_inv_sparse
entr_dist = s / (p + 1) * dist_inv_sparse
reg_exp = y.dot(br - dist) + entr_br - entr_dist
entr_br_vec = br_inv_sparse * br**(1 - p)
entr_dist_vec = dist_inv_sparse * dist**(1 - p)
policy_gradient = nabla - s * dist**p
other_player_fx = (br - dist) + 1 / (p + 1) * (entr_br_vec - entr_dist_vec)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = dist.size * other_player_fx[action_u]
other_player_fx_translated = payoff_matrices[1, :, action_u] * other_player_fx
grad_dist = -policy_gradient + (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
return (grad_dist, grad_y), unreg_exp, reg_exp
def cheap_gradients_vr(random, dist, y, payoff_matrices, num_players, pm_vr,
p=1, proj_grad=True, version=0):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input, so it technically uses
O(d^2) compute, but only a single column of payoff_matrices is used to perform
the update, so it can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is abbreviated
pm_vr: approximate payoff_matrix for variance reduction
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
version: int, default 0, two options for variance reduction
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
if pm_vr is None:
raise ValueError("pm_vr must be np.array of shape (num_strats, num_strats)")
if (not isinstance(version, int)) or (version < 0) or (version > 1):
raise ValueError("version must be non-negative int < 2")
action_1 = random.choice(dist.size, p=dist)
nabla = payoff_matrices[0][:, action_1]
if p > 0:
power = 1. / float(p)
s = np.linalg.norm(y, ord=power)
if s == 0:
br = misc.uniform_dist(y)
else:
br = (y / s)**power
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
unreg_exp = np.max(y) - y.dot(dist)
br_inv_sparse = 1 - np.sum(br**(p + 1))
dist_inv_sparse = 1 - np.sum(dist**(p + 1))
entr_br = s / (p + 1) * br_inv_sparse
entr_dist = s / (p + 1) * dist_inv_sparse
reg_exp = y.dot(br - dist) + entr_br - entr_dist
entr_br_vec = br_inv_sparse * br**(1 - p)
entr_dist_vec = dist_inv_sparse * dist**(1 - p)
policy_gradient = nabla - s * dist**p
other_player_fx = (br - dist) + 1 / (p + 1) * (entr_br_vec - entr_dist_vec)
if version == 0:
other_player_fx_translated = pm_vr.dot(other_player_fx)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
pm_mod = dist.size * (payoff_matrices[1, :, action_u] - pm_vr[:, action_u])
other_player_fx_translated += pm_mod * other_player_fx
elif version == 1:
other_player_fx_translated = np.sum(pm_vr, axis=1)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
pm_mod = dist.size * payoff_matrices[1, :, action_u]
r = dist.size * pm_vr[:, action_u]
other_player_fx_translated += pm_mod * other_player_fx - r
grad_dist = -policy_gradient + (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
if version == 0:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u]
elif version == 1:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u] * other_player_fx
return (grad_dist, grad_y), pm_vr, unreg_exp, reg_exp
def euc_project(dist, y):
"""Project variables onto their feasible sets (euclidean proj for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
Returns:
projected variables (dist, y) as tuple
"""
dist = simplex.euclidean_projection_onto_simplex(dist)
y = np.clip(y, 0., np.inf)
return dist, y
def mirror_project(dist, y):
"""Project variables onto their feasible sets (softmax for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
Returns:
projected variables (dist, y) as tuple
"""
dist = special.softmax(dist)
y = np.clip(y, 0., np.inf)
return dist, y
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/ate.py |
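A hedged sketch (not part of the repository file) of a full descent loop with the ATE Solver above, using only the methods visible in the file. The game, learning rates, and iteration count are illustrative, and the imports assume the adidas_utils helpers are available.

import numpy as np

from open_spiel.python.algorithms.adidas_utils.solvers.symmetric import ate

num_strats, num_players = 4, 2
rng = np.random.RandomState(0)
payoff_0 = rng.rand(num_strats, num_strats)
payoff_matrices = np.stack([payoff_0, payoff_0.T])  # symmetric two-player game

solver = ate.Solver(p=1., lrs=(1e-2, 1e-1), seed=0)
params = solver.init_vars(num_strats, num_players)  # (dist, y)
for t in range(1000):
  grads, unreg_exp, reg_exp = solver.compute_gradients(params, payoff_matrices)
  solver.record_aux_errors(grads)
  params = solver.update(params, grads, t)  # entropic mirror descent by default
print(params[0], unreg_exp, reg_exp)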
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regret Matching Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
class Solver(object):
"""Regret-matching Solver."""
def __init__(self, optimism=True, discount=False, rnd_init=False, seed=None,
**kwargs):
"""Ctor."""
del kwargs
self.num_players = None
self.lrs = None
self.optimism = optimism
self.discount = discount
self.rnd_init = rnd_init
self.has_aux = True
self.aux_errors = []
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if self.rnd_init:
init_dist = self.random.rand(num_strats)
else:
init_dist = np.ones(num_strats)
init_dist /= init_dist.sum()
init_regret = np.zeros(num_strats)
return (init_dist, init_regret)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_regret = grads[1]
self.aux_errors.append([np.linalg.norm(grad_regret)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, regret), see regmatch.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist, grad_regret), see regmatch.gradients
unregularized exploitability (stochastic estimate)
solver exploitability (stochastic estimate) - NaN
"""
return gradients(*params, payoff_matrices)
def exploitability(self, params, payoff_matrices):
"""Regret matching does not minimize any exploitability so return NaN.
Args:
params: tuple of params (dist,)
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
np.NaN
"""
del params
del payoff_matrices
return np.NaN
def update(self, params, grads, t):
"""Update cumulative regret and strategy (dist).
Args:
params: tuple of variables to be updated (dist, regret)
grads: tuple of variable gradients (grad_dist, grad_regret)
t: int, solver iteration (used only when discount=True)
Returns:
new_params: tuple of update params (new_dist, new_regret)
"""
dist, regret = params
regret_delta = grads[1]
if self.discount:
gamma = t / float(t + 1)
else:
gamma = 1
new_regret = gamma * regret + regret_delta
new_clipped_regrets = np.clip(new_regret + self.optimism * regret_delta,
0.,
np.inf)
if np.sum(new_clipped_regrets) > 0:
new_dist = new_clipped_regrets / new_clipped_regrets.sum()
else:
new_dist = np.ones_like(dist) / dist.size
new_params = (new_dist, new_regret)
return new_params
def gradients(dist, regret, payoff_matrices):
"""Computes regret delta to be added to regret in update.
Args:
dist: 1-d np.array, current estimate of nash distribution
regret: 1-d np.array (same shape as dist), current estimate of regrets
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
deltas w.r.t. (dist, regret) as tuple
unregularized exploitability (stochastic estimate)
solver exploitability (stochastic estimate) - NaN
"""
del regret
nabla = payoff_matrices[0].dot(dist)
utility = nabla.dot(dist)
grad_dist = np.NaN * np.ones_like(dist)
grad_regret = nabla - utility
unreg_exp = np.max(nabla) - nabla.dot(dist)
return (grad_dist, grad_regret), unreg_exp, np.NaN
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/regmatch.py |
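An illustrative sketch (not part of the repository file) of the regret-matching Solver above on symmetric rock-paper-scissors. The averaged iterate is the quantity with the usual no-regret guarantee; hyperparameters are arbitrary.

import numpy as np

from open_spiel.python.algorithms.adidas_utils.solvers.symmetric import regmatch

rps = np.array([[0., -1., 1.],
                [1., 0., -1.],
                [-1., 1., 0.]])
payoff_matrices = np.stack([rps, rps.T])  # symmetric zero-sum game

solver = regmatch.Solver(optimism=True, discount=False, rnd_init=True, seed=0)
params = solver.init_vars(num_strats=3, num_players=2)  # (dist, regret)
avg_dist = np.zeros(3)
num_iters = 1000
for t in range(num_iters):
  grads, unreg_exp, _ = solver.compute_gradients(params, payoff_matrices)
  params = solver.update(params, grads, t)
  avg_dist += params[0] / num_iters
print(avg_dist)  # the averaged iterate should be close to uniform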
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.adidas_utils.solvers.symmetric."""
from absl import logging # pylint:disable=unused-import
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from scipy.spatial.distance import cosine
from open_spiel.python.algorithms.adidas_utils.solvers.symmetric import ate
from open_spiel.python.algorithms.adidas_utils.solvers.symmetric import ped
from open_spiel.python.algorithms.adidas_utils.solvers.symmetric import qre
class ExploitabilityDescentTest(parameterized.TestCase):
@staticmethod
def numerical_gradient(fun, x, eps=np.sqrt(np.finfo(float).eps)):
fun_0 = fun(x)
num_grad = np.zeros_like(x)
x_plus_dx = np.copy(x)
for i in range(len(x)):
x_plus_dx[i] = x[i] + eps
num_grad[i] = (fun(x_plus_dx) - fun_0) / eps
x_plus_dx[i] = x[i]
return num_grad
@staticmethod
def prep_params(dist, payoff_matrices, num_params, solver_tuple):
params = [dist]
if num_params > 1:
params += [payoff_matrices[0].dot(params[0])] # policy_gradient
if num_params > 2:
params += [np.linalg.norm(params[1], ord=solver_tuple[1])]
return tuple(params)
@parameterized.named_parameters(
("PED", (ped, False)),
("ATE_p=1", (ate, 1., False)),
("ATE_p=0.5", (ate, 0.5, False)),
("ATE_p=0.1", (ate, 0.1, False)),
("QRE_t=0.0", (qre, 0.0, False)),
("QRE_t=0.1", (qre, 0.1, False))
)
def test_exploitability_gradient_on_symmetric_two_player_matrix_games(
self, solver_tuple, trials=100, max_num_strats=2, atol=1e-1, rtol=1e-1,
seed=1234):
num_players = 2
solver = solver_tuple[0].Solver(*solver_tuple[1:])
random = np.random.RandomState(seed)
successes = []
for _ in range(trials):
num_strats = random.randint(low=2, high=max_num_strats + 1)
strat_dims = (num_strats,) * num_players
payoff_matrices = random.rand(num_players, *strat_dims)
payoff_matrices[1] = payoff_matrices[0].T
num_params = len(solver.init_vars(num_strats, num_players))
dirichlet_alpha = np.ones(num_strats)
dist = random.dirichlet(dirichlet_alpha)  # mixed strategy
params = self.prep_params(dist, payoff_matrices, num_params, solver_tuple)
grad = solver.compute_gradients(params, payoff_matrices)[0][0]
exp = lambda x: solver.exploitability(x, payoff_matrices) # pylint: disable=cell-var-from-loop
num_grad = self.numerical_gradient(exp, dist)
successes += [np.logical_and(np.allclose(grad, num_grad, rtol, atol),
cosine(grad, num_grad) <= atol)]
perc = 100 * np.mean(successes)
logging.info("gradient accuracy success rate out of %d is %f", trials, perc)
self.assertGreaterEqual(
perc, 95., "exploitability gradient accuracy is too poor")
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/solvers_test.py |
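The forward finite-difference scheme used by numerical_gradient in the test above, checked in isolation against a quadratic whose gradient is known. This standalone sketch is not part of the test file.

import numpy as np

def numerical_gradient(fun, x, eps=np.sqrt(np.finfo(float).eps)):
  """Forward finite-difference estimate of the gradient of fun at x."""
  fun_0 = fun(x)
  num_grad = np.zeros_like(x)
  x_plus_dx = np.copy(x)
  for i in range(len(x)):
    x_plus_dx[i] = x[i] + eps
    num_grad[i] = (fun(x_plus_dx) - fun_0) / eps
    x_plus_dx[i] = x[i]
  return num_grad

x = np.array([1.0, -2.0, 0.5])
quadratic = lambda v: 0.5 * v.dot(v)  # analytic gradient is v itself
print(np.allclose(numerical_gradient(quadratic, x), x, atol=1e-4))  # True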
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantal Response Equilibrium (QRE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import exploitability as exp
class Solver(object):
"""QRE Solver."""
def __init__(self, temperature=1., proj_grad=True, euclidean=False,
cheap=False, lrs=(1e-2, 1e-1), exp_thresh=-1., vr=True,
rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
if temperature < 0.:
raise ValueError("temperature must be non-negative")
self.num_players = None
self.temperature = temperature
self.proj_grad = proj_grad
self.cheap = cheap
self.vr = vr
self.pm_vr = None
self.rnd_init = rnd_init
self.lrs = lrs
self.exp_thresh = exp_thresh
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if self.rnd_init:
init_dist = self.random.rand(num_strats)
else:
init_dist = np.ones(num_strats)
init_dist /= init_dist.sum()
init_y = np.zeros(num_strats)
init_anneal_steps = 0
if self.cheap and self.vr:
self.pm_vr = np.zeros((num_strats, num_strats))
return (init_dist, init_y, init_anneal_steps)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
self.aux_errors.append([np.linalg.norm(grad_y)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y, anneal_steps), see gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist, grad_y, grad_anneal_steps), see gradients
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
if self.cheap and self.vr:
grads, pm_vr, exp_sto, exp_solver_sto = self.cheap_gradients_vr(
self.random, *params, payoff_matrices, self.num_players, self.pm_vr,
self.temperature, self.proj_grad,)
self.pm_vr = pm_vr
return grads, exp_sto, exp_solver_sto
elif self.cheap and not self.vr:
return self.cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.temperature,
self.proj_grad)
else:
return self.gradients(*params, payoff_matrices, self.num_players,
self.temperature, self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return shannon entropy regularized exploitability.
Args:
params: tuple of params (dist, y), see qre.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
float, exploitability of current dist
"""
return exp.qre_exploitability(params, payoff_matrices, self.temperature)
def gradients(self, dist, y, anneal_steps, payoff_matrices, num_players,
temperature=0., proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients.
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is
abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
nabla = payoff_matrices[0].dot(dist)
if temperature >= 1e-3:
br = special.softmax(y / temperature)
br_mat = (np.diag(br) - np.outer(br, br)) / temperature
log_br_safe = np.clip(np.log(br), -1e5, 0)
br_policy_gradient = nabla - temperature * (log_br_safe + 1)
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y >= s)
br[maxima] = 1. / maxima.sum()
br_mat = np.zeros((br.size, br.size))
br_policy_gradient = np.zeros_like(br)
unreg_exp = np.max(y) - y.dot(dist)
entr_br = temperature * special.entr(br).sum()
entr_dist = temperature * special.entr(dist).sum()
reg_exp = y.dot(br - dist) + entr_br - entr_dist
policy_gradient = np.array(nabla)
if temperature > 0:
log_dist_safe = np.clip(np.log(dist), -1e5, 0)
policy_gradient -= temperature * (log_dist_safe + 1)
other_player_fx = (br - dist) + br_mat.dot(br_policy_gradient)
other_player_fx_translated = payoff_matrices[1].dot(other_player_fx)
grad_dist = -policy_gradient
grad_dist += (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
_, lr_y = self.lrs
if (reg_exp < self.exp_thresh) and (anneal_steps >= 1 / lr_y):
self.temperature = np.clip(temperature / 2., 0., np.inf)
if self.temperature < 1e-3:
self.temperature = 0.
grad_anneal_steps = -anneal_steps
else:
grad_anneal_steps = 1
return (grad_dist, grad_y, grad_anneal_steps), unreg_exp, reg_exp
def cheap_gradients(self, random, dist, y, anneal_steps, payoff_matrices,
num_players, temperature=0., proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input, so it technically uses
O(d^2) compute, but only a single column of payoff_matrices is used to
perform the update, so it can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is
abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
del anneal_steps
action_1 = random.choice(dist.size, p=dist)
nabla = payoff_matrices[0][:, action_1]
if temperature >= 1e-3:
br = special.softmax(y / temperature)
br_mat = (np.diag(br) - np.outer(br, br)) / temperature
br_policy_gradient = nabla - temperature * (np.log(br) + 1)
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
br_mat = np.zeros((br.size, br.size))
br_policy_gradient = np.zeros_like(br)
unreg_exp = np.max(y) - y.dot(dist)
entr_br = temperature * special.entr(br).sum()
entr_dist = temperature * special.entr(dist).sum()
reg_exp = y.dot(br - dist) + entr_br - entr_dist
policy_gradient = nabla - temperature * (np.log(dist) + 1)
other_player_fx = (br - dist) + br_mat.dot(br_policy_gradient)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = dist.size * other_player_fx[action_u]
other_player_fx_translat = payoff_matrices[1, :, action_u] * other_player_fx
grad_dist = -policy_gradient + (num_players - 1) * other_player_fx_translat
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
return (grad_dist, grad_y, None), unreg_exp, reg_exp
def cheap_gradients_vr(self, random, dist, y, anneal_steps, payoff_matrices,
num_players, pm_vr, temperature=0., proj_grad=True,
version=0):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input, so it technically uses
O(d^2) compute, but only a single column of payoff_matrices is used to
perform the update, so it can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is
abbreviated
pm_vr: approximate payoff_matrix for variance reduction
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
version: int, default 0, two options for variance reduction
Returns:
gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
del anneal_steps
if pm_vr is None:
raise ValueError("pm_vr must be np.array of shape (num_strats,) * 2")
if (not isinstance(version, int)) or (version < 0) or (version > 1):
raise ValueError("version must be non-negative int < 2")
action_1 = random.choice(dist.size, p=dist)
nabla = payoff_matrices[0][:, action_1]
if temperature >= 1e-3:
br = special.softmax(y / temperature)
br_mat = (np.diag(br) - np.outer(br, br)) / temperature
br_policy_gradient = nabla - temperature * (np.log(br) + 1)
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
br_mat = np.zeros((br.size, br.size))
br_policy_gradient = np.zeros_like(br)
unreg_exp = np.max(y) - y.dot(dist)
entr_br = temperature * special.entr(br).sum()
entr_dist = temperature * special.entr(dist).sum()
reg_exp = y.dot(br - dist) + entr_br - entr_dist
policy_gradient = nabla - temperature * (np.log(dist) + 1)
other_player_fx = (br - dist) + br_mat.dot(br_policy_gradient)
if version == 0:
other_player_fx_translated = pm_vr.dot(other_player_fx)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
m = dist.size
pm_mod = m * (payoff_matrices[1, :, action_u] - pm_vr[:, action_u])
other_player_fx_translated += pm_mod * other_player_fx
elif version == 1:
other_player_fx_translated = np.sum(pm_vr, axis=1)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
pm_mod = dist.size * payoff_matrices[1, :, action_u]
r = dist.size * pm_vr[:, action_u]
other_player_fx_translated += pm_mod * other_player_fx - r
grad_dist = -policy_gradient
grad_dist += (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
if version == 0:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u]
elif version == 1:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u] * other_player_fx
return (grad_dist, grad_y, None), pm_vr, unreg_exp, reg_exp
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y, anneal_steps)
grads: tuple of variable gradients (grad_dist, grad_y, grad_anneal_steps)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y, new_anneal_steps)
"""
lr_dist, lr_y = self.lrs
new_params = [params[0] - lr_dist * grads[0]]
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_params += [params[1] - lr_y * grads[1]]
new_params = euc_project(*new_params)
new_params += (params[2] + grads[2],)
return new_params
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, y, anneal_steps)
grads: tuple of variable gradients (grad_dist, grad_y, grad_anneal_steps)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y, new_anneal_steps)
"""
lr_dist, lr_y = self.lrs
new_params = [np.log(np.clip(params[0], 0, np.inf)) - lr_dist * grads[0]]
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_params += [params[1] - lr_y * grads[1]]
new_params = mirror_project(*new_params)
new_params += (params[2] + grads[2],)
return new_params
def euc_project(dist, y):
"""Project variables onto their feasible sets (euclidean proj for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
Returns:
projected variables (dist, y) as tuple
"""
dist = simplex.euclidean_projection_onto_simplex(dist)
y = np.clip(y, 0., np.inf)
return dist, y
def mirror_project(dist, y):
"""Project variables onto their feasible sets (softmax for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
Returns:
projected variables (dist, y) as tuple
"""
dist = special.softmax(dist)
y = np.clip(y, 0., np.inf)
return dist, y
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/qre_anneal.py |
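An illustrative annealing loop (not part of the repository file) for the annealed QRE Solver above: the temperature is halved whenever the regularized exploitability falls below exp_thresh and enough steps have elapsed. Game, learning rates, and iteration count are arbitrary choices.

import numpy as np

from open_spiel.python.algorithms.adidas_utils.solvers.symmetric import qre_anneal

num_strats, num_players = 3, 2
rng = np.random.RandomState(0)
payoff_0 = rng.rand(num_strats, num_strats)
payoff_matrices = np.stack([payoff_0, payoff_0.T])

solver = qre_anneal.Solver(temperature=1., lrs=(1e-2, 1e-1), exp_thresh=1e-3)
params = solver.init_vars(num_strats, num_players)  # (dist, y, anneal_steps)
for t in range(2000):
  grads, unreg_exp, reg_exp = solver.compute_gradients(params, payoff_matrices)
  params = solver.update(params, grads, t)
print(params[0], solver.temperature, unreg_exp)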
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantal Response Equilibrium (QRE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import exploitability as exp
class Solver(object):
"""QRE Solver without auxiliary y variable."""
def __init__(self, temperature=1., proj_grad=True, euclidean=False,
cheap=False, lrs=(1e-2,), exp_thresh=-1., vr=True,
rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
if temperature < 0.:
raise ValueError("temperature must be non-negative")
self.num_players = None
self.temperature = temperature
self.proj_grad = proj_grad
self.cheap = cheap
self.vr = vr
self.pm_vr = None
self.rnd_init = rnd_init
self.lrs = lrs
self.exp_thresh = exp_thresh
self.has_aux = False
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if self.rnd_init:
init_dist = self.random.rand(num_strats)
else:
init_dist = np.ones(num_strats)
init_dist /= init_dist.sum()
init_anneal_steps = 0
if self.cheap and self.vr:
self.pm_vr = np.zeros((num_strats, num_strats))
return (init_dist, init_anneal_steps)
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, anneal_steps), see gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist, grad_anneal_steps), see gradients
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
if self.cheap and self.vr:
grads, pm_vr, exp_sto, exp_solver_sto = self.cheap_gradients_vr(
self.random, *params, payoff_matrices, self.num_players, self.pm_vr,
self.temperature, self.proj_grad,)
self.pm_vr = pm_vr
return grads, exp_sto, exp_solver_sto
elif self.cheap and not self.vr:
return self.cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.temperature,
self.proj_grad)
else:
return self.gradients(*params, payoff_matrices, self.num_players,
self.temperature, self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return shannon entropy regularized exploitability.
Args:
params: tuple of params (dist, y), see qre.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
float, exploitability of current dist
"""
return exp.qre_exploitability(params, payoff_matrices, self.temperature)
def gradients(self, dist, anneal_steps, payoff_matrices, num_players,
temperature=0., proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients.
Args:
dist: 1-d np.array, current estimate of nash distribution
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is
abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
y = nabla = payoff_matrices[0].dot(dist)
if temperature >= 1e-3:
br = special.softmax(y / temperature)
br_mat = (np.diag(br) - np.outer(br, br)) / temperature
log_br_safe = np.clip(np.log(br), -1e5, 0)
br_policy_gradient = nabla - temperature * (log_br_safe + 1)
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
br_mat = np.zeros((br.size, br.size))
br_policy_gradient = np.zeros_like(br)
unreg_exp = np.max(y) - y.dot(dist)
entr_br = temperature * special.entr(br).sum()
entr_dist = temperature * special.entr(dist).sum()
reg_exp = y.dot(br - dist) + entr_br - entr_dist
policy_gradient = np.array(nabla)
if temperature > 0:
log_dist_safe = np.clip(np.log(dist), -1e5, 0)
policy_gradient -= temperature * (log_dist_safe + 1)
other_player_fx = (br - dist) + br_mat.dot(br_policy_gradient)
other_player_fx_translated = payoff_matrices[1].dot(other_player_fx)
grad_dist = -policy_gradient
grad_dist += (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
if reg_exp < self.exp_thresh:
self.temperature = np.clip(temperature / 2., 0., np.inf)
if self.temperature < 1e-3:
self.temperature = 0.
grad_anneal_steps = -anneal_steps
else:
grad_anneal_steps = 1
return (grad_dist, grad_anneal_steps), unreg_exp, reg_exp
def cheap_gradients(self, random, dist, anneal_steps, payoff_matrices,
num_players, temperature=0., proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input, so it technically uses
O(d^2) compute, but only a single column of payoff_matrices is used to
perform the update, so it can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is
abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
del anneal_steps
action_1 = random.choice(dist.size, p=dist)
y = nabla = payoff_matrices[0][:, action_1]
if temperature >= 1e-3:
br = special.softmax(y / temperature)
br_mat = (np.diag(br) - np.outer(br, br)) / temperature
br_policy_gradient = nabla - temperature * (np.log(br) + 1)
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
br_mat = np.zeros((br.size, br.size))
br_policy_gradient = np.zeros_like(br)
unreg_exp = np.max(y) - y.dot(dist)
entr_br = temperature * special.entr(br).sum()
entr_dist = temperature * special.entr(dist).sum()
reg_exp = y.dot(br - dist) + entr_br - entr_dist
policy_gradient = nabla - temperature * (np.log(dist) + 1)
other_player_fx = (br - dist) + br_mat.dot(br_policy_gradient)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = dist.size * other_player_fx[action_u]
other_player_fx_translat = payoff_matrices[1, :, action_u] * other_player_fx
grad_dist = -policy_gradient + (num_players - 1) * other_player_fx_translat
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
return (grad_dist, None), unreg_exp, reg_exp
def cheap_gradients_vr(self, random, dist, anneal_steps, payoff_matrices,
num_players, pm_vr, temperature=0., proj_grad=True,
version=0):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input, so it technically uses
O(d^2) compute, but only a single column of payoff_matrices is used to
perform the update, so it can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is
abbreviated
pm_vr: approximate payoff_matrix for variance reduction
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
version: int, default 0, two options for variance reduction
Returns:
gradient of exploitability w.r.t. (dist, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
del anneal_steps
if pm_vr is None:
raise ValueError("pm_vr must be np.array of shape (num_strats,) * 2")
if (not isinstance(version, int)) or (version < 0) or (version > 1):
raise ValueError("version must be non-negative int < 2")
action_1 = random.choice(dist.size, p=dist)
y = nabla = payoff_matrices[0][:, action_1]
if temperature >= 1e-3:
br = special.softmax(y / temperature)
br_mat = (np.diag(br) - np.outer(br, br)) / temperature
br_policy_gradient = nabla - temperature * (np.log(br) + 1)
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
br_mat = np.zeros((br.size, br.size))
br_policy_gradient = np.zeros_like(br)
unreg_exp = np.max(y) - y.dot(dist)
entr_br = temperature * special.entr(br).sum()
entr_dist = temperature * special.entr(dist).sum()
reg_exp = y.dot(br - dist) + entr_br - entr_dist
policy_gradient = nabla - temperature * (np.log(dist) + 1)
other_player_fx = (br - dist) + br_mat.dot(br_policy_gradient)
if version == 0:
other_player_fx_translated = pm_vr.dot(other_player_fx)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
m = dist.size
pm_mod = m * (payoff_matrices[1, :, action_u] - pm_vr[:, action_u])
other_player_fx_translated += pm_mod * other_player_fx
elif version == 1:
other_player_fx_translated = np.sum(pm_vr, axis=1)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
pm_mod = dist.size * payoff_matrices[1, :, action_u]
r = dist.size * pm_vr[:, action_u]
other_player_fx_translated += pm_mod * other_player_fx - r
grad_dist = -policy_gradient
grad_dist += (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
if version == 0:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u]
elif version == 1:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u] * other_player_fx
return (grad_dist, None), pm_vr, unreg_exp, reg_exp
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, anneal_steps)
grads: tuple of variable gradients (grad_dist, grad_anneal_steps)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_anneal_steps)
"""
del t
lr_dist = self.lrs[0]
new_params = [params[0] - lr_dist * grads[0]]
new_params = euc_project(*new_params)
new_params += (params[1] + grads[1],)
return new_params
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, anneal_steps)
grads: tuple of variable gradients (grad_dist, grad_anneal_steps)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_anneal_steps)
"""
del t
lr_dist = self.lrs[0]
new_params = [np.log(np.clip(params[0], 0, np.inf)) - lr_dist * grads[0]]
new_params = mirror_project(*new_params)
new_params += (params[1] + grads[1],)
return new_params
def euc_project(dist):
"""Project variables onto their feasible sets (euclidean proj for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
Returns:
projected variables (dist,) as tuple
"""
dist = simplex.euclidean_projection_onto_simplex(dist)
return (dist,)
def mirror_project(dist):
"""Project variables onto their feasible sets (softmax for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
Returns:
projected variables (dist,) as tuple
"""
dist = special.softmax(dist)
return (dist,)
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/qre_anneal_noaux.py |
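The same kind of loop for the no-aux variant above (illustrative only): the state is just (dist, anneal_steps) and the payoff gradient is recomputed from dist inside gradients, so only a single learning rate is supplied.

import numpy as np

from open_spiel.python.algorithms.adidas_utils.solvers.symmetric import qre_anneal_noaux

num_strats, num_players = 3, 2
rng = np.random.RandomState(0)
payoff_0 = rng.rand(num_strats, num_strats)
payoff_matrices = np.stack([payoff_0, payoff_0.T])

solver = qre_anneal_noaux.Solver(temperature=1., lrs=(1e-2,), exp_thresh=1e-3)
params = solver.init_vars(num_strats, num_players)  # (dist, anneal_steps)
for t in range(2000):
  grads, unreg_exp, reg_exp = solver.compute_gradients(params, payoff_matrices)
  params = solver.update(params, grads, t)
print(params[0], solver.temperature)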
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Population Exploitability Descent (PED) Stochastic Approx. Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import updates
class Solver(updates.Solver):
"""PED Solver."""
def __init__(self, proj_grad=True, euclidean=False, lrs=(1e-1,),
rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
super().__init__(proj_grad, euclidean, rnd_init, seed)
self.lrs = lrs
def compute_gradients(self, params, payoff_matrices):
"""Compute and return exploitability.
Args:
params: tuple of params (dist,), see ped.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist,), see ped.gradients
unregularized exploitability (stochastic estimate)
unregularized exploitability (stochastic estimate) *duplicate
"""
return gradients(*params, payoff_matrices, self.num_players, self.proj_grad)
def gradients(dist, payoff_matrices, num_players, proj_grad=True):
"""Computes exploitablity gradient.
Args:
dist: 1-d np.array, current estimate of nash distribution
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is abbreviated
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist) as tuple
unregularized exploitability (stochastic estimate)
unregularized exploitability (stochastic estimate) *duplicate
"""
nabla = payoff_matrices[0].dot(dist)
power = np.inf
s = np.linalg.norm(nabla, ord=power)
br = np.zeros_like(dist)
maxima = (nabla == s)
br[maxima] = 1. / maxima.sum()
unreg_exp = np.max(nabla) - nabla.dot(dist)
grad_dist = -(nabla) + (num_players - 1) * payoff_matrices[1].dot(br - dist)
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
return (grad_dist,), unreg_exp, unreg_exp
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/ped.py |
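A worked single gradient evaluation (not part of the repository file) for the module-level ped.gradients above on a small symmetric game; the payoff matrix is an arbitrary example.

import numpy as np

from open_spiel.python.algorithms.adidas_utils.solvers.symmetric import ped

payoff_0 = np.array([[3., 0.],
                     [5., 1.]])  # symmetric prisoner's-dilemma-style payoffs
payoff_matrices = np.stack([payoff_0, payoff_0.T])
dist = np.array([0.5, 0.5])

(grad_dist,), unreg_exp, _ = ped.gradients(
    dist, payoff_matrices, num_players=2, proj_grad=False)
# nabla = payoff_0 @ dist = [1.5, 3.0]; the best response is the second action,
# so unreg_exp = 3.0 - (0.5 * 1.5 + 0.5 * 3.0) = 0.75.
print(grad_dist, unreg_exp)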
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantal Response Equilibrium (QRE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import exploitability as exp
class Solver(object):
"""QRE Solver."""
def __init__(self, temperature=0., proj_grad=True, euclidean=False,
cheap=False, lrs=(1e-2, 1e-1), vr=True, rnd_init=False,
seed=None, **kwargs):
"""Ctor."""
del kwargs
if temperature < 0.:
raise ValueError("temperature must be non-negative")
self.num_players = None
self.temperature = temperature
self.proj_grad = proj_grad
self.cheap = cheap
self.vr = vr
self.pm_vr = None
self.rnd_init = rnd_init
self.lrs = lrs
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if self.rnd_init:
init_dist = self.random.rand(num_strats)
else:
init_dist = np.ones(num_strats)
init_dist /= init_dist.sum()
init_y = np.zeros(num_strats)
if self.cheap and self.vr:
self.pm_vr = np.zeros((num_strats, num_strats))
return (init_dist, init_y)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
self.aux_errors.append([np.linalg.norm(grad_y)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y), see qre.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist, grad_y), see qre.gradients
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
if self.cheap and self.vr:
grads, pm_vr, exp_sto, exp_solver_sto = cheap_gradients_vr(
self.random, *params, payoff_matrices, self.num_players, self.pm_vr,
self.temperature, self.proj_grad,)
self.pm_vr = pm_vr
return grads, exp_sto, exp_solver_sto
elif self.cheap and not self.vr:
return cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.temperature, self.proj_grad)
else:
return gradients(*params, payoff_matrices, self.num_players,
self.temperature, self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return shannon entropy regularized exploitability.
Args:
params: tuple of params (dist, y), see qre.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
float, exploitability of current dist
"""
return exp.qre_exploitability(params, payoff_matrices, self.temperature)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_params = [params[0] - lr_dist * grads[0]]
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_params += [params[1] - lr_y * grads[1]]
new_params = euc_project(*new_params)
return new_params
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_params = [np.log(np.clip(params[0], 0, np.inf)) - lr_dist * grads[0]]
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_params += [params[1] - lr_y * grads[1]]
new_params = mirror_project(*new_params)
return new_params
def gradients(dist, y, payoff_matrices, num_players, temperature=0.,
proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients.
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
nabla = payoff_matrices[0].dot(dist)
y = nabla
if temperature > 0:
br = special.softmax(y / temperature)
br_mat = (np.diag(br) - np.outer(br, br)) / temperature
br_policy_gradient = nabla - temperature * (np.log(br) + 1)
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
br_mat = np.zeros((br.size, br.size))
br_policy_gradient = np.zeros_like(br)
unreg_exp = np.max(y) - y.dot(dist)
entr_br = temperature * special.entr(br).sum()
entr_dist = temperature * special.entr(dist).sum()
reg_exp = y.dot(br - dist) + entr_br - entr_dist
policy_gradient = np.array(nabla)  # copy to avoid mutating nabla in place
if temperature > 0:
policy_gradient -= temperature * (np.log(dist) + 1)
other_player_fx = (br - dist) + br_mat.dot(br_policy_gradient)
other_player_fx_translated = payoff_matrices[1].dot(other_player_fx)
grad_dist = -policy_gradient + (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
return (grad_dist, grad_y), unreg_exp, reg_exp
def cheap_gradients(random, dist, y, payoff_matrices, num_players,
temperature=0., proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input, so it technically uses
O(d^2) compute, but only a single column of payoff_matrices is used to perform
the update, so it can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
shannon entropy regularized exploitability (stochastic estimate)
"""
action_1 = random.choice(dist.size, p=dist)
nabla = payoff_matrices[0][:, action_1]
if temperature > 0:
br = special.softmax(y / temperature)
br_mat = (np.diag(br) - np.outer(br, br)) / temperature
br_policy_gradient = nabla - temperature * (np.log(br) + 1)
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
br_mat = np.zeros((br.size, br.size))
br_policy_gradient = np.zeros_like(br)
unreg_exp = np.max(y) - y.dot(dist)
entr_br = temperature * special.entr(br).sum()
entr_dist = temperature * special.entr(dist).sum()
reg_exp = y.dot(br - dist) + entr_br - entr_dist
policy_gradient = np.array(nabla)  # copy to avoid mutating nabla in place
if temperature > 0:
policy_gradient -= temperature * (np.log(dist) + 1)
other_player_fx = (br - dist) + br_mat.dot(br_policy_gradient)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = dist.size * other_player_fx[action_u]
other_player_fx_translated = payoff_matrices[1, :, action_u] * other_player_fx
grad_dist = -policy_gradient + (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
return (grad_dist, grad_y), unreg_exp, reg_exp
def cheap_gradients_vr(random, dist, y, payoff_matrices, num_players, pm_vr,
temperature=0., proj_grad=True, version=0):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input, so it technically uses
O(d^2) compute, but only a single column of payoff_matrices is used to perform
the update, so it can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is abbreviated
pm_vr: approximate payoff_matrix for variance reduction
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
version: int, default 0, two options for variance reduction
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
    shannon regularized exploitability (stochastic estimate)
"""
if pm_vr is None:
raise ValueError("pm_vr must be np.array of shape (num_strats, num_strats)")
if (not isinstance(version, int)) or (version < 0) or (version > 1):
raise ValueError("version must be non-negative int < 2")
action_1 = random.choice(dist.size, p=dist)
nabla = payoff_matrices[0][:, action_1]
if temperature > 0:
br = special.softmax(y / temperature)
br_mat = (np.diag(br) - np.outer(br, br)) / temperature
br_policy_gradient = nabla - temperature * (np.log(br) + 1)
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
br_mat = np.zeros((br.size, br.size))
br_policy_gradient = np.zeros_like(br)
unreg_exp = np.max(y) - y.dot(dist)
entr_br = temperature * special.entr(br).sum()
entr_dist = temperature * special.entr(dist).sum()
reg_exp = y.dot(br - dist) + entr_br - entr_dist
  policy_gradient = np.array(nabla)  # copy; nabla views payoff_matrices
if temperature > 0:
policy_gradient -= temperature * (np.log(dist) + 1)
other_player_fx = (br - dist) + br_mat.dot(br_policy_gradient)
if version == 0:
other_player_fx_translated = pm_vr.dot(other_player_fx)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
pm_mod = dist.size * (payoff_matrices[1, :, action_u] - pm_vr[:, action_u])
other_player_fx_translated += pm_mod * other_player_fx
elif version == 1:
other_player_fx_translated = np.sum(pm_vr, axis=1)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
pm_mod = dist.size * payoff_matrices[1, :, action_u]
r = dist.size * pm_vr[:, action_u]
other_player_fx_translated += pm_mod * other_player_fx - r
grad_dist = -policy_gradient + (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
if version == 0:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u]
elif version == 1:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u] * other_player_fx
return (grad_dist, grad_y), pm_vr, unreg_exp, reg_exp
def euc_project(dist, y):
"""Project variables onto their feasible sets (euclidean proj for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
Returns:
projected variables (dist, y) as tuple
"""
dist = simplex.euclidean_projection_onto_simplex(dist)
y = np.clip(y, 0., np.inf)
return dist, y
def mirror_project(dist, y):
"""Project variables onto their feasible sets (softmax for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
Returns:
projected variables (dist, y) as tuple
"""
dist = special.softmax(dist)
y = np.clip(y, 0., np.inf)
return dist, y
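# Editor's usage sketch (not part of the original module). Assuming the
# module-level imports at the top of this file (numpy as np, scipy.special,
# the simplex helper), it illustrates one call to `cheap_gradients` on a
# random 3-action, two-player symmetric game; the payoff tensor and the
# temperature are illustrative values only.
if __name__ == "__main__":  # pragma: no cover
  _rng = np.random.RandomState(0)
  _payoffs = _rng.rand(2, 3, 3)  # (2 x A x A) payoff tensor
  _dist = np.ones(3) / 3.  # current estimate of the symmetric Nash strategy
  _y = _payoffs[0].dot(_dist)  # current estimate of the payoff gradient
  (_grad_dist, _grad_y), _unreg_exp, _reg_exp = cheap_gradients(
      _rng, _dist, _y, _payoffs, num_players=2, temperature=0.1)
  print("unreg_exp=%.4f reg_exp=%.4f" % (_unreg_exp, _reg_exp))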
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/qre.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptive Tsallis Entropy (ATE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import misc
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import exploitability as exp
class Solver(object):
"""ATE Solver."""
def __init__(self, p=1., proj_grad=True, euclidean=False, cheap=False,
lrs=(1e-2, 1e-1), exp_thresh=-1., vr=True, rnd_init=False,
seed=None, **kwargs):
"""Ctor."""
del kwargs
if (p < 0.) or (p > 1.):
raise ValueError("p must be in [0, 1]")
self.num_players = None
self.p_init = p
self.p = p
self.proj_grad = proj_grad
self.cheap = cheap
self.vr = vr
self.pm_vr = None
self.rnd_init = rnd_init
self.lrs = lrs
self.exp_thresh = exp_thresh
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if self.rnd_init:
init_dist = self.random.rand(num_strats)
else:
init_dist = np.ones(num_strats)
init_dist /= init_dist.sum()
init_y = np.zeros(num_strats)
init_anneal_steps = 0
if self.cheap and self.vr:
self.pm_vr = np.zeros((num_strats, num_strats))
return (init_dist, init_y, init_anneal_steps)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
self.aux_errors.append([np.linalg.norm(grad_y)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y, anneal_steps), see gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist, grad_y, grad_anneal_steps), see gradients
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
if self.cheap and self.vr:
grads, pm_vr, exp_sto, exp_solver_sto = self.cheap_gradients_vr(
self.random, *params, payoff_matrices, self.num_players, self.pm_vr,
self.p, self.proj_grad,)
self.pm_vr = pm_vr
return grads, exp_sto, exp_solver_sto
elif self.cheap and not self.vr:
return self.cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.p, self.proj_grad)
else:
return self.gradients(*params, payoff_matrices, self.num_players, self.p,
self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return tsallis entropy regularized exploitability.
Args:
params: tuple of params (dist, y), see ate.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
float, exploitability of current dist
"""
return exp.ate_exploitability(params, payoff_matrices, self.p)
def gradients(self, dist, y, anneal_steps, payoff_matrices, num_players, p=1,
proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients.
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is
abbreviated
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
nabla = payoff_matrices[0].dot(dist)
if p > 1e-2: # encounter numerical under/overflow when power > 100.
power = 1. / float(p)
s = np.linalg.norm(y, ord=power)
if s == 0:
br = misc.uniform_dist(y)
else:
br = (y / s)**power
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
unreg_exp = np.max(y) - y.dot(dist)
br_inv_sparse = 1 - np.sum(br**(p + 1))
dist_inv_sparse = 1 - np.sum(dist**(p + 1))
entr_br = s / (p + 1) * br_inv_sparse
entr_dist = s / (p + 1) * dist_inv_sparse
reg_exp = y.dot(br - dist) + entr_br - entr_dist
entr_br_vec = br_inv_sparse * br**(1 - p)
entr_dist_vec = dist_inv_sparse * dist**(1 - p)
policy_gradient = nabla - s * dist**p
other_player_fx = (br - dist) + 1 / (p + 1) * (entr_br_vec - entr_dist_vec)
other_player_fx_translated = payoff_matrices[1].dot(other_player_fx)
grad_dist = -policy_gradient
grad_dist += (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
_, lr_y = self.lrs
if (reg_exp < self.exp_thresh) and (anneal_steps >= 1 / lr_y):
self.p = np.clip(p / 2., 0., 1.)
grad_anneal_steps = -anneal_steps
else:
grad_anneal_steps = 1
return (grad_dist, grad_y, grad_anneal_steps), unreg_exp, reg_exp
def cheap_gradients(self, random, dist, y, anneal_steps, payoff_matrices,
num_players, p=1, proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input so technically uses
O(d^2) compute but only a single column of payoff_matrices is used to
perform the update so can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is abbrev'd
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
del anneal_steps
action_1 = random.choice(dist.size, p=dist)
nabla = payoff_matrices[0][:, action_1]
if p > 0:
power = 1. / float(p)
s = np.linalg.norm(y, ord=power)
if s == 0:
br = misc.uniform_dist(y)
else:
br = (y / s)**power
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
unreg_exp = np.max(y) - y.dot(dist)
entr_br = s / (p + 1) * (1 - np.sum(br**(p + 1)))
entr_dist = s / (p + 1) * (1 - np.sum(dist**(p + 1)))
reg_exp = y.dot(br - dist) + entr_br - entr_dist
entr_br_vec = (p + 1) / s * entr_br * br**(1 - p)
entr_dist_vec = (p + 1) / s * entr_dist * dist**(1 - p)
policy_gradient = nabla - s * dist**p
other_player_fx = (br - dist) + 1 / (p + 1) * (entr_br_vec - entr_dist_vec)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = dist.size * other_player_fx[action_u]
other_player_fx_translat = payoff_matrices[1, :, action_u] * other_player_fx
grad_dist = -policy_gradient + (num_players - 1) * other_player_fx_translat
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
return (grad_dist, grad_y, None), unreg_exp, reg_exp
def cheap_gradients_vr(self, random, dist, y, anneal_steps, payoff_matrices,
num_players, pm_vr, p=1, proj_grad=True, version=0):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input so technically uses
O(d^2) compute but only a single column of payoff_matrices is used to
perform the update so can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
num_players: int, number of players, in case payoff_matrices is abbrev'd
pm_vr: approximate payoff_matrix for variance reduction
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
version: int, default 0, two options for variance reduction
Returns:
gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
del anneal_steps
if pm_vr is None:
raise ValueError("pm_vr must be np.array of shape (num_strats,) * 2")
if (not isinstance(version, int)) or (version < 0) or (version > 1):
raise ValueError("version must be non-negative int < 2")
action_1 = random.choice(dist.size, p=dist)
nabla = payoff_matrices[0][:, action_1]
if p > 0:
power = 1. / float(p)
s = np.linalg.norm(y, ord=power)
if s == 0:
br = misc.uniform_dist(y)
else:
br = (y / s)**power
else:
power = np.inf
s = np.linalg.norm(y, ord=power)
br = np.zeros_like(dist)
maxima = (y == s)
br[maxima] = 1. / maxima.sum()
unreg_exp = np.max(y) - y.dot(dist)
entr_br = s / (p + 1) * (1 - np.sum(br**(p + 1)))
entr_dist = s / (p + 1) * (1 - np.sum(dist**(p + 1)))
reg_exp = y.dot(br - dist) + entr_br - entr_dist
entr_br_vec = (p + 1) / s * entr_br * br**(1 - p)
entr_dist_vec = (p + 1) / s * entr_dist * dist**(1 - p)
policy_gradient = nabla - s * dist**p
other_player_fx = (br - dist) + 1 / (p + 1) * (entr_br_vec - entr_dist_vec)
if version == 0:
other_player_fx_translated = pm_vr.dot(other_player_fx)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
m = dist.size
pm_mod = m * (payoff_matrices[1, :, action_u] - pm_vr[:, action_u])
other_player_fx_translated += pm_mod * other_player_fx
elif version == 1:
other_player_fx_translated = np.sum(pm_vr, axis=1)
action_u = random.choice(dist.size) # uniform, ~importance sampling
other_player_fx = other_player_fx[action_u]
pm_mod = dist.size * payoff_matrices[1, :, action_u]
r = dist.size * pm_vr[:, action_u]
other_player_fx_translated += pm_mod * other_player_fx - r
grad_dist = -policy_gradient
grad_dist += (num_players - 1) * other_player_fx_translated
if proj_grad:
grad_dist = simplex.project_grad(grad_dist)
grad_y = y - nabla
if version == 0:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u]
elif version == 1:
pm_vr[:, action_u] = payoff_matrices[1, :, action_u] * other_player_fx
return (grad_dist, grad_y, None), pm_vr, unreg_exp, reg_exp
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y, anneal_steps)
grads: tuple of variable gradients (grad_dist, grad_y, grad_anneal_steps)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y, new_anneal_steps)
"""
lr_dist, lr_y = self.lrs
new_params = [params[0] - lr_dist * grads[0]]
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_params += [params[1] - lr_y * grads[1]]
new_params = euc_project(*new_params)
new_params += (params[2] + grads[2],)
return new_params
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, y, anneal_steps)
grads: tuple of variable gradients (grad_dist, grad_y, grad_anneal_steps)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y, new_anneal_steps)
"""
lr_dist, lr_y = self.lrs
new_params = [np.log(np.clip(params[0], 0, np.inf)) - lr_dist * grads[0]]
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_params += [params[1] - lr_y * grads[1]]
new_params = mirror_project(*new_params)
new_params += (params[2] + grads[2],)
return new_params
def euc_project(dist, y):
"""Project variables onto their feasible sets (euclidean proj for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
Returns:
projected variables (dist, y) as tuple
"""
dist = simplex.euclidean_projection_onto_simplex(dist)
y = np.clip(y, 0., np.inf)
return dist, y
def mirror_project(dist, y):
"""Project variables onto their feasible sets (softmax for dist).
Args:
dist: 1-d np.array, current estimate of nash distribution
y: 1-d np.array (same shape as dist), current estimate of payoff gradient
Returns:
projected variables (dist, y) as tuple
"""
dist = special.softmax(dist)
y = np.clip(y, 0., np.inf)
return dist, y
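# Editor's usage sketch (not part of the original module): a minimal descent
# loop for the annealed ATE solver on a random 4-action, two-player symmetric
# game. The payoff tensor, learning rates and exp_thresh are illustrative
# assumptions only.
if __name__ == "__main__":  # pragma: no cover
  _rng = np.random.RandomState(0)
  _payoffs = _rng.rand(2, 4, 4)  # (2 x A x A) payoff tensor
  _solver = Solver(p=1., lrs=(1e-2, 1e-1), exp_thresh=0.1, seed=0)
  _params = _solver.init_vars(num_strats=4, num_players=2)
  for _t in range(10):
    _grads, _unreg_exp, _reg_exp = _solver.compute_gradients(_params, _payoffs)
    _solver.record_aux_errors(_grads)
    _params = _solver.update(_params, _grads, _t)
  print("unreg_exp=%.4f reg_exp=%.4f" % (_unreg_exp, _reg_exp))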
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/symmetric/ate_anneal.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy Gradient (PG)."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import updates
class Solver(updates.Solver):
"""PG Solver."""
def __init__(self, proj_grad=True, euclidean=False, lrs=(1e-1,),
rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
super().__init__(proj_grad, euclidean, rnd_init, seed)
self.lrs = lrs
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients for all parameters.
Args:
params: tuple of params (dist,), see pg.gradients
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
tuple of gradients (grad_dist,), see pg.gradients
unregularized exploitability (stochastic estimate)
unregularized exploitability (stochastic estimate) *duplicate
"""
return gradients(*params, payoff_matrices, self.num_players, self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Policy gradient does not minimize any exploitability so return NaN.
Args:
params: tuple of params (dist,)
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
np.NaN
"""
return np.NaN
def gradients(dist, payoff_matrices, num_players, proj_grad=True):
"""Computes exploitablity gradient.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of payoff w.r.t. (dist) as tuple
unregularized exploitability (stochastic estimate)
unregularized exploitability (stochastic estimate) *duplicate
"""
# first compute best responses and payoff gradients
grad_dist = []
unreg_exp = []
for i in range(num_players):
nabla_i = np.zeros_like(dist[i])
# TODO(imgemp): decide if averaging over nablas provides best comparison
for j in range(num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_ij = hess_i_ij.dot(dist[j])
nabla_i += nabla_ij / float(num_players - 1)
grad_dist_i = -nabla_i
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
    grad_dist.append(grad_dist_i)
unreg_exp.append(np.max(nabla_i) - nabla_i.dot(dist[i]))
return (grad_dist,), np.mean(unreg_exp), np.mean(unreg_exp)
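# Editor's usage sketch (not part of the original module): it shows the
# dictionary format expected for payoff_matrices in the nonsymmetric case,
# here a random two-player game with 3 and 2 actions; the payoffs are
# illustrative only.
if __name__ == "__main__":  # pragma: no cover
  _rng = np.random.RandomState(0)
  _dist = [np.ones(3) / 3., np.ones(2) / 2.]
  _payoff_matrices = {(0, 1): _rng.rand(2, 3, 2)}  # (2 x A_0 x A_1)
  (_grad_dist,), _unreg_exp, _ = gradients(_dist, _payoff_matrices,
                                           num_players=2)
  print("unreg_exp=%.4f" % _unreg_exp)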
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/pg.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptive Tsallis Entropy (ATE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import misc
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability as exp
class Solver(object):
"""ATE Solver."""
def __init__(self, p=1., proj_grad=True, euclidean=False, cheap=False,
lrs=(1e-2, 1e-1), rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
if (p < 0.) or (p > 1.):
raise ValueError('p must be in [0, 1]')
self.num_players = None
self.p = p
self.proj_grad = proj_grad
self.cheap = cheap
self.rnd_init = rnd_init
self.lrs = lrs
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if len(num_strats) != num_players:
raise ValueError('Must specify num strategies for each player')
init_dist = []
for num_strats_i in num_strats:
if self.rnd_init:
init_dist_i = self.random.rand(num_strats_i)
else:
init_dist_i = np.ones(num_strats_i)
init_dist_i /= init_dist_i.sum()
init_dist.append(init_dist_i)
init_y = [np.zeros_like(dist_i) for dist_i in init_dist]
return (init_dist, init_y)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
# call ravel in case use y to track entire payoff matrices in future
grad_y_flat = np.concatenate([np.ravel(g) for g in grad_y])
self.aux_errors.append([np.linalg.norm(grad_y_flat)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y), see ate.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist, grad_y), see ate.gradients
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
if self.cheap:
return cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.p, self.proj_grad)
else:
return gradients(*params, payoff_matrices, self.num_players, self.p,
self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return tsallis entropy regularized exploitability.
Args:
params: tuple of params (dist, y), see ate.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
float, exploitability of current dist
"""
return exp.ate_exploitability(params, payoff_matrices, self.p)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
      t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = dist_i - lr_dist * dist_grad_i
new_dist_i = simplex.euclidean_projection_onto_simplex(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
return (new_dist, new_y)
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
      t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = np.log(np.clip(dist_i, 0., np.inf)) - lr_dist * dist_grad_i
new_dist_i = special.softmax(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
return (new_dist, new_y)
def gradients(dist, y, payoff_matrices, num_players, p=1, proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
y: list 1-d np.arrays (same shape as dist), current est. of payoff gradient
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
nabla_i = np.zeros_like(dist[i])
for j in range(num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_ij = hess_i_ij.dot(dist[j])
nabla_i += nabla_ij / float(num_players - 1)
grad_y.append(y[i] - nabla_i)
if p > 0:
power = 1. / float(p)
s_i = np.linalg.norm(y[i], ord=power)
if s_i == 0:
br_i = misc.uniform_dist(y[i])
else:
br_i = (y[i] / s_i)**power
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
policy_gradient_i = nabla_i - s_i * dist[i]**p
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
br_i_inv_sparse = 1 - np.sum(br_i**(p + 1))
dist_i_inv_sparse = 1 - np.sum(dist[i]**(p + 1))
entr_br_i = s_i / (p + 1) * br_i_inv_sparse
entr_dist_i = s_i / (p + 1) * dist_i_inv_sparse
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
entr_br_vec_i = br_i_inv_sparse * br_i**(1 - p)
entr_dist_vec_i = dist_i_inv_sparse * dist[i]**(1 - p)
other_player_fx_i = (br_i - dist[i]) + 1 / (p + 1) * (
entr_br_vec_i - entr_dist_vec_i)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
grad_dist_i += hess_j_ij.dot(other_player_fx[j])
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
return (grad_dist, grad_y), np.mean(unreg_exp), np.mean(reg_exp)
def cheap_gradients(random, dist, y, payoff_matrices, num_players, p=1,
proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input so technically uses O(d^2)
compute but only a single column of payoff_matrices is used to perform the
update so can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: list of 1-d np.arrays, current estimate of nash distribution
y: list 1-d np.arrays (same shape as dist), current est. of payoff gradient
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
others = list(range(num_players))
others.remove(i)
    j = random.choice(others)  # use the provided RNG for reproducibility
action_j = random.choice(dist[j].size, p=dist[j])
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_i = hess_i_ij[:, action_j]
grad_y.append(y[i] - nabla_i)
if p > 0:
power = 1. / float(p)
s_i = np.linalg.norm(y[i], ord=power)
if s_i == 0:
br_i = misc.uniform_dist(y[i])
else:
br_i = (y[i] / s_i)**power
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
policy_gradient_i = nabla_i - s_i * dist[i]**p
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
br_i_inv_sparse = 1 - np.sum(br_i**(p + 1))
dist_i_inv_sparse = 1 - np.sum(dist[i]**(p + 1))
entr_br_i = s_i / (p + 1) * br_i_inv_sparse
entr_dist_i = s_i / (p + 1) * dist_i_inv_sparse
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
entr_br_vec_i = br_i_inv_sparse * br_i**(1 - p)
entr_dist_vec_i = dist_i_inv_sparse * dist[i]**(1 - p)
other_player_fx_i = (br_i - dist[i]) + 1 / (p + 1) * (
entr_br_vec_i - entr_dist_vec_i)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
action_u = random.choice(dist[j].size) # uniform, ~importance sampling
other_player_fx_j = dist[j].size * other_player_fx[j][action_u]
grad_dist_i += hess_j_ij[:, action_u] * other_player_fx_j
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
return (grad_dist, grad_y), np.mean(unreg_exp), np.mean(reg_exp)
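# Editor's usage sketch (not part of the original module): a short descent
# loop for the nonsymmetric ATE solver on a random two-player game with 3 and
# 2 actions. The payoffs and learning rates are illustrative assumptions.
if __name__ == "__main__":  # pragma: no cover
  _rng = np.random.RandomState(0)
  _payoff_matrices = {(0, 1): _rng.rand(2, 3, 2)}
  _solver = Solver(p=0.5, lrs=(1e-2, 1e-1), seed=0)
  _params = _solver.init_vars(num_strats=(3, 2), num_players=2)
  for _t in range(5):
    _grads, _unreg_exp, _reg_exp = _solver.compute_gradients(
        _params, _payoff_matrices)
    _solver.record_aux_errors(_grads)
    _params = _solver.update(_params, _grads, _t)
  print("unreg_exp=%.4f reg_exp=%.4f" % (_unreg_exp, _reg_exp))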
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/ate.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptive Tsallis Entropy (ATE) Stochastic Regret Matching Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import misc
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability as exp
class Solver(object):
"""ATE Exploitability Regret Matching Solver."""
def __init__(self, p=1., lrs=(1e-2,), optimism=True, discount=False,
rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
if (p < 0.) or (p > 1.):
raise ValueError('p must be in [0, 1]')
self.num_players = None
self.p = p
self.rnd_init = rnd_init
self.lrs = lrs
self.optimism = optimism
self.discount = discount
self.has_aux = True
self.aux_errors = []
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if len(num_strats) != num_players:
raise ValueError('Must specify num strategies for each player')
init_dist = []
for num_strats_i in num_strats:
if self.rnd_init:
init_dist_i = self.random.rand(num_strats_i)
else:
init_dist_i = np.ones(num_strats_i)
init_dist_i /= init_dist_i.sum()
init_dist.append(init_dist_i)
init_y = [np.zeros_like(dist_i) for dist_i in init_dist]
init_cumgrad = [np.zeros_like(dist_i) for dist_i in init_dist]
return (init_dist, init_y, init_cumgrad)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
concat = []
for grad in grads:
concat.extend([np.ravel(g) for g in grad])
self.aux_errors.append([np.linalg.norm(np.concatenate(concat))])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y), see ate.gradients
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
tuple of gradients (grad_dist, grad_y), see ate.gradients
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
return gradients(*params, payoff_matrices, self.num_players, self.p)
def exploitability(self, dist, payoff_matrices):
"""Compute and return tsallis entropy regularized exploitability.
Args:
dist: tuple of list of player distributions (dist,)
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
float, exploitability of current dist
"""
return exp.ate_exploitability(dist, payoff_matrices, self.p)
def update(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y, regret)
grads: tuple of variable gradients (grad_dist, grad_y, regret_delta)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y, new_regret)
"""
dist, y, regret = params
_, y_grad, regret_delta = grads
lr_y = np.clip(1 / float(t + 1), self.lrs[0], np.inf)
new_y = []
for y_i, y_grad_i in zip(y, y_grad):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
if self.discount:
gamma = t / float(t + 1)
else:
gamma = 1
new_dist = []
new_regret = []
for dist_i, regret_i, regret_delta_i in zip(dist, regret, regret_delta):
new_regret_i = gamma * regret_i + regret_delta_i
new_clipped_regrets_i = np.clip(
new_regret_i + self.optimism * regret_delta_i, 0., np.inf)
if np.sum(new_clipped_regrets_i) > 0:
new_dist_i = new_clipped_regrets_i / new_clipped_regrets_i.sum()
else:
new_dist_i = np.ones_like(dist_i) / dist_i.size
new_dist.append(new_dist_i)
new_regret.append(new_regret_i)
return (new_dist, new_y, new_regret)
def gradients(dist, y, regret, payoff_matrices, num_players, p=1):
"""Computes exploitablity gradient and aux variable gradients.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
y: list 1-d np.arrays (same shape as dist), current est. of payoff gradient
regret: list of 1-d np.arrays (same shape as dist), exploitability regrets
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
del regret
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
nabla_i = np.zeros_like(dist[i])
for j in range(num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_ij = hess_i_ij.dot(dist[j])
nabla_i += nabla_ij / float(num_players - 1)
grad_y.append(y[i] - nabla_i)
y[i] = nabla_i # TODO(imgemp): overwriting temporarily to test something
if p > 0:
power = 1. / float(p)
s_i = np.linalg.norm(y[i], ord=power)
if s_i == 0:
br_i = misc.uniform_dist(y[i])
else:
br_i = (y[i] / s_i)**power
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
policy_gradient_i = nabla_i - s_i * dist[i]**p
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
br_i_inv_sparse = 1 - np.sum(br_i**(p + 1))
dist_i_inv_sparse = 1 - np.sum(dist[i]**(p + 1))
entr_br_i = s_i / (p + 1) * br_i_inv_sparse
entr_dist_i = s_i / (p + 1) * dist_i_inv_sparse
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
entr_br_vec_i = br_i_inv_sparse * br_i**(1 - p)
entr_dist_vec_i = dist_i_inv_sparse * dist[i]**(1 - p)
other_player_fx_i = (br_i - dist[i]) + 1 / (p + 1) * (
entr_br_vec_i - entr_dist_vec_i)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
regret_delta = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
grad_dist_i += hess_j_ij.dot(other_player_fx[j])
regret_delta_i = -(grad_dist_i - grad_dist_i.dot(dist[i]))
# regret_delta_i = y[i] - y[i].dot(dist[i])
grad_dist.append(grad_dist_i)
regret_delta.append(regret_delta_i)
return (grad_dist, grad_y, regret_delta), np.mean(unreg_exp), np.mean(reg_exp)
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/ate_regmatch.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regret Matching Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
class Solver(object):
"""Regret-matching Solver."""
def __init__(self, optimism=True, discount=False, rnd_init=False, seed=None,
**kwargs):
"""Ctor."""
del kwargs
self.num_players = None
self.lrs = None
self.optimism = optimism
self.discount = discount
self.rnd_init = rnd_init
self.has_aux = True
self.aux_errors = []
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if len(num_strats) != num_players:
raise ValueError('Must specify num strategies for each player')
init_dist = []
for num_strats_i in num_strats:
if self.rnd_init:
init_dist_i = self.random.rand(num_strats_i)
else:
init_dist_i = np.ones(num_strats_i)
init_dist_i /= init_dist_i.sum()
init_dist.append(init_dist_i)
init_regret = [np.zeros_like(dist_i) for dist_i in init_dist]
return (init_dist, init_regret)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_regret = grads[1]
grad_regret_flat = np.concatenate(grad_regret)
self.aux_errors.append([np.linalg.norm(grad_regret_flat)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, regret), see regmatch.gradients
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
      tuple of gradients (grad_dist, grad_regret), see regmatch.gradients
unregularized exploitability (stochastic estimate)
solver exploitability (stochastic estimate) - NaN
"""
return gradients(*params, payoff_matrices, self.num_players)
def exploitability(self, params, payoff_matrices):
"""Regret matching does not minimize any exploitability so return NaN.
Args:
params: tuple of params (dist,)
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
np.NaN
"""
del params
del payoff_matrices
return np.NaN
def update(self, params, grads, t):
"""Update cumulative regret and strategy (dist).
Args:
params: tuple of variables to be updated (dist, regret)
grads: tuple of variable gradients (grad_dist, grad_regret)
      t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_regret)
"""
dist, regret = params
regret_delta = grads[1]
if self.discount:
gamma = t / float(t + 1)
else:
gamma = 1
new_dist = []
new_regret = []
for dist_i, regret_i, regret_delta_i in zip(dist, regret, regret_delta):
new_regret_i = gamma * regret_i + regret_delta_i
new_clipped_regrets_i = np.clip(
new_regret_i + self.optimism * regret_delta_i, 0., np.inf)
if np.sum(new_clipped_regrets_i) > 0:
new_dist_i = new_clipped_regrets_i / new_clipped_regrets_i.sum()
else:
new_dist_i = np.ones_like(dist_i) / dist_i.size
new_dist.append(new_dist_i)
new_regret.append(new_regret_i)
new_params = (new_dist, new_regret)
return new_params
def gradients(dist, regret, payoff_matrices, num_players):
"""Computes regret delta to be added to regret in update.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
regret: list of 1-d np.arrays (same as dist), current estimate of regrets
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
Returns:
deltas w.r.t. (dist, regret) as tuple
unregularized exploitability (stochastic estimate)
solver exploitability (stochastic estimate) - NaN
"""
del regret
# first compute best responses and payoff gradients
grad_dist = []
grad_regret = []
unreg_exp = []
for i in range(num_players):
nabla_i = np.zeros_like(dist[i])
# TODO(imgemp): decide if averaging over nablas provides best comparison
for j in range(num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_ij = hess_i_ij.dot(dist[j])
nabla_i += nabla_ij / float(num_players - 1)
grad_dist_i = np.NaN * np.ones_like(nabla_i)
grad_dist.append(grad_dist_i)
utility_i = nabla_i.dot(dist[i])
grad_regret_i = nabla_i - utility_i
grad_regret.append(grad_regret_i)
unreg_exp.append(np.max(nabla_i) - nabla_i.dot(dist[i]))
return (grad_dist, grad_regret), np.mean(unreg_exp), np.NaN
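# Editor's usage sketch (not part of the original module): a few
# regret-matching updates on a random two-player game with 3 and 2 actions;
# the inputs are illustrative only.
if __name__ == "__main__":  # pragma: no cover
  _rng = np.random.RandomState(0)
  _payoff_matrices = {(0, 1): _rng.rand(2, 3, 2)}
  _solver = Solver(optimism=True, discount=False, seed=0)
  _params = _solver.init_vars(num_strats=(3, 2), num_players=2)
  for _t in range(5):
    _grads, _unreg_exp, _ = _solver.compute_gradients(_params,
                                                      _payoff_matrices)
    _solver.record_aux_errors(_grads)
    _params = _solver.update(_params, _grads, _t)
  print("unreg_exp=%.4f" % _unreg_exp)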
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/regmatch.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.adidas_utils.nonsymmetric."""
import itertools
from absl import logging # pylint:disable=unused-import
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from scipy.spatial.distance import cosine
from open_spiel.python.algorithms.adidas_utils.helpers import misc
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import ate
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import ped
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import qre
class ExploitabilityDescentTest(parameterized.TestCase):
@staticmethod
def numerical_gradient(fun, x, eps=np.sqrt(np.finfo(float).eps)):
fun_0 = fun(x)
num_grad = [np.zeros_like(xi) for xi in x]
x_plus_dx = [np.copy(xi) for xi in x]
for i in range(len(x)):
for j in range(len(x[i])):
x_plus_dx[i][j] = x[i][j] + eps
num_grad[i][j] = (fun(x_plus_dx) - fun_0) / eps
x_plus_dx[i][j] = x[i][j]
return num_grad
@staticmethod
def prep_params(dist, pt, num_params):
params = [dist]
if num_params > 1:
num_players = len(dist)
nabla = [misc.pt_reduce(pt[i], dist, [i]) for i in range(num_players)]
params += [nabla] # policy_gradient
return tuple(params)
@parameterized.named_parameters(
("PED", (ped, False)),
("ATE_p=1", (ate, 1., False)),
("ATE_p=0.5", (ate, 0.5, False)),
("ATE_p=0.1", (ate, 0.1, False)),
("ATE_p=0", (ate, 0., False)),
("QRE_t=0.0", (qre, 0.0, False)),
("QRE_t=0.1", (qre, 0.1, False))
)
def test_exploitability_gradient_on_nonsymmetric_three_player_matrix_games(
self, solver_tuple, trials=100, max_num_strats=3, atol=1e-1, rtol=1e-1,
seed=1234):
num_players = 3
solver = solver_tuple[0].Solver(*solver_tuple[1:])
random = np.random.RandomState(seed)
successes = []
for _ in range(trials):
num_strats = random.randint(low=2, high=max_num_strats + 1,
size=num_players)
num_strats = tuple([int(ns) for ns in num_strats])
payoff_tensor = random.rand(num_players, *num_strats)
num_params = len(solver.init_vars(num_strats, num_players))
dirichlet_alpha = [np.ones(num_strats_i) for num_strats_i in num_strats]
dist = [random.dirichlet(alpha_i) for alpha_i in dirichlet_alpha]
params = self.prep_params(dist, payoff_tensor, num_params)
payoff_matrices = {}
for pi, pj in itertools.combinations(range(num_players), 2):
key = (pi, pj)
pt_i = misc.pt_reduce(payoff_tensor[pi], dist, [pi, pj])
pt_j = misc.pt_reduce(payoff_tensor[pj], dist, [pi, pj])
payoff_matrices[key] = np.stack((pt_i, pt_j), axis=0)
grad = solver.compute_gradients(params, payoff_matrices)[0][0]
grad = np.concatenate(grad) / float(num_players)
exp = lambda x: solver.exploitability(x, payoff_tensor) # pylint: disable=cell-var-from-loop
num_grad = np.concatenate(self.numerical_gradient(exp, dist))
successes += [np.logical_and(np.allclose(grad, num_grad, rtol, atol),
cosine(grad, num_grad) <= atol)]
perc = 100 * np.mean(successes)
logging.info("gradient accuracy success rate out of %d is %f", trials, perc)
self.assertGreaterEqual(
perc, 95., "exploitability gradient accuracy is too poor")
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/solvers_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantal Response Equilibrium (QRE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability as exp
class Solver(object):
"""QRE Solver."""
def __init__(self, temperature=1., proj_grad=True, euclidean=False,
cheap=False, lrs=(1e-2, 1e-1), exp_thresh=-1., rnd_init=False,
seed=None, **kwargs):
"""Ctor."""
del kwargs
if temperature < 0.:
raise ValueError('temperature must be non-negative')
self.num_players = None
self.temperature = temperature
self.proj_grad = proj_grad
self.cheap = cheap
self.rnd_init = rnd_init
self.lrs = lrs
self.exp_thresh = exp_thresh
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if len(num_strats) != num_players:
raise ValueError('Must specify num strategies for each player')
init_dist = []
for num_strats_i in num_strats:
if self.rnd_init:
init_dist_i = self.random.rand(num_strats_i)
else:
init_dist_i = np.ones(num_strats_i)
init_dist_i /= init_dist_i.sum()
init_dist.append(init_dist_i)
init_y = [np.zeros_like(dist_i) for dist_i in init_dist]
init_anneal_steps = 0
return (init_dist, init_y, init_anneal_steps)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
# call ravel in case use y to track entire payoff matrices in future
grad_y_flat = np.concatenate([np.ravel(g) for g in grad_y])
self.aux_errors.append([np.linalg.norm(grad_y_flat)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y, anneal_steps), see gradients
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
tuple of gradients (grad_dist, grad_y, grad_anneal_steps), see gradients
unregularized exploitability (stochastic estimate)
      shannon regularized exploitability (stochastic estimate)
"""
if self.cheap:
return self.cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.temperature,
self.proj_grad)
else:
return self.gradients(*params, payoff_matrices, self.num_players,
self.temperature, self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return tsallis entropy regularized exploitability.
Args:
params: tuple of params (dist, y), see ate.gradients
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
float, exploitability of current dist
"""
return exp.qre_exploitability(params, payoff_matrices, self.temperature)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y, anneal_steps)
grads: tuple of variable gradients (grad_dist, grad_y, grad_anneal_steps)
      t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y, new_anneal_steps)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = dist_i - lr_dist * dist_grad_i
new_dist_i = simplex.euclidean_projection_onto_simplex(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
new_anneal_steps = params[2] + grads[2]
return (new_dist, new_y, new_anneal_steps)
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, y, anneal_steps)
grads: tuple of variable gradients (grad_dist, grad_y, grad_anneal_steps)
      t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist, new_y, new_anneal_steps)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = np.log(np.clip(dist_i, 0., np.inf)) - lr_dist * dist_grad_i
new_dist_i = special.softmax(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
new_anneal_steps = params[2] + grads[2]
return (new_dist, new_y, new_anneal_steps)
def gradients(self, dist, y, anneal_steps, payoff_matrices, num_players,
temperature=0., proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
y: list 1-d np.arrays (same shape as dist), current est. of payoff
gradient
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is
abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
shannon regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
nabla_i = np.zeros_like(dist[i])
for j in range(num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_ij = hess_i_ij.dot(dist[j])
nabla_i += nabla_ij / float(num_players - 1)
grad_y.append(y[i] - nabla_i)
if temperature >= 1e-3:
br_i = special.softmax(y[i] / temperature)
br_i_mat = (np.diag(br_i) - np.outer(br_i, br_i)) / temperature
br_i_policy_gradient = nabla_i - temperature * (np.log(br_i) + 1)
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
br_i_mat = np.zeros((br_i.size, br_i.size))
br_i_policy_gradient = np.zeros_like(br_i)
policy_gradient_i = nabla_i - temperature * (np.log(dist[i]) + 1)
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
entr_br_i = temperature * special.entr(br_i).sum()
entr_dist_i = temperature * special.entr(dist[i]).sum()
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
other_player_fx_i = (br_i - dist[i]) + br_i_mat.dot(br_i_policy_gradient)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
grad_dist_i += hess_j_ij.dot(other_player_fx[j])
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
unreg_exp_mean = np.mean(unreg_exp)
reg_exp_mean = np.mean(reg_exp)
_, lr_y = self.lrs
if (reg_exp_mean < self.exp_thresh) and (anneal_steps >= 1 / lr_y):
self.temperature = np.clip(temperature / 2., 0., np.inf)
grad_anneal_steps = -anneal_steps
else:
grad_anneal_steps = 1
return (grad_dist, grad_y, grad_anneal_steps), unreg_exp_mean, reg_exp_mean
def cheap_gradients(self, random, dist, y, anneal_steps, payoff_matrices,
num_players, temperature=0., proj_grad=True):
"""Computes exploitablity gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input so technically uses
O(d^2) compute but only a single column of payoff_matrices is used to
perform the update so can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: list of 1-d np.arrays, current estimate of nash distribution
y: list 1-d np.arrays (same shape as dist), current est. of payoff
gradient
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is
abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
shannon regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
others = list(range(num_players))
others.remove(i)
      j = random.choice(others)  # sample an opponent with the provided RNG
action_j = random.choice(dist[j].size, p=dist[j])
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_i = hess_i_ij[:, action_j]
grad_y.append(y[i] - nabla_i)
if temperature >= 1e-3:
br_i = special.softmax(y[i] / temperature)
br_i_mat = (np.diag(br_i) - np.outer(br_i, br_i)) / temperature
br_i_policy_gradient = nabla_i - temperature * (np.log(br_i) + 1)
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
br_i_mat = np.zeros((br_i.size, br_i.size))
br_i_policy_gradient = np.zeros_like(br_i)
policy_gradient_i = nabla_i - temperature * (np.log(dist[i]) + 1)
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
entr_br_i = temperature * special.entr(br_i).sum()
entr_dist_i = temperature * special.entr(dist[i]).sum()
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
other_player_fx_i = (br_i - dist[i]) + br_i_mat.dot(br_i_policy_gradient)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
action_u = random.choice(dist[j].size) # uniform, ~importance sampling
other_player_fx_j = dist[j].size * other_player_fx[j][action_u]
grad_dist_i += hess_j_ij[:, action_u] * other_player_fx_j
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
unreg_exp_mean = np.mean(unreg_exp)
reg_exp_mean = np.mean(reg_exp)
_, lr_y = self.lrs
if (reg_exp_mean < self.exp_thresh) and (anneal_steps >= 1 / lr_y):
self.temperature = np.clip(temperature / 2., 0., np.inf)
grad_anneal_steps = -anneal_steps
else:
grad_anneal_steps = 1
return (grad_dist, grad_y, grad_anneal_steps), unreg_exp_mean, reg_exp_mean
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/qre_anneal.py |
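Every solver in this directory consumes the payoff_matrices structure described in the docstrings above. The following is a minimal sketch (not part of the library) of that layout for a hypothetical 2-player, 2-action game, showing how the two index orders used in the loops recover each player's payoff gradient.
import numpy as np

# payoffs to player 0 and player 1, both indexed (action_0, action_1)
A = np.array([[1., 0.],
              [0., 1.]])
B = 1. - A
# keys are sorted agent tuples (i, j) with i < j; slice [0] holds i's payoff
# matrix and slice [1] holds j's payoff matrix
payoff_matrices = {(0, 1): np.stack([A, B])}

dist = [np.array([.5, .5]), np.array([.5, .5])]
# payoff gradient for player i: use slice [0] when i < j, else transpose [1]
nabla_0 = payoff_matrices[(0, 1)][0].dot(dist[1])    # i = 0 < j = 1
nabla_1 = payoff_matrices[(0, 1)][1].T.dot(dist[0])  # i = 1 > j = 0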
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Population Exploitability Descent (PED) Stochastic Approx. Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import updates
class Solver(updates.Solver):
"""PED Solver."""
def __init__(self, proj_grad=True, euclidean=False, lrs=(1e-1,),
rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
super().__init__(proj_grad, euclidean, rnd_init, seed)
self.lrs = lrs
  def compute_gradients(self, params, payoff_matrices):
    """Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist,), see ped.gradients
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
      tuple of gradients (grad_dist,), see ped.gradients
unregularized exploitability (stochastic estimate)
unregularized exploitability (stochastic estimate) *duplicate
"""
return gradients(*params, payoff_matrices, self.num_players, self.proj_grad)
def gradients(dist, payoff_matrices, num_players, proj_grad=True):
  """Computes exploitability gradient.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist) as tuple
unregularized exploitability (stochastic estimate)
unregularized exploitability (stochastic estimate) *duplicate
"""
# first compute best responses and payoff gradients
nabla = []
br = []
unreg_exp = []
for i in range(num_players):
nabla_i = np.zeros_like(dist[i])
for j in range(num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_ij = hess_i_ij.dot(dist[j])
nabla_i += nabla_ij / float(num_players - 1)
nabla.append(nabla_i)
power = np.inf
s_i = np.linalg.norm(nabla_i, ord=power)
br_i = np.zeros_like(nabla_i)
maxima_i = (nabla_i == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
br.append(br_i)
unreg_exp.append(np.max(nabla_i) - nabla_i.dot(dist[i]))
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -nabla[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
grad_dist_i += hess_j_ij.dot(br[j] - dist[j])
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
return (grad_dist,), np.mean(unreg_exp), np.mean(unreg_exp)
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/ped.py |
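A hedged sketch of calling the module-level gradients function above on a small hand-built game; the payoff matrices and distributions are hypothetical, and it assumes OpenSpiel is installed so the module import resolves.
import numpy as np
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import ped

# 2-player, 2-action game in the dictionary format expected by the solver
payoff_matrices = {(0, 1): np.stack([np.array([[1., 0.], [0., 1.]]),
                                     np.array([[0., 1.], [1., 0.]])])}
dist = [np.ones(2) / 2., np.ones(2) / 2.]  # uniform strategies for both players
(grad_dist,), unreg_exp, _ = ped.gradients(dist, payoff_matrices, num_players=2)
print(unreg_exp)  # exploitability of the uniform joint strategy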
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantal Response Equilibrium (QRE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability as exp
class Solver(object):
"""QRE Solver."""
def __init__(self, temperature=0., proj_grad=True, euclidean=False,
cheap=False, lrs=(1e-2, 1e-1), rnd_init=False, seed=None,
**kwargs):
"""Ctor."""
del kwargs
if temperature < 0.:
raise ValueError('temperature must be non-negative')
self.num_players = None
self.temperature = temperature
self.proj_grad = proj_grad
self.cheap = cheap
self.rnd_init = rnd_init
self.lrs = lrs
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if len(num_strats) != num_players:
raise ValueError('Must specify num strategies for each player')
init_dist = []
for num_strats_i in num_strats:
if self.rnd_init:
init_dist_i = self.random.rand(num_strats_i)
else:
init_dist_i = np.ones(num_strats_i)
init_dist_i /= init_dist_i.sum()
init_dist.append(init_dist_i)
init_y = [np.zeros_like(dist_i) for dist_i in init_dist]
return (init_dist, init_y)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
# call ravel in case use y to track entire payoff matrices in future
grad_y_flat = np.concatenate([np.ravel(g) for g in grad_y])
self.aux_errors.append([np.linalg.norm(grad_y_flat)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
      params: tuple of params (dist, y), see qre.gradients
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
      tuple of gradients (grad_dist, grad_y), see qre.gradients
      unregularized exploitability (stochastic estimate)
      shannon regularized exploitability (stochastic estimate)
"""
if self.cheap:
return cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.temperature, self.proj_grad)
else:
return gradients(*params, payoff_matrices, self.num_players,
self.temperature, self.proj_grad)
  def exploitability(self, params, payoff_matrices):
    """Compute and return shannon entropy regularized exploitability.
Args:
      params: tuple of params (dist, y), see qre.gradients
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
float, exploitability of current dist
"""
return exp.qre_exploitability(params, payoff_matrices, self.temperature)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration (unused)
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = dist_i - lr_dist * dist_grad_i
new_dist_i = simplex.euclidean_projection_onto_simplex(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
return (new_dist, new_y)
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration (unused)
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = np.log(np.clip(dist_i, 0., np.inf)) - lr_dist * dist_grad_i
new_dist_i = special.softmax(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
return (new_dist, new_y)
def gradients(dist, y, payoff_matrices, num_players, temperature=0.,
              proj_grad=True):
  """Computes exploitability gradient and aux variable gradients.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
    y: list of 1-d np.arrays (same shape as dist), current est. of payoff
      gradient
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
shannon regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
nabla_i = np.zeros_like(dist[i])
for j in range(num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_ij = hess_i_ij.dot(dist[j])
nabla_i += nabla_ij / float(num_players - 1)
grad_y.append(y[i] - nabla_i)
if temperature > 0:
br_i = special.softmax(y[i] / temperature)
br_i_mat = (np.diag(br_i) - np.outer(br_i, br_i)) / temperature
br_i_policy_gradient = nabla_i - temperature * (np.log(br_i) + 1)
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
br_i_mat = np.zeros((br_i.size, br_i.size))
br_i_policy_gradient = np.zeros_like(br_i)
    policy_gradient_i = np.array(nabla_i)  # copy to avoid aliasing nabla_i
    if temperature > 0:
      policy_gradient_i -= temperature * (np.log(dist[i]) + 1)
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
entr_br_i = temperature * special.entr(br_i).sum()
entr_dist_i = temperature * special.entr(dist[i]).sum()
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
other_player_fx_i = (br_i - dist[i]) + br_i_mat.dot(br_i_policy_gradient)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
grad_dist_i += hess_j_ij.dot(other_player_fx[j])
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
return (grad_dist, grad_y), np.mean(unreg_exp), np.mean(reg_exp)
def cheap_gradients(random, dist, y, payoff_matrices, num_players,
                    temperature=0., proj_grad=True):
  """Computes exploitability gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input so technically uses O(d^2)
compute but only a single column of payoff_matrices is used to perform the
update so can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: list of 1-d np.arrays, current estimate of nash distribution
    y: list of 1-d np.arrays (same shape as dist), current est. of payoff
      gradient
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbreviated
temperature: non-negative float, default 0.
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
shannon regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
others = list(range(num_players))
others.remove(i)
    j = random.choice(others)  # sample an opponent with the provided RNG
action_j = random.choice(dist[j].size, p=dist[j])
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_i = hess_i_ij[:, action_j]
grad_y.append(y[i] - nabla_i)
if temperature > 0:
br_i = special.softmax(y[i] / temperature)
br_i_mat = (np.diag(br_i) - np.outer(br_i, br_i)) / temperature
br_i_policy_gradient = nabla_i - temperature * (np.log(br_i) + 1)
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
br_i_mat = np.zeros((br_i.size, br_i.size))
br_i_policy_gradient = np.zeros_like(br_i)
    # copy: nabla_i is a view into payoff_matrices and must not be mutated
    policy_gradient_i = np.array(nabla_i)
    if temperature > 0:
      policy_gradient_i -= temperature * (np.log(dist[i]) + 1)
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
entr_br_i = temperature * special.entr(br_i).sum()
entr_dist_i = temperature * special.entr(dist[i]).sum()
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
other_player_fx_i = (br_i - dist[i]) + br_i_mat.dot(br_i_policy_gradient)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
action_u = random.choice(dist[j].size) # uniform, ~importance sampling
other_player_fx_j = dist[j].size * other_player_fx[j][action_u]
grad_dist_i += hess_j_ij[:, action_u] * other_player_fx_j
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
return (grad_dist, grad_y), np.mean(unreg_exp), np.mean(reg_exp)
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/qre.py |
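The Solver above is typically driven by an outer descent loop. Below is a usage sketch under the assumption that OpenSpiel is installed; the game, hyperparameters, and iteration count are arbitrary choices for illustration.
import numpy as np
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import qre

payoff_matrices = {(0, 1): np.stack([np.array([[1., 0.], [0., 1.]]),
                                     np.array([[0., 1.], [1., 0.]])])}
solver = qre.Solver(temperature=0.1, lrs=(1e-2, 1e-1), seed=0)
params = solver.init_vars(num_strats=[2, 2], num_players=2)  # (dist, y)
for t in range(1000):
  grads, unreg_exp, reg_exp = solver.compute_gradients(params, payoff_matrices)
  solver.record_aux_errors(grads)
  params = solver.update(params, grads, t)  # entropic mirror descent by default
print(params[0])  # approximate QRE strategies for both players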
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptive Tsallis Entropy (ATE) Stochastic Approximate Nash Solver."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import misc
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability as exp
class Solver(object):
"""ATE Solver with temperature annealing."""
def __init__(self, p=1., proj_grad=True, euclidean=False, cheap=False,
lrs=(1e-2, 1e-1), exp_thresh=-1., rnd_init=False, seed=None,
**kwargs):
"""Ctor."""
del kwargs
if (p < 0.) or (p > 1.):
raise ValueError('p must be in [0, 1]')
self.num_players = None
self.p_init = p
self.p = p
self.proj_grad = proj_grad
self.cheap = cheap
self.rnd_init = rnd_init
self.lrs = lrs
self.exp_thresh = exp_thresh
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if len(num_strats) != num_players:
raise ValueError('Must specify num strategies for each player')
init_dist = []
for num_strats_i in num_strats:
if self.rnd_init:
init_dist_i = self.random.rand(num_strats_i)
else:
init_dist_i = np.ones(num_strats_i)
init_dist_i /= init_dist_i.sum()
init_dist.append(init_dist_i)
init_y = [np.zeros_like(dist_i) for dist_i in init_dist]
init_anneal_steps = 0
return (init_dist, init_y, init_anneal_steps)
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
# call ravel in case use y to track entire payoff matrices in future
grad_y_flat = np.concatenate([np.ravel(g) for g in grad_y])
self.aux_errors.append([np.linalg.norm(grad_y_flat)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y, anneal_steps), see gradients
      payoff_matrices: dictionary with keys as tuples of agents (i, j) and
        values of (2 x A x A) np.arrays, payoffs for each joint action. keys
        are sorted and arrays should be indexed in the same order
Returns:
tuple of gradients (grad_dist, grad_y, grad_anneal_steps), see gradients
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
if self.cheap:
return self.cheap_gradients(self.random, *params, payoff_matrices,
self.num_players, self.p, self.proj_grad)
else:
return self.gradients(*params, payoff_matrices, self.num_players, self.p,
self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return tsallis entropy regularized exploitability.
Args:
      params: tuple of params (dist, y, anneal_steps), see gradients
      payoff_matrices: dictionary with keys as tuples of agents (i, j) and
        values of (2 x A x A) np.arrays, payoffs for each joint action. keys
        are sorted and arrays should be indexed in the same order
Returns:
float, exploitability of current dist
"""
return exp.ate_exploitability(params, payoff_matrices, self.p)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y, anneal_steps)
grads: tuple of variable gradients (grad_dist, grad_y, grad_anneal_steps)
t: int, solver iteration (unused)
Returns:
new_params: tuple of update params (new_dist, new_y, new_anneal_steps)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = dist_i - lr_dist * dist_grad_i
new_dist_i = simplex.euclidean_projection_onto_simplex(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
new_anneal_steps = params[2] + grads[2]
return (new_dist, new_y, new_anneal_steps)
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
      params: tuple of variables to be updated (dist, y, anneal_steps)
      grads: tuple of variable gradients (grad_dist, grad_y, grad_anneal_steps)
t: int, solver iteration (unused)
Returns:
      new_params: tuple of update params (new_dist, new_y, new_anneal_steps)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = np.log(np.clip(dist_i, 0., np.inf)) - lr_dist * dist_grad_i
new_dist_i = special.softmax(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = []
for y_i, y_grad_i in zip(params[1], grads[1]):
new_y_i = y_i - lr_y * y_grad_i
new_y_i = np.clip(new_y_i, 0., np.inf)
new_y.append(new_y_i)
new_anneal_steps = params[2] + grads[2]
return (new_dist, new_y, new_anneal_steps)
def gradients(self, dist, y, anneal_steps, payoff_matrices, num_players, p=1,
                proj_grad=True):
    """Computes exploitability gradient and aux variable gradients.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
      y: list of 1-d np.arrays (same shape as dist), current est. of payoff grad
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbrev'd
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
      gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
nabla_i = np.zeros_like(dist[i])
for j in range(num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_ij = hess_i_ij.dot(dist[j])
nabla_i += nabla_ij / float(num_players - 1)
grad_y.append(y[i] - nabla_i)
if p > 1e-2: # encounter numerical under/overflow when power > 100.
power = 1. / float(p)
s_i = np.linalg.norm(y[i], ord=power)
if s_i == 0:
br_i = misc.uniform_dist(y[i])
else:
br_i = (y[i] / s_i)**power
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
policy_gradient_i = nabla_i - s_i * dist[i]**p
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
br_i_inv_sparse = 1 - np.sum(br_i**(p + 1))
dist_i_inv_sparse = 1 - np.sum(dist[i]**(p + 1))
entr_br_i = s_i / (p + 1) * br_i_inv_sparse
entr_dist_i = s_i / (p + 1) * dist_i_inv_sparse
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
entr_br_vec_i = br_i_inv_sparse * br_i**(1 - p)
entr_dist_vec_i = dist_i_inv_sparse * dist[i]**(1 - p)
other_player_fx_i = (br_i - dist[i]) + 1 / (p + 1) * (
entr_br_vec_i - entr_dist_vec_i)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
grad_dist_i += hess_j_ij.dot(other_player_fx[j])
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
unreg_exp_mean = np.mean(unreg_exp)
reg_exp_mean = np.mean(reg_exp)
_, lr_y = self.lrs
if (reg_exp_mean < self.exp_thresh) and (anneal_steps >= 1 / lr_y):
self.p = np.clip(p / 2., 0., 1.)
grad_anneal_steps = -anneal_steps
else:
grad_anneal_steps = 1
return (grad_dist, grad_y, grad_anneal_steps), unreg_exp_mean, reg_exp_mean
def cheap_gradients(self, random, dist, y, anneal_steps, payoff_matrices,
                      num_players, p=1, proj_grad=True):
    """Computes exploitability gradient and aux variable gradients with samples.
This implementation takes payoff_matrices as input so technically uses
O(d^2) compute but only a single column of payoff_matrices is used to
perform the update so can be re-implemented in O(d) if needed.
Args:
random: random number generator, np.random.RandomState(seed)
dist: list of 1-d np.arrays, current estimate of nash distribution
      y: list of 1-d np.arrays (same shape as dist), current est. of payoff grad
anneal_steps: int, elapsed num steps since last anneal
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
num_players: int, number of players, in case payoff_matrices is abbrev'd
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
      gradient of exploitability w.r.t. (dist, y, anneal_steps) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = []
unreg_exp = []
reg_exp = []
for i in range(num_players):
others = list(range(num_players))
others.remove(i)
      j = random.choice(others)  # sample an opponent with the provided RNG
action_j = random.choice(dist[j].size, p=dist[j])
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
nabla_i = hess_i_ij[:, action_j]
grad_y.append(y[i] - nabla_i)
if p > 1e-2: # encounter numerical under/overflow when power > 100.
power = 1. / float(p)
s_i = np.linalg.norm(y[i], ord=power)
if s_i == 0:
br_i = misc.uniform_dist(y[i])
else:
br_i = (y[i] / s_i)**power
else:
power = np.inf
s_i = np.linalg.norm(y[i], ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (y[i] == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
policy_gradient_i = nabla_i - s_i * dist[i]**p
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(y[i]) - y[i].dot(dist[i]))
br_i_inv_sparse = 1 - np.sum(br_i**(p + 1))
dist_i_inv_sparse = 1 - np.sum(dist[i]**(p + 1))
entr_br_i = s_i / (p + 1) * br_i_inv_sparse
entr_dist_i = s_i / (p + 1) * dist_i_inv_sparse
reg_exp.append(y[i].dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
entr_br_vec_i = br_i_inv_sparse * br_i**(1 - p)
entr_dist_vec_i = dist_i_inv_sparse * dist[i]**(1 - p)
other_player_fx_i = (br_i - dist[i]) + 1 / (p + 1) * (
entr_br_vec_i - entr_dist_vec_i)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(num_players):
grad_dist_i = -policy_gradient[i]
for j in range(num_players):
if j == i:
continue
if i < j:
hess_j_ij = payoff_matrices[(i, j)][1]
else:
hess_j_ij = payoff_matrices[(j, i)][0].T
action_u = random.choice(dist[j].size) # uniform, ~importance sampling
other_player_fx_j = dist[j].size * other_player_fx[j][action_u]
grad_dist_i += hess_j_ij[:, action_u] * other_player_fx_j
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
unreg_exp_mean = np.mean(unreg_exp)
reg_exp_mean = np.mean(reg_exp)
_, lr_y = self.lrs
if (reg_exp_mean < self.exp_thresh) and (anneal_steps >= 1 / lr_y):
self.p = np.clip(p / 2., 0., 1.)
grad_anneal_steps = -anneal_steps
else:
grad_anneal_steps = 1
return (grad_dist, grad_y, grad_anneal_steps), unreg_exp_mean, reg_exp_mean
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/ate_anneal.py |
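A similar hedged driver for the annealed ATE solver above; the extra anneal_steps entry in params is managed by the solver itself. The game and hyperparameters are again hypothetical, and OpenSpiel is assumed to be installed.
import numpy as np
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import ate_anneal

payoff_matrices = {(0, 1): np.stack([np.array([[1., 0.], [0., 1.]]),
                                     np.array([[0., 1.], [1., 0.]])])}
solver = ate_anneal.Solver(p=1., lrs=(1e-2, 1e-1), exp_thresh=0.01, seed=0)
# params is the triple (dist, y, anneal_steps)
params = solver.init_vars(num_strats=[2, 2], num_players=2)
for t in range(1000):
  grads, unreg_exp, reg_exp = solver.compute_gradients(params, payoff_matrices)
  params = solver.update(params, grads, t)
# p is halved (annealed toward 0) once reg_exp falls below exp_thresh
print(solver.p, params[0])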
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptive Tsallis Entropy (ATE) Stochastic Approximate Nash Solver."""
import itertools
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import misc
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability as exp
class Solver(object):
"""ATE Solver that constructs a polymatrix approximation to the full game."""
def __init__(self, p=1., proj_grad=True, euclidean=False, cheap=False,
lrs=(1e-2, 1e-1), rnd_init=False, seed=None, **kwargs):
"""Ctor."""
del kwargs
if (p < 0.) or (p > 1.):
raise ValueError('p must be in [0, 1]')
self.num_strats = None
self.num_players = None
self.p = p
self.proj_grad = proj_grad
self.cheap = cheap
self.rnd_init = rnd_init
self.lrs = lrs
self.has_aux = True
self.aux_errors = []
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_strats = num_strats
self.num_players = num_players
if len(num_strats) != num_players:
raise ValueError('Must specify num strategies for each player')
init_dist = []
for num_strats_i in num_strats:
if self.rnd_init:
init_dist_i = self.random.rand(num_strats_i)
else:
init_dist_i = np.ones(num_strats_i)
init_dist_i /= init_dist_i.sum()
init_dist.append(init_dist_i)
init_y = self.init_polymatrix(num_strats, num_players)
return (init_dist, init_y)
def init_polymatrix(self, num_strats, num_players):
"""Initialize all pairwise bimatrix games to zero and return as dict."""
init_pm = dict()
for i, j in itertools.combinations(range(num_players), 2):
init_pm[(i, j)] = np.zeros((2, num_strats[i], num_strats[j])) # i < j
return init_pm
def record_aux_errors(self, grads):
"""Record errors for the auxiliary variables."""
grad_y = grads[1]
# call ravel in case use y to track entire payoff matrices in future
grad_y_flat = np.concatenate([np.ravel(g) for g in grad_y.values()])
self.aux_errors.append([np.linalg.norm(grad_y_flat)])
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients (and exploitabilities) for all parameters.
Args:
params: tuple of params (dist, y), see ate.gradients
      payoff_matrices: dictionary with keys as tuples of agents (i, j) and
        values of (2 x A x A) np.arrays, payoffs for each joint action. keys
        are sorted and arrays should be indexed in the same order
Returns:
tuple of gradients (grad_dist, grad_y), see ate.gradients
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
return self.gradients(*params, payoff_matrices, self.p, self.proj_grad)
def exploitability(self, params, payoff_matrices):
"""Compute and return tsallis entropy regularized exploitability.
Args:
params: tuple of params (dist, y), see ate.gradients
      payoff_matrices: dictionary with keys as tuples of agents (i, j) and
        values of (2 x A x A) np.arrays, payoffs for each joint action. keys
        are sorted and arrays should be indexed in the same order
Returns:
float, exploitability of current dist
"""
return exp.ate_exploitability(params, payoff_matrices, self.p)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration (unused)
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = dist_i - lr_dist * dist_grad_i
new_dist_i = simplex.euclidean_projection_onto_simplex(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = dict()
for i, j in params[1]:
y_ij = params[1][(i, j)]
y_grad_ij = grads[1][(i, j)]
new_y_ij = y_ij - lr_y * y_grad_ij
new_y_ij = np.clip(new_y_ij, 0., np.inf)
new_y[(i, j)] = new_y_ij
return (new_dist, new_y)
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist, y)
grads: tuple of variable gradients (grad_dist, grad_y)
t: int, solver iteration (unused)
Returns:
new_params: tuple of update params (new_dist, new_y)
"""
lr_dist, lr_y = self.lrs
new_dist = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = np.log(np.clip(dist_i, 0., np.inf)) - lr_dist * dist_grad_i
new_dist_i = special.softmax(new_dist_i)
new_dist.append(new_dist_i)
lr_y = np.clip(1 / float(t + 1), lr_y, np.inf)
new_y = dict()
for i, j in params[1]:
y_ij = params[1][(i, j)]
y_grad_ij = grads[1][(i, j)]
new_y_ij = y_ij - lr_y * y_grad_ij
new_y_ij = np.clip(new_y_ij, 0., np.inf)
new_y[(i, j)] = new_y_ij
return (new_dist, new_y)
  def gradients(self, dist, y, payoff_matrices, p=1, proj_grad=True):
    """Computes exploitability gradient and aux variable gradients.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
      y: dict of (2 x A_i x A_j) np.arrays keyed by sorted player pairs (i, j),
        current estimate of each pairwise payoff matrix
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
proj_grad: bool, if True, projects dist gradient onto simplex
Returns:
gradient of exploitability w.r.t. (dist, y) as tuple
unregularized exploitability (stochastic estimate)
tsallis regularized exploitability (stochastic estimate)
"""
# first compute policy gradients and player effects (fx)
policy_gradient = []
other_player_fx = []
grad_y = self.init_polymatrix(self.num_strats, self.num_players)
unreg_exp = []
reg_exp = []
for i in range(self.num_players):
nabla_i = np.zeros_like(dist[i])
for j in range(self.num_players):
if j == i:
continue
if i < j:
hess_i_ij = payoff_matrices[(i, j)][0]
hess_i_ij_from_y = y[(i, j)][0]
grad_y[(i, j)][0] = hess_i_ij_from_y - hess_i_ij
else:
hess_i_ij = payoff_matrices[(j, i)][1].T
hess_i_ij_from_y = y[(j, i)][1].T
grad_y[(j, i)][1] = hess_i_ij_from_y.T - hess_i_ij.T
nabla_ij = hess_i_ij_from_y.dot(dist[j])
nabla_i += nabla_ij / float(self.num_players - 1)
if p > 0:
power = 1. / float(p)
s_i = np.linalg.norm(nabla_i, ord=power)
if s_i == 0:
br_i = misc.uniform_dist(nabla_i)
else:
br_i = (nabla_i / s_i)**power
else:
power = np.inf
s_i = np.linalg.norm(nabla_i, ord=power)
br_i = np.zeros_like(dist[i])
maxima_i = (nabla_i == s_i)
br_i[maxima_i] = 1. / maxima_i.sum()
policy_gradient_i = nabla_i - s_i * dist[i]**p
policy_gradient.append(policy_gradient_i)
unreg_exp.append(np.max(nabla_i) - nabla_i.dot(dist[i]))
br_i_inv_sparse = 1 - np.sum(br_i**(p + 1))
dist_i_inv_sparse = 1 - np.sum(dist[i]**(p + 1))
entr_br_i = s_i / (p + 1) * br_i_inv_sparse
entr_dist_i = s_i / (p + 1) * dist_i_inv_sparse
reg_exp.append(nabla_i.dot(br_i - dist[i]) + entr_br_i - entr_dist_i)
entr_br_vec_i = br_i_inv_sparse * br_i**(1 - p)
entr_dist_vec_i = dist_i_inv_sparse * dist[i]**(1 - p)
other_player_fx_i = (br_i - dist[i]) + 1 / (p + 1) * (
entr_br_vec_i - entr_dist_vec_i)
other_player_fx.append(other_player_fx_i)
# then construct exploitability gradient
grad_dist = []
for i in range(self.num_players):
grad_dist_i = -policy_gradient[i]
for j in range(self.num_players):
if j == i:
continue
if i < j:
hess_j_ij_from_y = y[(i, j)][1]
else:
hess_j_ij_from_y = y[(j, i)][0].T
grad_dist_i += hess_j_ij_from_y.dot(other_player_fx[j])
if proj_grad:
grad_dist_i = simplex.project_grad(grad_dist_i)
grad_dist.append(grad_dist_i)
return (grad_dist, grad_y), np.mean(unreg_exp), np.mean(reg_exp)
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/solvers/nonsymmetric/ate_poly.py |
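Unlike the preceding solvers, the auxiliary variable y here is a dictionary of running bimatrix estimates rather than a payoff-gradient vector. A brief hedged sketch (hypothetical game, assumes OpenSpiel is installed):
import numpy as np
from open_spiel.python.algorithms.adidas_utils.solvers.nonsymmetric import ate_poly

payoff_matrices = {(0, 1): np.stack([np.array([[1., 0.], [0., 1.]]),
                                     np.array([[0., 1.], [1., 0.]])])}
solver = ate_poly.Solver(p=1., lrs=(1e-2, 1e-1), seed=0)
dist, y = solver.init_vars(num_strats=[2, 2], num_players=2)
print(y[(0, 1)].shape)  # (2, 2, 2): estimated payoff matrices for the pair
grads, unreg_exp, reg_exp = solver.compute_gradients((dist, y), payoff_matrices)
dist, y = solver.update((dist, y), grads, t=0)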
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for loading pyspiel games as payoff tensors."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.egt.utils import game_payoffs_array
import pyspiel
class PyspielTensorGame(object):
"""Matrix Game."""
def __init__(self, string_specifier='blotto(coins=10,fields=3,players=3)',
tensor_game=False, seed=None):
"""Ctor. Inits payoff tensor (players x actions x ... np.array)."""
self.pt = None
self.string_specifier = string_specifier
self.tensor_game = tensor_game
if tensor_game:
self.game = pyspiel.load_tensor_game(string_specifier)
else:
self.game = pyspiel.load_game(string_specifier)
self.seed = seed # currently unused
def num_players(self):
return self.game.num_players()
def num_strategies(self):
return [self.game.num_distinct_actions()] * self.num_players()
def payoff_tensor(self):
if self.pt is None:
if not self.tensor_game:
logging.info('reloading pyspiel game as tensor_game')
self.game = pyspiel.load_tensor_game(self.string_specifier)
self.tensor_game = True
pt = np.asarray(game_payoffs_array(self.game))
self.pt = pt - self.game.min_utility()
return self.pt
def get_payoffs_for_strategies(self, policies):
"""Return vector of payoffs for all players given list of strategies.
Args:
policies: list of integers indexing strategies for each player
Returns:
np.array (length num players) of payoffs
"""
state = self.game.new_initial_state()
state.apply_actions(policies)
return np.asarray(state.returns()) - self.game.min_utility()
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/games/pyspiel_tensor_game.py |
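A brief usage sketch for the wrapper above. It relies on the class defaults, so it assumes pyspiel is installed with the blotto game available; materializing the full payoff tensor can be expensive for larger games.
from open_spiel.python.algorithms.adidas_utils.games.pyspiel_tensor_game import (
    PyspielTensorGame)

game = PyspielTensorGame()  # defaults to blotto(coins=10,fields=3,players=3)
print(game.num_players(), game.num_strategies())
print(game.get_payoffs_for_strategies([0, 1, 2]))  # one joint pure strategy
pt = game.payoff_tensor()  # (players x actions x ...) np.array, shifted >= 0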
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/games/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.adidas_utils.games.small."""
from absl import logging # pylint:disable=unused-import
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.algorithms.adidas_utils.games import small
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
class SmallTest(parameterized.TestCase):
def test_biased_game(self, trials=100, atol=1e-5, rtol=1e-5, seed=1234):
"""Test best responses to sampled opp. actions in BiasedGame are biased."""
game = small.BiasedGame(seed)
random = np.random.RandomState(seed)
successes = []
for _ in range(trials):
dirichlet_alpha = np.ones(game.num_strategies()[0])
      dist = random.dirichlet(dirichlet_alpha)  # mixed strategy
sample_best_responses = np.argmax(game.payoff_tensor()[0], axis=0)
estimated_best_response = np.dot(sample_best_responses, dist)
true_best_response = game.best_response(dist)
successes += [not np.allclose(estimated_best_response, true_best_response,
rtol, atol)]
perc = 100 * np.mean(successes)
logging.info('bias rate out of %d is %f', trials, perc)
self.assertGreaterEqual(
perc, 99., 'best responses should be biased more often')
@staticmethod
def simp_to_euc(a, b, center):
r"""Transforms a point [a, b] on the simplex to Euclidean space.
/\ ^ b
/ \ |
/____\ --> a
Args:
      a: horizontal deviation from center
b: vertical deviation from center
center: center of ref frame given in [x, y, z] Euclidean coordinates
Returns:
1-d np.array of len 3, i.e., np.array([x, y, z])
"""
transform = np.array([[.5, -.5, 0], [-.5, -.5, 1], [1, 1, 1]]).T
transform /= np.linalg.norm(transform, axis=0)
return transform.dot(np.array([a, b, 0])) + center
@parameterized.named_parameters(
('up_down', 0., 0.1, 0., -0.1, -1.),
('left_right', -0.1, 0., 0.1, 0., -1.),
('up_left', 0., 0.1, -0.1, 0., 0.),
('up_right', 0., 0.1, 0.1, 0., 0.),
('down_left', 0., -0.1, -0.1, 0., 0.),
('down_right', 0., -0.1, 0.1, 0., 0.),
)
def test_spiral_game(self, dx_1, dy_1, dx_2, dy_2, expected_cos_sim,
trials=100, eps=0.1, seed=1234):
"""Test that gradients on simplex rotate around SpiralGame's center."""
random = np.random.RandomState(seed)
successes = []
for _ in range(trials):
dx, dy = eps * (random.rand(2) * 2 - 1)
center = self.simp_to_euc(dx, dy, np.ones(3) / 3.)
game = small.SpiralGame(center, seed)
pt = game.payoff_tensor()[0]
point_1 = self.simp_to_euc(dx_1, dy_1, game.center)
point_2 = self.simp_to_euc(dx_2, dy_2, game.center)
grad_1 = simplex.project_grad(pt.dot(point_1))
grad_2 = simplex.project_grad(pt.dot(point_2))
norm = np.linalg.norm(grad_1) * np.linalg.norm(grad_2)
cos_sim = grad_1.dot(grad_2) / norm
successes += [(np.abs(cos_sim - expected_cos_sim) < 1e-5)]
perc = 100 * np.mean(successes)
logging.info('alignment success rate out of %d is %f', trials, perc)
self.assertGreaterEqual(
perc, 99., 'gradient field should exhibit cycles')
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/games/small_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Small matrix games."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import misc
class MatrixGame(object):
"""Matrix Game."""
def __init__(self, pt, seed=None):
"""Ctor. Inits payoff tensor (players x actions x ... np.array).
Args:
pt: payoff tensor, np.array
seed: seed for random number generator, used if computing best responses
"""
if np.any(pt < 0.):
raise ValueError("Payoff tensor must contain non-negative values")
self.pt = pt
self.seed = seed
self.random = np.random.RandomState(seed)
def num_players(self):
return self.pt.shape[0]
def num_strategies(self):
return self.pt.shape[1:]
def payoff_tensor(self):
return self.pt
def get_payoffs_for_strategies(self, policies):
"""Return vector of payoffs for all players given list of strategies.
Args:
policies: list of integers indexing strategies for each player
Returns:
np.array (length num players) of payoffs
"""
return self.pt[:, policies[0], policies[1]]
def best_response(self, mixed_strategy, return_exp=False):
"""Return best response and its superiority over the current strategy.
Args:
mixed_strategy: np.ndarray (distribution over strategies)
return_exp: bool, whether to return how much best response exploits the
given mixed strategy (default is False)
Returns:
br: int, index of strategy (ties split randomly)
exp: u(br) - u(mixed_strategy)
"""
logging.warn("Assumes symmetric game! Returns br for player 0.")
gradient = self.pt[0].dot(mixed_strategy)
br = misc.argmax(self.random, gradient)
exp = gradient.max() - gradient.dot(mixed_strategy)
if return_exp:
return br, exp
else:
return br
def best_population_response(self, dist, policies):
"""Returns the best response to the current population of policies.
Args:
dist: np.ndarray, distribution over policies
policies: list of integers indexing strategies for each player
Returns:
best response, exploitability tuple (see best_response)
"""
ns = self.num_strategies()
mixed_strat = np.zeros(ns)
for pure_strat, prob in zip(policies, dist):
mixed_strat[pure_strat] += prob
return self.best_response(mixed_strat)
class BiasedGame(MatrixGame):
"""2-Player, 3-Action symmetric game with biased stochastic best responses."""
def __init__(self, seed=None):
"""Ctor. Initializes payoff tensor (2 x 3 x 3 np.array).
Args:
seed: seed for random number generator, used if computing best responses
"""
# pylint:disable=bad-whitespace
pt_r = np.array([[0, 0, 0 ],
[1, -2, .5],
[-2, 1, -1]]) + 2.
# pylint:enable=bad-whitespace
pt_c = pt_r.T # symmetric game
pt = np.stack((pt_r, pt_c), axis=0).astype(float)
pt /= pt.max() # arbitrary design choice to upper bound entries to 1
super().__init__(pt, seed)
class PrisonersDilemma(MatrixGame):
"""2-Player, 2-Action symmetric prisoner's dilemma."""
def __init__(self, seed=None):
"""Ctor. Initializes payoff tensor (2 x 2 x 2 np.array).
Args:
seed: seed for random number generator, used if computing best responses
"""
# pylint:disable=bad-whitespace
pt_r = np.array([[-1, -3],
[0, -2]])
# pylint:enable=bad-whitespace
# shift tensor to ensure positivity required for ATE
pt_r -= pt_r.min()
pt_c = pt_r.T # symmetric game
pt = np.stack((pt_r, pt_c), axis=0).astype(float)
pt /= pt.max() # arbitrary design choice to upper bound entries to 1
super().__init__(pt, seed)
class RockPaperScissors(MatrixGame):
"""2-Player, 3-Action symmetric RPS."""
def __init__(self, weights=None, seed=None):
"""Ctor. Initializes payoff tensor (2 x 3 x 3 np.array).
Args:
weights: list of weights (floats) for [rock, paper, scissors]
seed: seed for random number generator, used if computing best responses
"""
if weights is None:
weights = np.ones(3)
r, p, s = weights
# pylint:disable=bad-whitespace
pt_r = np.array([[0, -p, r],
[p, 0, -s],
[-r, s, 0]])
# pylint:enable=bad-whitespace
# shift tensor to ensure positivity required for ATE
pt_r -= pt_r.min()
pt_c = pt_r.T # symmetric game
pt = np.stack((pt_r, pt_c), axis=0).astype(float)
super().__init__(pt, seed)
class SpiralGame(MatrixGame):
"""2-Player, 3-Action symmetric game with spiral dynamics on simplex."""
def __init__(self, center=None, seed=None):
"""Ctor. Initializes payoff tensor (2 x 3 x 3 np.array).
Args:
center: center of cycle given in [x, y, z] Euclidean coordinates
seed: seed for random number generator, used if computing best responses
"""
if center is None:
center = np.ones(3) / 3.
else:
if not ((np.sum(center) <= 1 + 1e-8) and np.all(center >= -1e-8)):
raise ValueError("center must lie on simplex")
self.center = center
center = center.reshape((3, 1))
# define coordinate frame for simplex; basis vectors on columns of transform
transform = np.array([[.5, -.5, 0], [-.5, -.5, 1], [1, 1, 1]]).T
transform /= np.linalg.norm(transform, axis=0)
transform_inv = np.linalg.inv(transform)
# canonical cycle matrix in 2-d
cycle = 0.1 * np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
# payoff tensor maps euclidean to simplex frame, applies cycle, maps back
pt_r = transform.dot(cycle.dot(transform_inv))
# subtracting off a column vector effectively offsets the vector field
# because [[c c c], ...] [[x], [y], [z]] = [c * (x + y + z), ...] = [c, ...]
pt_r -= pt_r.dot(center)
# shift tensor to ensure positivity required for ATE
if pt_r.min() < 0:
pt_r -= pt_r.min()
pt_c = pt_r.T # symmetric game
pt = np.stack((pt_r, pt_c), axis=0).astype(float)
super().__init__(pt, seed)
class MatchingPennies(MatrixGame):
"""2-Player, 2-Action non-symmetric matching pennies."""
def __init__(self, bias=1., seed=None):
"""Ctor. Initializes payoff tensor (2 x 2 x 2 np.array).
Args:
bias: float, rewards one action (bias) more than the other (1)
seed: seed for random number generator, used if computing best responses
"""
# pylint:disable=bad-whitespace
pt_r = np.array([[1, -1],
[-1, bias]])
# pylint:enable=bad-whitespace
pt_c = (-pt_r).T # zero-sum game
pt = np.stack((pt_r, pt_c), axis=0).astype(float)
# shift tensor to ensure positivity required for ATE
pt -= pt.min()
pt /= pt.max() # arbitrary design choice to upper bound entries to 1
super().__init__(pt, seed)
class Shapleys(MatrixGame):
"""2-Player, 3-Action non-symmetric Shapleys game."""
  def __init__(self, beta=1., seed=None):
    """Ctor. Initializes payoff tensor (2 x 3 x 3 np.array).
See Eqn 4 in https://arxiv.org/pdf/1308.4049.pdf.
Args:
beta: float, modifies the game so that the utilities @ Nash are now
u_1(Nash) = (1 + beta) / 3 and u_2(Nash) = (1 - beta) / 3
where Nash is the joint uniform distribution
seed: seed for random number generator, used if computing best responses
"""
# pylint:disable=bad-whitespace
pt_r = np.array([[1, 0, beta],
[beta, 1, 0],
[0, beta, 1]])
pt_c = np.array([[-beta, 1, 0],
[0, -beta, 1],
[1, 0, -beta]])
# pylint:enable=bad-whitespace
pt = np.stack((pt_r, pt_c), axis=0).astype(float)
# shift tensor to ensure positivity required for ATE
pt -= pt.min()
pt /= pt.max() # arbitrary design choice to upper bound entries to 1
super().__init__(pt, seed)
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/games/small.py |
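A quick illustration (not from the library) of the matrix games defined above, exercising the best-response helper on a rock-heavy mixture.
import numpy as np
from open_spiel.python.algorithms.adidas_utils.games import small

game = small.RockPaperScissors(seed=0)
print(game.num_players(), game.num_strategies())  # 2 (3, 3)
mixed = np.array([0.5, 0.25, 0.25])  # over-weights rock
br, exp = game.best_response(mixed, return_exp=True)
print(br, exp)  # best response is paper (index 1), exploiting rock-heavy play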
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAMUT games.
See https://github.com/deepmind/open_spiel/tree/master/open_spiel/games/gamut
for details on how to build OpenSpiel with support for GAMUT.
"""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.egt.utils import game_payoffs_array
import pyspiel
class GAMUT(object):
"""GAMUT Games."""
def __init__(self, config_list, java_path='', seed=None):
"""Ctor. Inits payoff tensor (players x actions x ... np.array).
Args:
      config_list: a list of strings alternating between gamut flags and values
see http://gamut.stanford.edu/userdoc.pdf for more information
e.g., config_list = ['-g', 'CovariantGame', '-players', '6',
'-normalize', '-min_payoff', '0',
'-max_payoff', '1', '-actions', '5', '-r', '0']
java_path: string, java path
seed: random seed, some GAMUT games are randomly generated
"""
self.pt = None
self.config_list = config_list
self.seed = seed
self.random = np.random.RandomState(seed)
# parse interval for rho if supplied, e.g., '[-.2,1]'
if '-r' in config_list:
idx = next(i for i, s in enumerate(config_list) if s == '-r')
val = config_list[idx + 1]
if not val.isnumeric() and val[0] in '([' and val[-1] in ')]':
a, b = val.strip('[]()').split(',')
a = float(a)
b = float(b)
rho = self.random.rand() * (b - a) + a
config_list[idx + 1] = str(rho)
if isinstance(seed, int):
self.config_list += ['-random_seed', str(seed)]
self.java_path = java_path
if java_path:
generator = pyspiel.GamutGenerator(
java_path,
'gamut/gamut_main_deploy.jar')
else: # use default java path as specified by pyspiel
generator = pyspiel.GamutGenerator(
'gamut.jar')
self.game = generator.generate_game(config_list)
def num_players(self):
return self.game.num_players()
def num_strategies(self):
return [self.game.num_distinct_actions()] * self.num_players()
def payoff_tensor(self):
if self.pt is None:
pt = np.asarray(game_payoffs_array(self.game))
self.pt = pt - self.game.min_utility()
return self.pt
def get_payoffs_for_strategies(self, policies):
"""Return vector of payoffs for all players given list of strategies.
Args:
policies: list of integers indexing strategies for each player
Returns:
np.array (length num players) of payoffs
"""
state = self.game.new_initial_state()
state.apply_actions(policies)
return np.asarray(state.returns()) - self.game.min_utility()
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/games/gamut.py |
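A hedged sketch of generating a random game through the wrapper above. It mirrors the config example in the docstring and requires OpenSpiel built with GAMUT support plus a working JVM, so it is illustrative only.
from open_spiel.python.algorithms.adidas_utils.games.gamut import GAMUT

config_list = ['-g', 'CovariantGame', '-players', '3', '-normalize',
               '-min_payoff', '0', '-max_payoff', '1', '-actions', '4',
               '-r', '[-.2,1]']  # rho sampled uniformly from this interval
game = GAMUT(config_list, seed=0)
pt = game.payoff_tensor()  # (3 x 4 x 4 x 4) np.array, shifted to be >= 0
print(game.get_payoffs_for_strategies([0, 1, 2]))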
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Big tensor games."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import misc
class TensorGame(object):
"""Tensor Game."""
def __init__(self, pt, seed=None):
"""Ctor. Inits payoff tensor (players x actions x ... np.array).
Args:
pt: payoff tensor, np.array
seed: seed for random number generator, used if computing best responses
"""
if np.any(pt < 0.):
raise ValueError("Payoff tensor must contain non-negative values")
self.pt = pt
self.seed = seed
self.random = np.random.RandomState(seed)
def num_players(self):
return self.pt.shape[0]
def num_strategies(self):
return self.pt.shape[1:]
def payoff_tensor(self):
return self.pt
def get_payoffs_for_strategies(self, policies):
"""Return vector of payoffs for all players given list of strategies.
Args:
policies: list of integers indexing strategies for each player
Returns:
np.array (length num players) of payoffs
"""
return self.pt[tuple([slice(None)] + policies)]
def best_response(self, mixed_strategy, return_exp=False):
"""Return best response and its superiority over the current strategy.
Args:
mixed_strategy: np.ndarray (distribution over strategies)
return_exp: bool, whether to return how much best response exploits the
given mixed strategy (default is False)
Returns:
br: int, index of strategy (ties split randomly)
exp: u(br) - u(mixed_strategy)
"""
    logging.warning("Assumes symmetric game! Returns br for player 0.")
gradient = misc.pt_reduce(self.pt[0],
[mixed_strategy] * self.num_players(),
[0])
br = misc.argmax(self.random, gradient)
exp = gradient.max() - gradient.dot(mixed_strategy)
if return_exp:
return br, exp
else:
return br
def best_population_response(self, dist, policies):
"""Returns the best response to the current population of policies.
Args:
dist: np.ndarray, distribution over policies
policies: list of integers indexing strategies for each player
Returns:
best response, exploitability tuple (see best_response)
"""
ns = self.num_strategies()
mixed_strat = np.zeros(ns)
for pure_strat, prob in zip(policies, dist):
mixed_strat[pure_strat] += prob
return self.best_response(mixed_strat)
class ElFarol(TensorGame):
"""N-Player, 2-Action symmetric game with unique symmetric Nash."""
def __init__(self, n=2, c=0.5, B=0, S=1, G=2, seed=None):
"""Ctor. Initializes payoff tensor (N x (2,) * N np.array).
See Section 3.1, The El Farol Stage Game in
http://www.econ.ed.ac.uk/papers/id186_esedps.pdf
action 0: go to bar
action 1: avoid bar
Args:
n: int, number of players
c: float, threshold for `crowded' as a fraction of number of players
B: float, payoff for going to a crowded bar
S: float, payoff for staying at home
G: float, payoff for going to an uncrowded bar
seed: seed for random number generator, used if computing best responses
"""
assert G > S > B, "Game parameters must satisfy G > S > B."
pt = np.zeros((n,) + (2,) * n)
for idx in np.ndindex(pt.shape):
p = idx[0]
a = idx[1:]
a_i = a[p]
go_to_bar = (a_i < 1)
crowded = (n - 1 - sum(a) + a_i) >= (c * n)
if go_to_bar and not crowded:
pt[idx] = G
elif go_to_bar and crowded:
pt[idx] = B
else:
pt[idx] = S
super().__init__(pt, seed)
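# Illustrative usage sketch (not part of the original module): build a small
# El Farol instance and query the TensorGame helpers defined above.
if __name__ == '__main__':
  game = ElFarol(n=3, c=0.5)
  print('players:', game.num_players())        # 3
  print('strategies:', game.num_strategies())  # (2, 2, 2)
  # payoffs when every player goes to the bar (action 0 for all players)
  print('all at bar:', game.get_payoffs_for_strategies([0, 0, 0]))
  # best response of a player to the uniform symmetric mixed strategy
  br, exp = game.best_response(np.array([0.5, 0.5]), return_exp=True)
  print('best response:', br, 'exploitability:', exp)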
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/games/big.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utils."""
from absl import logging # pylint:disable=unused-import
import numpy as np
def uniform_dist(x):
"""Returns a uniform distribution with same shape as the given numpy array.
Args:
x: numpy array
Returns:
constant numpy array of same shape as input x, sums to 1
"""
return np.ones_like(x) / float(x.size)
def argmax(random, z):
"""Returns argmax of flattened z with ties split randomly.
Args:
random: Random number generator, e.g., np.random.RandomState()
z: np.array
Returns:
integer representing index of argmax
"""
inds = np.arange(z.size)
random.shuffle(inds)
z_shuffled = z[inds]
ind_max = np.argmax(z_shuffled)
return inds[ind_max]
def pt_reduce(payoff_tensor, strats, remove_players):
"""Computes possible payoffs for remove_players with others' strats fixed.
This is equivalent to the Jacobian of the payoff w.r.t. remove_players:
sum_{a...z} A_k * x_1a * ... * x_nz for player k.
Args:
payoff_tensor: a single player k's payoff tensor, i.e.,
a num action x ... x num action (num player) np.array
strats: list of distributions over strategies for each player
remove_players: players to NOT sum over in expectation
Returns:
payoff tensor of shape: num_action x ... x num_action,
num_action for each player in remove_players
"""
result = np.copy(payoff_tensor)
result_dims = list(range(len(result.shape)))
other_player_idxs = list(result_dims)
for remove_player in remove_players:
other_player_idxs.remove(remove_player)
for other_player_idx in other_player_idxs:
new_result_dims = list(result_dims)
new_result_dims.remove(other_player_idx)
result = np.einsum(result, result_dims, strats[other_player_idx],
[other_player_idx], new_result_dims)
result_dims = new_result_dims
return result
def isnan(x):
"""Checks for NaN's in nested objects."""
if isinstance(x, float):
return np.isnan(x)
elif isinstance(x, int):
return np.isnan(x)
elif isinstance(x, np.ndarray):
return np.any(np.isnan(x))
elif isinstance(x, list):
return np.any([isnan(xi) for xi in x])
elif isinstance(x, tuple):
return np.any([isnan(xi) for xi in x])
elif isinstance(x, dict):
return np.any([isnan(xi) for xi in x.values()])
else:
typ = repr(type(x))
err_string = 'type(x)={:s} not recognized when checking for NaN'.format(typ)
raise NotImplementedError(err_string)
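# Illustrative sketch (not part of the original module): a worked pt_reduce
# example on a 2-player matrix game. With the opponent's strategy fixed to
# [0.5, 0.5], pt_reduce returns player 0's expected payoff for each of their
# pure strategies, i.e. the matrix-vector product A @ y.
if __name__ == '__main__':
  payoff_player_0 = np.array([[1., 0.],
                              [3., 2.]])
  strats = [np.array([0.5, 0.5]), np.array([0.5, 0.5])]
  grad_0 = pt_reduce(payoff_player_0, strats, remove_players=[0])
  print(grad_0)  # [0.5 2.5], same as payoff_player_0 @ strats[1]
  rng = np.random.RandomState(0)
  print(argmax(rng, grad_0))  # 1 (unique maximizer, so no tie to break)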
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/misc.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.adidas_utils.helpers.simplex."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
class SimplexTest(parameterized.TestCase):
@parameterized.named_parameters(
('inside', np.array([.25, .75]), np.array([.25, .75])),
('outside_1', np.ones(2), 0.5 * np.ones(2)),
('outside_2', np.array([2., 0.]), np.array([1., 0.])),
('outside_3', np.array([.25, .25]), np.array([.5, .5])),
)
def test_euclidean_projection(self, vector, expected_projection):
projection = simplex.euclidean_projection_onto_simplex(vector, subset=False)
self.assertListEqual(list(projection), list(expected_projection),
msg='projection not accurate')
@parameterized.named_parameters(
('orth', np.array([.75, .75]), np.array([.0, .0])),
('oblique', np.array([1., .5]), np.array([.25, -.25])),
('tangent', np.array([.25, .25, -.5]), np.array([.25, .25, -.5])),
)
def test_tangent_projection(self, vector, expected_projection):
projection = simplex.project_grad(vector)
self.assertListEqual(list(projection), list(expected_projection),
msg='projection not accurate')
@parameterized.named_parameters(
('orth_1', np.array([0.5, 0.5]), np.array([.75, .75]), 0.),
('orth_2', np.array([1., 0.]), np.array([.75, .75]), 0.),
('tangent_1', np.array([1., 0.]), np.array([-.5, .5]), 0.),
('tangent_2', np.array([1., 0.]), np.array([1., -1.]), np.sqrt(2)),
)
def test_grad_norm(self, dist, grad, expected_norm):
norm = simplex.grad_norm(dist, grad)
self.assertAlmostEqual(norm, expected_norm, msg='norm not accurate')
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/simplex_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Treatment of iterates and gradients over the simplex."""
from absl import logging # pylint:disable=unused-import
import numpy as np
def grad_norm(dist, grad, eps=1e-8, simplex_tol=1e-9):
"""Compute norm of gradient projected onto the tangent space of simplex.
*assumes context is gradient descent (not ascent)
Args:
dist: np.array, distribution
grad: np.array, gradient (same shape as distribution)
    eps: float, elements of dist in [eps, 1 - eps] are considered to be in the
      interior of the simplex; gradients at border points whose descent step
      would leave the simplex are assigned zero norm
    simplex_tol: float, tolerance for checking if a point lies on the simplex,
      i.e., sum(vec) <= 1 + simplex_tol and all(vec >= -simplex_tol). Should be
      smaller than eps, otherwise descent steps or points that are "leaving"
      the simplex will be mislabeled
Returns:
float, norm of projected gradient
"""
if simplex_tol >= eps:
raise ValueError("simplex_tol should be less than eps")
grad_proj = project_grad(grad)
g_norm = np.linalg.norm(grad_proj)
if g_norm > 0:
# take a gradient descent step in the direction grad_proj with len eps
# to determine if the update is "leaving" the simplex
    # step on a copy so the caller's distribution is not mutated in place
    dist = dist - eps * grad_proj / g_norm
if not ((np.sum(dist) <= 1 + simplex_tol) and np.all(dist >= -simplex_tol)):
g_norm = 0.
return g_norm
def project_grad(g):
"""Project gradient onto tangent space of simplex."""
return g - g.sum() / g.size
# Project to probability simplex
# Based on this paper:
# Projection onto the probability simplex: An efficient algorithm with a
# simple proof, and an application
# https://arxiv.org/pdf/1309.1541.pdf
def euclidean_projection_onto_simplex(y, eps=1e-3, subset=True):
"""O(n log n) Euclidean projection of y onto the simplex.
Args:
y: np.array
eps: float, ensure x remains at least eps / dim away from facets of simplex
subset: bool, whether to project onto a subset of the simplex defined by eps
Returns:
np.array, y projected onto the simplex
"""
if np.all(y >= 0.) and np.abs(np.sum(y) - 1.) < 1e-8:
return y
d = len(y)
u = sorted(y, reverse=True)
sum_uj = 0.
for j in range(d):
sum_uj += u[j]
tj = (1. - sum_uj) / (j + 1.)
if u[j] + tj <= 0:
rho = j - 1
sum_uj = sum_uj - u[j]
break
else:
rho = j
lam = (1. - sum_uj) / (rho + 1.)
x = np.array([max(y[i] + lam, 0.) for i in range(d)])
if subset:
scale = 1. - eps * float(d + 1) / d
offset = eps / float(d)
x = scale * x + offset
x /= x.sum()
return x
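# Illustrative sketch (not part of the original module): project a point onto
# the simplex and measure the norm of a gradient projected onto its tangent
# space, mirroring the cases covered in simplex_test.py.
if __name__ == '__main__':
  y = np.array([2., 0.])
  print(euclidean_projection_onto_simplex(y, subset=False))  # [1. 0.]
  g = np.array([1., .5])
  print(project_grad(g))  # [ 0.25 -0.25], mean is removed
  # at an interior point the result is just the projected gradient's norm
  print(grad_norm(np.array([0.5, 0.5]), g))  # ~0.354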
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/simplex.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.adidas_utils.helpers.symmetric.utils."""
from absl import logging # pylint:disable=unused-import
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import utils
class UtilsTest(parameterized.TestCase):
def test_symmetrize_tensor(self, trials=100, seed=1234):
random = np.random.RandomState(seed)
successes = []
for _ in range(trials):
pt = random.rand(3, 2, 2, 2)
pt_sym_man = np.zeros_like(pt)
for p in range(3):
for i in range(2):
for j in range(2):
for k in range(2):
if p == 0:
# read: if player 0 plays i and its two opponents play j and k
# this should return the same payoff as when
# player 1 plays i and its two opponents play j and k
# player 2 plays i and its two opponents play j and k
# solution is to add up all these payoffs and replace with avg
pt_sym_man[p, i, j, k] = (pt[0, i, j, k] + pt[0, i, k, j] +
pt[1, j, i, k] + pt[1, k, i, j] +
pt[2, j, k, i] + pt[2, k, j, i]) / 6.
elif p == 1:
# same rationale, but with player 1 playing j
pt_sym_man[p, i, j, k] = (pt[0, j, i, k] + pt[0, j, k, i] +
pt[1, i, j, k] + pt[1, k, j, i] +
pt[2, i, k, j] + pt[2, k, i, j]) / 6.
else:
# same rationale, but with player 2 playing k
pt_sym_man[p, i, j, k] = (pt[0, k, i, j] + pt[0, k, j, i] +
pt[1, i, k, j] + pt[1, j, k, i] +
pt[2, i, j, k] + pt[2, j, i, k]) / 6.
pt_sym = utils.sym(pt)
successes += [np.allclose(pt_sym, pt_sym_man)]
perc = 100 * np.mean(successes)
logging.info('symmetrizing success rate out of %d is %f', trials, perc)
self.assertGreaterEqual(
perc, 100., 'symmetrizing failed')
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/symmetric/utils_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exploitability measurement utils."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import exploitability
class Solver(object):
"""Generic Solver."""
def __init__(self, proj_grad=True, euclidean=False, rnd_init=False,
seed=None):
"""Ctor."""
self.num_players = None
self.proj_grad = proj_grad
self.rnd_init = rnd_init
self.lrs = (None, None, None)
self.has_aux = False
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if self.rnd_init:
init_dist = self.random.rand(num_strats)
else:
init_dist = np.ones(num_strats)
init_dist /= init_dist.sum()
return (init_dist,)
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients for all parameters.
Args:
params: e.g., tuple of params (dist,)
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
eg., tuple of gradients (grad_dist,)
"""
raise NotImplementedError("Should be implemented by specific solver.")
def exploitability(self, params, payoff_matrices):
"""Compute and return exploitability that solver is minimizing.
Args:
params: e.g., tuple of params (dist,)
payoff_matrices: (>=2 x A x A) np.array, payoffs for each joint action
Returns:
float, exploitability of current dist
"""
return exploitability.unreg_exploitability(params, payoff_matrices)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist,)
grads: tuple of variable gradients (grad_dist,)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist,)
"""
del t
new_params = params[0] - self.lrs[0] * grads[0]
new_params = simplex.euclidean_projection_onto_simplex(new_params)
return (new_params,)
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist)
grads: tuple of variable gradients (grad_dist)
t: int, solver iteration
Returns:
new_params: tuple of update params (new_dist)
"""
del t
dist = np.clip(params[0], 0, np.inf)
return (special.softmax(np.log(dist) - self.lrs[0] * grads[0]),)
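# Illustrative sketch (not part of the original module): a minimal concrete
# subclass showing how the Solver interface is meant to be filled in. The
# "gradient" here is just the negated expected payoff of each pure strategy
# (a toy best-response-style dynamic), not one of the ADIDAS solvers.
if __name__ == '__main__':
  class _ToySolver(Solver):
    def __init__(self, lr=0.1, **kwargs):
      super().__init__(**kwargs)
      self.lrs = (lr,)
    def compute_gradients(self, params, payoff_matrices):
      nabla = payoff_matrices[0].dot(params[0])
      return (-nabla,)  # negate: update() performs descent
  solver = _ToySolver(lr=0.1, euclidean=False)
  # symmetric rock-paper-scissors, shifted to be non-negative
  pm = np.array([[0., -1., 1.], [1., 0., -1.], [-1., 1., 0.]]) + 1.
  payoff_matrices = np.stack([pm, pm.T])
  (dist,) = solver.init_vars(num_strats=3, num_players=2)
  for t in range(100):
    grads = solver.compute_gradients((dist,), payoff_matrices)
    (dist,) = solver.update((dist,), grads, t)
  print(dist)  # stays at the uniform Nash of RPS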
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/symmetric/updates.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/symmetric/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.adidas_utils.helpers.symmetric.exploitability.
Computing the exploitability of a tsallis-entropy regularized game is more
involved, so we include a derivation here of an example test case using the
prisoner's dilemma (see pd np.array below). Note that the tsallis-entropy
setting assumes non-negative payoffs so we add 3 to the array. We assume p=1
for the tsallis entropy in this example.
pd dist grad br payoff(br) payoff(dist)
[2 0] [.5] = [1] --> [1/3] --> 5/3 --> 3/2
[3 1] [.5] [2] [2/3]
s = sum(grad) = 3
tsallis-entr(br) = s / (p + 1) * (1 - br_1^2 - br_2^2)
= 3 / 2 * (1 - 1/9 - 4/9) = 2/3
tsallis-entr(dist) = s / (p + 1) * (1 - dist_1^2 - dist_2^2)
= 3 / 2 * (1 - 1/4 - 1/4) = 3/4
u(br) - u(dist) = 5/3 + 2/3 - 3/2 - 3/4 = 7 / 3 - 9 / 4
"""
from absl import logging # pylint:disable=unused-import
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers.symmetric import exploitability
test_seed = 12345
# prisoner's dilemma test case
# pylint:disable=bad-whitespace
pt_r = np.array([[-1, -3],
[0, -2]])
# pylint:enable=bad-whitespace
pt_r -= pt_r.min()
pt_c = pt_r.T
pd = np.stack((pt_r, pt_c), axis=0)
pd_nash = np.array([0, 1])
pd_non_nash_1 = np.array([1, 0])
pd_non_nash_exp_1 = 1.
pd_non_nash_ate_exp_1 = pd_non_nash_exp_1
pd_non_nash_2 = np.array([0.5, 0.5])
pd_non_nash_exp_2 = 0.5
pd_non_nash_ate_exp_2 = 7. / 3. - 9. / 4.
qre_br = np.exp([1, 2]) / np.exp([1, 2]).sum()
entr_br = -np.sum(qre_br * np.log(qre_br))
entr_non_nash_2 = -np.sum(pd_non_nash_2 * np.log(pd_non_nash_2))
u_br_minus_non_nash = (qre_br - pd_non_nash_2).dot([1, 2])
pd_non_nash_qre_exp_2 = u_br_minus_non_nash + (entr_br - entr_non_nash_2)
# rock-paper-scissors test case
# pylint:disable=bad-whitespace
pt_r = np.array([[0, -1, 1],
[1, 0, -1],
[-1, 1, 0]])
# pylint:enable=bad-whitespace
pt_r -= pt_r.min()
pt_c = pt_r.T
rps = np.stack((pt_r, pt_c), axis=0)
rps_nash = np.ones(3) / 3.
rps_non_nash_1 = np.array([1, 0, 0])
rps_non_nash_exp_1 = 1.
rps_non_nash_2 = np.array([0, 1, 0])
rps_non_nash_exp_2 = 1.
rps_non_nash_3 = np.array([0, 0, 1])
rps_non_nash_exp_3 = 1.
class ExploitabilityTest(parameterized.TestCase):
@parameterized.named_parameters(
('PD_nash', pd, pd_nash),
('RPS_nash', rps, rps_nash),
)
def test_unreg_exploitability_of_nash(self, payoff_tensor, nash):
# assumes symmetric games
exp = exploitability.unreg_exploitability(nash, payoff_tensor)
self.assertEqual(exp, 0., 'nash should have zero exploitability')
@parameterized.named_parameters(
('PD_non_nash_1', pd, pd_non_nash_1, pd_non_nash_exp_1),
('PD_non_nash_2', pd, pd_non_nash_2, pd_non_nash_exp_2),
('RPS_non_nash_1', rps, rps_non_nash_1, rps_non_nash_exp_1),
('RPS_non_nash_2', rps, rps_non_nash_2, rps_non_nash_exp_2),
('RPS_non_nash_3', rps, rps_non_nash_3, rps_non_nash_exp_3),
)
def test_unreg_exploitability_of_non_nash(self, payoff_tensor, dist, exp):
# assumes symmetric games
exp_pred = exploitability.unreg_exploitability(dist, payoff_tensor)
self.assertEqual(exp_pred, exp, 'dist should have the given exploitability')
@parameterized.named_parameters(
('PD_rand', pd, test_seed),
('RPS_rand', rps, test_seed),
)
def test_unreg_exploitability_of_rand(self, payoff_tensor, seed=None):
trials = 100
random = np.random.RandomState(seed)
num_strategies = payoff_tensor.shape[-1]
dists = random.rand(trials, num_strategies)
dists /= np.sum(dists, axis=1, keepdims=True)
exploitable = []
for dist in dists:
exp = exploitability.unreg_exploitability(dist, payoff_tensor)
exploitable.append(exp > 0.)
perc = 100 * np.mean(exploitable)
logging.info('rand strat exploitable rate out of %d is %f', trials, perc)
self.assertEqual(perc, 100., 'found rand strat that was nash')
@parameterized.named_parameters(
('RPS_nash_p=0', rps, rps_nash, 0.),
('RPS_nash_p=0.1', rps, rps_nash, 0.1),
('RPS_nash_p=1', rps, rps_nash, 1.),
)
def test_ate_exploitability_of_nash(self, payoff_tensor, nash, p):
# assumes symmetric games
exp = exploitability.ate_exploitability(nash, payoff_tensor, p)
self.assertGreaterEqual(0., exp,
'uniform nash should have zero exploitability')
@parameterized.named_parameters(
('PD_non_nash_p=0', pd, 0., pd_non_nash_1, pd_non_nash_exp_1),
('PD_non_nash_p=1', pd, 1., pd_non_nash_2, pd_non_nash_ate_exp_2),
)
def test_ate_exploitability_of_non_nash(self, payoff_tensor, p, dist, exp):
# assumes symmetric games
exp_pred = exploitability.ate_exploitability(dist, payoff_tensor, p)
self.assertAlmostEqual(exp_pred, exp,
msg='dist should have the given exploitability')
@parameterized.named_parameters(
('RPS_rand_p=0', rps, 0., test_seed),
('RPS_rand_p=0.1', rps, 0.1, test_seed),
('RPS_rand_p=1', rps, 1., test_seed),
)
def test_ate_exploitability_of_rand(self, payoff_tensor, p, seed=None):
trials = 100
random = np.random.RandomState(seed)
num_strategies = payoff_tensor.shape[-1]
dists = random.rand(trials, num_strategies)
dists /= np.sum(dists, axis=1, keepdims=True)
exploitable = []
for dist in dists:
exp = exploitability.ate_exploitability(dist, payoff_tensor, p)
exploitable.append(exp > 0.)
perc = 100 * np.mean(exploitable)
logging.info('rand strat exploitable rate out of %d is %f', trials, perc)
self.assertEqual(perc, 100., 'found rand strat that was nash')
@parameterized.named_parameters(
('RPS_nash_tau=0', rps, rps_nash, 0.),
('RPS_nash_tau=0.1', rps, rps_nash, 0.1),
('RPS_nash_tau=1', rps, rps_nash, 1.),
)
def test_qre_exploitability_of_nash(self, payoff_tensor, nash, temperature):
# assumes symmetric games
exp = exploitability.qre_exploitability(nash, payoff_tensor, temperature)
self.assertGreaterEqual(1e-10, exp,
'uniform nash should have zero exploitability')
@parameterized.named_parameters(
('PD_non_nash_tau=0', pd, 0., pd_non_nash_1, pd_non_nash_exp_1),
('PD_non_nash_tau=1', pd, 1., pd_non_nash_2, pd_non_nash_qre_exp_2),
)
def test_qre_exploitability_of_non_nash(self, payoff_tensor, temperature,
dist, exp):
# assumes symmetric games
exp_pred = exploitability.qre_exploitability(dist, payoff_tensor,
temperature)
self.assertAlmostEqual(exp_pred, exp,
msg='dist should have the given exploitability')
@parameterized.named_parameters(
('RPS_rand_tau=0', rps, 0., test_seed),
('RPS_rand_tau=0.1', rps, 0.1, test_seed),
('RPS_rand_tau=1', rps, 1., test_seed),
)
def test_qre_exploitability_of_rand(self, payoff_tensor, temperature,
seed=None):
trials = 100
random = np.random.RandomState(seed)
num_strategies = payoff_tensor.shape[-1]
dists = random.rand(trials, num_strategies)
dists /= np.sum(dists, axis=1, keepdims=True)
exploitable = []
for dist in dists:
exp = exploitability.qre_exploitability(dist, payoff_tensor, temperature)
exploitable.append(exp > 0.)
perc = 100 * np.mean(exploitable)
logging.info('rand strat exploitable rate out of %d is %f', trials, perc)
self.assertEqual(perc, 100., 'found rand strat that was nash')
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/symmetric/exploitability_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Symmetric payoff tensor utils."""
import itertools
import math
from absl import logging # pylint:disable=unused-import
import numpy as np
def sym(pt):
"""Symmetrize stack of payoff tensors (stacked along first dimension).
A payoff tensor can be `symmetrized' by averaging over all possible
permutations of the players. This means permuting the axes corresponding to
the player strategies as well as the payoffs assigned to the players. E.g.,
player A playing strategy 1 and player B playing strategy 3 is no different
from player A playing strategy 3 and player B playing strategy 1 in a
symmetric game. Note we permuted the strategies, but we must also permute the
payoffs.
Args:
pt: tensor of shape: (num_players,) + (num_strategies,) * num_players
Returns:
pt_sym: symmetrized payoff tensor of same shape
"""
num_players = len(pt.shape[1:])
num_perms = math.factorial(num_players)
pt_sym = np.zeros_like(pt)
logging.info('Symmetrizing over {:d} permutations...'.format(num_perms))
for i, perm_players in enumerate(itertools.permutations(range(num_players))):
    if (i % max(1, num_perms // 5)) == 0:  # max(1, .) guards num_perms < 5
logging.info('\t{:d} / {:d}'.format(i, num_perms))
perm_axes = tuple([pi + 1 for pi in perm_players])
permuted_tensor = np.transpose(pt, (0,) + perm_axes)[list(perm_players)]
pt_sym += permuted_tensor / float(num_perms)
logging.info('\t{total:d} / {total:d}'.format(total=num_perms))
return pt_sym
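# Illustrative sketch (not part of the original module): for a 2-player game,
# symmetrizing averages each player's tensor with the opponent's transpose,
# so sym(pt)[0] == sym(pt)[1].T and the game becomes symmetric.
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  pt_example = rng.rand(2, 3, 3)  # 2 players, 3 strategies each
  pt_example_sym = sym(pt_example)
  print(np.allclose(pt_example_sym[0],
                    (pt_example[0] + pt_example[1].T) / 2.))  # True
  print(np.allclose(pt_example_sym[0], pt_example_sym[1].T))  # True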
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/symmetric/utils.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exploitability measurement utils for symmetric games."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import misc
def unreg_exploitability(dist, payoff_tensor):
"""Compute exploitability of dist for symmetric game.
Args:
dist: 1-d np.array, current estimate of nash distribution
payoff_tensor: (>=1 x A x ... x A) np.array, payoffs for each joint action
Returns:
exploitability (float): payoff of best response - payoff of dist
"""
num_players = payoff_tensor.shape[0]
nabla = misc.pt_reduce(payoff_tensor[0], [dist] * num_players, [0])
u_br = np.max(nabla)
u_dist = nabla.dot(dist)
return u_br - u_dist
def ate_exploitability(dist, payoff_tensor, p=1):
"""Compute Tsallis regularized exploitability of dist for symmetric game.
Args:
dist: 1-d np.array, current estimate of nash distribution
payoff_tensor: (>=1 x A x ... x A) np.array, payoffs for each joint action
assumed to be non-negative
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
Returns:
exploitability (float): payoff of best response - payoff of dist
"""
if payoff_tensor.min() < 0.:
raise ValueError('payoff tensor must be non-negative')
num_players = payoff_tensor.shape[0]
nabla = misc.pt_reduce(payoff_tensor[0], [dist] * num_players, [0])
if p > 0:
power = 1./p
s = np.linalg.norm(nabla, ord=power)
br = (nabla / np.linalg.norm(nabla, ord=power))**power
else:
power = np.inf
s = np.linalg.norm(nabla, ord=power)
br = np.zeros_like(dist)
maxima = (nabla == s)
br[maxima] = 1. / maxima.sum()
u_br = nabla.dot(br) + s / (p + 1) * (1 - np.sum(br**(p + 1)))
u_dist = nabla.dot(dist) + s / (p + 1) * (1 - np.sum(dist**(p + 1)))
return u_br - u_dist
def qre_exploitability(dist, payoff_tensor, temperature=0.):
"""Compute Shannon regularized exploitability of dist for symmetric game.
Args:
dist: 1-d np.array, current estimate of nash distribution
payoff_tensor: (>=1 x A x ... x A) np.array, payoffs for each joint action
assumed to be non-negative
temperature: non-negative float
Returns:
exploitability (float): payoff of best response - payoff of dist
"""
num_players = payoff_tensor.shape[0]
nabla = misc.pt_reduce(payoff_tensor[0], [dist] * num_players, [0])
if temperature > 0:
br = special.softmax(nabla / temperature)
else:
br = np.zeros_like(dist)
maxima = (nabla == np.max(nabla))
br[maxima] = 1. / maxima.sum()
u_br = nabla.dot(br) + temperature * special.entr(br).sum()
u_dist = nabla.dot(dist) + temperature * special.entr(dist).sum()
return u_br - u_dist
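# Illustrative sketch (not part of the original module): exploitability of a
# few distributions in symmetric rock-paper-scissors, with payoffs shifted to
# be non-negative as the regularized variants require.
if __name__ == '__main__':
  pt_row = np.array([[0., -1., 1.], [1., 0., -1.], [-1., 1., 0.]]) + 1.
  pt = np.stack([pt_row, pt_row.T])
  uniform = np.ones(3) / 3.
  pure_rock = np.array([1., 0., 0.])
  print(unreg_exploitability(uniform, pt))    # 0.0, uniform is the Nash
  print(unreg_exploitability(pure_rock, pt))  # 1.0, paper exploits rock
  # uniform also maximizes the regularized payoffs, so both are ~0
  print(ate_exploitability(uniform, pt, p=1.))
  print(qre_exploitability(uniform, pt, temperature=1.))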
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/symmetric/exploitability.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for computing gradient information: run games and record payoffs.
"""
from absl import logging # pylint:disable=unused-import
import numpy as np
def construct_game_queries(base_profile, num_checkpts):
"""Constructs a list of checkpoint selection tuples to query value function.
Each query tuple (p1's selected checkpt, ..., p7's selected checkpt)
fixes the players in the game of diplomacy to be played. It may be necessary
to play several games with the same players to form an accurate estimate of
the value or payoff for each player as checkpts contain stochastic policies.
Args:
base_profile: list of selected checkpts for each player, i.e.,
a sample from the player strategy profile ([x_i ~ p(x_i)])
num_checkpts: number of checkpts available to each player
Returns:
Set of query tuples containing a selected checkpoint index for each player.
"""
new_queries = set([])
pi, pj = 0, 1
new_profile = list(base_profile)
for ai in range(num_checkpts):
new_profile[pi] = ai
for aj in range(num_checkpts):
new_profile[pj] = aj
query = tuple(new_profile)
new_queries.update([query])
return new_queries
def construct_game_queries_for_exp(base_profile, num_checkpts):
"""Constructs a list of checkpoint selection tuples to query value function.
Each query tuple (p1's selected checkpt, ..., p7's selected checkpt)
fixes the players in the game of diplomacy to be played. It may be necessary
to play several games with the same players to form an accurate estimate of
the value or payoff for each player as checkpts contain stochastic policies.
Args:
base_profile: list of selected checkpts for each player, i.e.,
a sample from the player strategy profile ([x_i ~ p(x_i)])
num_checkpts: number of checkpts available to each player
Returns:
Set of query tuples containing a selected checkpoint index for each player.
"""
new_queries = set([])
pi = 0
new_profile = list(base_profile)
for ai in range(num_checkpts):
new_profile[pi] = ai
query = tuple(new_profile)
new_queries.update([query])
return new_queries
def run_games_and_record_payoffs(game_queries, evaluate_game, ckpt_to_policy):
"""Simulate games according to game queries and return results.
Args:
    game_queries: set of tuples containing indices specifying each player's
      strat
evaluate_game: callable function that takes a list of policies as argument
ckpt_to_policy: maps a strat (or checkpoint) to a policy
Returns:
dictionary: key=query, value=np.array of payoffs (1 for each player)
"""
game_results = {}
for query in game_queries:
policies = [ckpt_to_policy[ckpt] for ckpt in query]
payoffs = evaluate_game(policies)
game_results.update({query: payoffs})
return game_results
def form_payoff_matrices(game_results, num_checkpts):
"""Packages dictionary of game results into a payoff tensor.
Args:
game_results: dictionary of payoffs for each game evaluated
num_checkpts: int, number of strats (or ckpts) per player
Returns:
payoff_matrices: np.array (2 x num_checkpts x num_checkpts) with payoffs for
two players (assumes symmetric game and only info for 2 players is needed
for stochastic gradients)
"""
payoff_matrices = np.zeros((2, num_checkpts, num_checkpts))
for profile, payoffs in game_results.items():
i, j = profile[:2]
payoff_matrices[:, i, j] = payoffs[:2]
return payoff_matrices
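# Illustrative sketch (not part of the original module): wire the helpers
# together with a fake evaluator standing in for expensive game simulations.
if __name__ == '__main__':
  num_players, num_checkpts = 4, 3
  ckpt_to_policy = {ckpt: 'policy_%d' % ckpt for ckpt in range(num_checkpts)}
  def fake_evaluate_game(policies):
    # deterministic placeholder payoffs, one per player
    return np.array([sum(map(ord, p)) % 5 for p in policies], dtype=float)
  base_profile = [0] * num_players
  queries = construct_game_queries(base_profile, num_checkpts)
  results = run_games_and_record_payoffs(queries, fake_evaluate_game,
                                         ckpt_to_policy)
  payoff_matrices = form_payoff_matrices(results, num_checkpts)
  print(len(queries))           # 9 = num_checkpts ** 2 joint choices
  print(payoff_matrices.shape)  # (2, 3, 3)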
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/symmetric/game_runner.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic solver for non-symmetric games."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import simplex
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability
class Solver(object):
"""Generic Solver."""
def __init__(self, proj_grad=True, euclidean=False, rnd_init=False,
seed=None):
"""Ctor."""
self.num_players = None
self.proj_grad = proj_grad
self.rnd_init = rnd_init
self.lrs = (None,)
self.has_aux = False
self.euclidean = euclidean
if euclidean:
self.update = self.euc_descent_step
else:
self.update = self.mirror_descent_step
self.seed = seed
self.random = np.random.RandomState(seed)
def init_vars(self, num_strats, num_players):
"""Initialize solver parameters."""
self.num_players = num_players
if len(num_strats) != num_players:
raise ValueError('Must specify num strategies for each player')
init_dist = []
for num_strats_i in num_strats:
if self.rnd_init:
init_dist_i = self.random.rand(num_strats_i)
else:
init_dist_i = np.ones(num_strats_i)
init_dist_i /= init_dist_i.sum()
init_dist.append(init_dist_i)
return (init_dist,)
def compute_gradients(self, params, payoff_matrices):
"""Compute and return gradients for all parameters.
Args:
params: e.g., tuple of params (dist,)
payoff_matrices: dictionary with keys as tuples of agents (i, j) and
values of (2 x A x A) np.arrays, payoffs for each joint action. keys
are sorted and arrays should be indexed in the same order
Returns:
eg., tuple of gradients (grad_dist,)
"""
raise NotImplementedError('Should be implemented by specific solver.')
def exploitability(self, params, payoff_tensor):
"""Compute and return exploitability that solver is minimizing.
Args:
params: e.g., tuple of params (dist,)
payoff_tensor: (n x A1 x ... x An) np.array, payoffs for each joint
action. can also be list of (A1 x ... x An) np.arrays
Returns:
float, exploitability of current dist
"""
return exploitability.unreg_exploitability(params, payoff_tensor)
def euc_descent_step(self, params, grads, t):
"""Projected gradient descent on exploitability using Euclidean projection.
Args:
params: tuple of variables to be updated (dist,)
grads: tuple of variable gradients (grad_dist,)
t: int, solver iteration (unused)
Returns:
new_params: tuple of update params (new_dist,)
"""
del t
lr_dist = self.lrs[0]
new_params = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = dist_i - lr_dist * dist_grad_i
new_dist_i = simplex.euclidean_projection_onto_simplex(new_dist_i)
new_params.append(new_dist_i)
return (new_params,)
def mirror_descent_step(self, params, grads, t):
"""Entropic mirror descent on exploitability.
Args:
params: tuple of variables to be updated (dist - a list of np.arrays)
grads: tuple of variable gradients (grad_dist - a list of np.arrays)
t: int, solver iteration (unused)
Returns:
new_params: tuple of update params (new_dist)
"""
del t
lr_dist = self.lrs[0]
new_params = []
for dist_i, dist_grad_i in zip(params[0], grads[0]):
new_dist_i = np.clip(dist_i, 0, np.inf)
new_dist_i = special.softmax(np.log(new_dist_i) - lr_dist * dist_grad_i)
new_params.append(new_dist_i)
return (new_params,)
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/nonsymmetric/updates.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/nonsymmetric/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric.exploitability.
Computing the exploitability of a tsallis-entropy regularized game is more
involved, so we include a derivation here of an example test case using an
asymmetric prisoner's dilemma (see pd np.array below). Note that the
tsallis-entropy setting assumes non-negative payoffs so we add 3 to the array.
We assume p=1 for the tsallis entropy in this example.
dist = [(1/3, 2/3), (1/2, 1/2)]
-- Player 1 --
pt dist grad br payoff(br) payoff(dist)
[2 0] [1/2] = [1] --> [1/3] --> 5/3 --> 5/3
[3 1] [1/2] [2] [2/3]
s = sum(grad) = 3
tsallis-entr(br) = s / (p + 1) * (1 - br_1^2 - br_2^2)
= 3 / 2 * (1 - 1/9 - 4/9) = 2/3
tsallis-entr(dist) = s / (p + 1) * (1 - dist_1^2 - dist_2^2)
= 3 / 2 * (1 - 1/9 - 4/9) = 2/3
u_1(br_1) - u_1(dist) = 5/3 + 2/3 - 5/3 - 2/3 = 0
-- Player 2 --
pt dist grad br payoff(br) payoff(dist)
[3 0] [1/3] = [1] --> [1/3] --> 5/3 --> 3/2
[4 1] [2/3] [2] [2/3]
s = sum(grad) = 3
tsallis-entr(br) = s / (p + 1) * (1 - br_1^2 - br_2^2)
= 3 / 2 * (1 - 1/9 - 4/9) = 2/3
tsallis-entr(dist) = s / (p + 1) * (1 - dist_1^2 - dist_2^2)
= 3 / 2 * (1 - 1/4 - 1/4) = 3/4
u_2(br_2) - u_2(dist) = 5/3 + 2/3 - 3/2 - 3/4 = 7 / 3 - 9 / 4
"""
from absl import logging # pylint:disable=unused-import
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.algorithms.adidas_utils.helpers.nonsymmetric import exploitability
test_seed = 12345
# asymmetric prisoner's dilemma test case
# pylint:disable=bad-whitespace
pt_r = np.array([[2, 0],
[3, 1]])
pt_c = np.array([[3, 4],
[0, 1]])
# pylint:enable=bad-whitespace
pd = np.stack((pt_r, pt_c), axis=0)
pd_nash = [np.array([0, 1]), np.array([0, 1])]
pd_non_nash_1 = [np.array([1, 0]), np.array([1, 0])]
pd_non_nash_exp_1 = np.array([1., 1.])
pd_non_nash_ate_exp_1 = np.array([9. / 5., 16. / 7.])
pd_non_nash_2 = [np.array([1., 2.]) / 3., np.array([0.5, 0.5])]
pd_non_nash_exp_2 = np.array([1. / 3., 0.5])
pd_non_nash_ate_exp_2 = np.array([0., 7. / 3. - 9. / 4.])
qre_br_1 = np.exp([1, 2]) / np.exp([1, 2]).sum()
qre_br_2 = np.copy(qre_br_1)
entr_br_1 = -np.sum(qre_br_1 * np.log(qre_br_1))
entr_br_2 = -np.sum(qre_br_2 * np.log(qre_br_2))
entr_non_nash_2_1 = -np.sum(pd_non_nash_2[0] * np.log(pd_non_nash_2[0]))
entr_non_nash_2_2 = -np.sum(pd_non_nash_2[1] * np.log(pd_non_nash_2[1]))
u_br_minus_non_nash_1 = (qre_br_1 - pd_non_nash_2[0]).dot([1, 2])
u_br_minus_non_nash_2 = (qre_br_2 - pd_non_nash_2[1]).dot([1, 2])
pd_non_nash_qre_exp_2_1 = u_br_minus_non_nash_1 + entr_br_1 - entr_non_nash_2_1
pd_non_nash_qre_exp_2_2 = u_br_minus_non_nash_2 + entr_br_2 - entr_non_nash_2_2
pd_non_nash_qre_exp_2 = np.array([pd_non_nash_qre_exp_2_1,
pd_non_nash_qre_exp_2_2])
# rock-paper-scissors test case (nonsymmetric should work for symmetric as well)
# pylint:disable=bad-whitespace
pt_r = np.array([[0, -1, 1],
[1, 0, -1],
[-1, 1, 0]])
# pylint:enable=bad-whitespace
pt_r -= pt_r.min()
pt_c = pt_r.T
rps = np.stack((pt_r, pt_c), axis=0)
rps_nash = [np.ones(3) / 3., np.ones(3) / 3.]
rps_non_nash_1 = [np.array([1, 0, 0]), np.array([1, 0, 0])]
rps_non_nash_exp_1 = np.array([1., 1.])
rps_non_nash_2 = [np.array([0, 1, 0]), np.array([0, 1, 0])]
rps_non_nash_exp_2 = np.array([1., 1.])
rps_non_nash_3 = [np.array([0, 0, 1]), np.array([0, 0, 1])]
rps_non_nash_exp_3 = np.array([1., 1.])
# two-player game with different numbers of actions
# pylint:disable=bad-whitespace
pt_r = np.array([[2, 2],
[3, 0],
[0, 3]])
pt_c = np.array([[2, 1, 0],
[3, 0, 1]]).T
# pylint:enable=bad-whitespace
rect = [pt_r, pt_c]
rect_unreg_nash = [np.array([0, 1, 0]), np.array([1, 0])]
rect_unreg_nash_ate_exp = np.array([4. / 5., 0.])
qre_br_1 = np.exp([2, 3, 0]) / np.exp([2, 3, 0]).sum()
qre_br_2 = np.exp([1, 0]) / np.exp([1, 0]).sum()
entr_br_1 = -np.sum(qre_br_1 * np.log(qre_br_1))
entr_br_2 = -np.sum(qre_br_2 * np.log(qre_br_2))
entr_non_nash_2_1 = 0.
entr_non_nash_2_2 = 0.
u_br_minus_dist_1 = (qre_br_1 - rect_unreg_nash[0]).dot([2, 3, 0])
u_br_minus_dist_2 = (qre_br_2 - rect_unreg_nash[1]).dot([1, 0])
rect_qre_exp_1 = u_br_minus_dist_1 + entr_br_1 - entr_non_nash_2_1
rect_qre_exp_2 = u_br_minus_dist_2 + entr_br_2 - entr_non_nash_2_2
rect_unreg_nash_qre_exp = np.array([rect_qre_exp_1, rect_qre_exp_2])
class ExploitabilityTest(parameterized.TestCase):
@parameterized.named_parameters(
('PD_nash', pd, pd_nash),
('RPS_nash', rps, rps_nash),
('RECT_nash', rect, rect_unreg_nash),
)
def test_unreg_exploitability_of_nash(self, payoff_tensor, nash):
exp = exploitability.unreg_exploitability(nash, payoff_tensor, np.max)
self.assertEqual(exp, 0., 'nash should have zero exploitability')
@parameterized.named_parameters(
('PD_non_nash_1', pd, pd_non_nash_1, pd_non_nash_exp_1),
('PD_non_nash_2', pd, pd_non_nash_2, pd_non_nash_exp_2),
('RPS_non_nash_1', rps, rps_non_nash_1, rps_non_nash_exp_1),
('RPS_non_nash_2', rps, rps_non_nash_2, rps_non_nash_exp_2),
('RPS_non_nash_3', rps, rps_non_nash_3, rps_non_nash_exp_3),
)
def test_unreg_exploitability_of_non_nash(self, payoff_tensor, dist, exp):
no_op = lambda x: x
exp_pred = exploitability.unreg_exploitability(dist, payoff_tensor, no_op)
equal = np.allclose(exp_pred, exp)
msg = 'exploitability mismatch: pred={}, true={}'.format(exp_pred, exp)
self.assertTrue(equal, msg)
@parameterized.named_parameters(
('PD_rand', pd, test_seed),
('RPS_rand', rps, test_seed),
('RECT_rand', rect, test_seed),
)
def test_unreg_exploitability_of_rand(self, payoff_tensor, seed=None):
trials = 100
random = np.random.RandomState(seed)
num_strategies = payoff_tensor[0].shape
total_num_strategies = sum(num_strategies)
pseudo_dists = random.rand(trials, total_num_strategies)
exploitable = []
for pseudo_dist in pseudo_dists:
# first split and normalize pseudo_dist into strat for each player
pseudo_dist_i = np.split(pseudo_dist, np.cumsum(num_strategies)[:-1])
dist = [pdi / pdi.sum() for pdi in pseudo_dist_i]
exp = exploitability.unreg_exploitability(dist, payoff_tensor, np.max)
exploitable.append(exp > 0.)
perc = 100 * np.mean(exploitable)
logging.info('rand strat exploitable rate out of %d is %f', trials, perc)
self.assertEqual(perc, 100., 'found rand strat that was nash')
@parameterized.named_parameters(
('RPS_nash_p=0', rps, rps_nash, 0.),
('RPS_nash_p=0.1', rps, rps_nash, 0.1),
('RPS_nash_p=1', rps, rps_nash, 1.),
)
def test_ate_exploitability_of_nash(self, payoff_tensor, nash, p):
exp = exploitability.ate_exploitability(nash, payoff_tensor, p, np.max)
self.assertGreaterEqual(0., exp,
'uniform nash should have zero exploitability')
@parameterized.named_parameters(
('PD_non_nash_p=0', pd, 0., pd_non_nash_1, pd_non_nash_exp_1),
('PD_non_nash_p=1', pd, 1., pd_non_nash_2, pd_non_nash_ate_exp_2),
('RECT_non_nash_p=0', rect, 1., rect_unreg_nash, rect_unreg_nash_ate_exp),
)
def test_ate_exploitability_of_non_nash(self, payoff_tensor, p, dist, exp):
no_op = lambda x: x
exp_pred = exploitability.ate_exploitability(dist, payoff_tensor, p, no_op)
close = np.allclose(exp_pred, exp)
msg = 'exploitability mismatch: pred={}, true={}'.format(exp_pred, exp)
self.assertTrue(close, msg=msg)
@parameterized.named_parameters(
('RPS_rand_p=0', rps, 0., test_seed),
('RPS_rand_p=0.1', rps, 0.1, test_seed),
('RPS_rand_p=1', rps, 1., test_seed),
('RECT_rand_p=1', rect, 1., test_seed),
)
def test_ate_exploitability_of_rand(self, payoff_tensor, p, seed=None):
trials = 100
random = np.random.RandomState(seed)
num_strategies = payoff_tensor[0].shape
total_num_strategies = sum(num_strategies)
pseudo_dists = random.rand(trials, total_num_strategies)
exploitable = []
for pseudo_dist in pseudo_dists:
# first split and normalize pseudo_dist into strat for each player
pseudo_dist_i = np.split(pseudo_dist, np.cumsum(num_strategies)[:-1])
dist = [pdi / pdi.sum() for pdi in pseudo_dist_i]
exp = exploitability.ate_exploitability(dist, payoff_tensor, p, np.max)
exploitable.append(exp > 0.)
perc = 100 * np.mean(exploitable)
logging.info('rand strat exploitable rate out of %d is %f', trials, perc)
self.assertEqual(perc, 100., 'found rand strat that was nash')
@parameterized.named_parameters(
('RPS_nash_tau=0', rps, rps_nash, 0.),
('RPS_nash_tau=0.1', rps, rps_nash, 0.1),
('RPS_nash_tau=1', rps, rps_nash, 1.),
)
def test_qre_exploitability_of_nash(self, payoff_tensor, nash, temperature):
exp = exploitability.qre_exploitability(nash, payoff_tensor, temperature,
np.max)
self.assertGreaterEqual(1e-10, exp,
'uniform nash should have zero exploitability')
@parameterized.named_parameters(
('PD_non_nash_tau=0', pd, 0., pd_non_nash_1, pd_non_nash_exp_1),
('PD_non_nash_tau=1', pd, 1., pd_non_nash_2, pd_non_nash_qre_exp_2),
('RECT_non_nash_tau=1', rect, 1., rect_unreg_nash,
rect_unreg_nash_qre_exp),
)
def test_qre_exploitability_of_non_nash(self, payoff_tensor, temperature,
dist, exp):
no_op = lambda x: x
exp_pred = exploitability.qre_exploitability(dist, payoff_tensor,
temperature, no_op)
close = np.allclose(exp_pred, exp)
msg = 'exploitability mismatch: pred={}, true={}'.format(exp_pred, exp)
self.assertTrue(close, msg=msg)
@parameterized.named_parameters(
('RPS_rand_tau=0', rps, 0., test_seed),
('RPS_rand_tau=0.1', rps, 0.1, test_seed),
('RPS_rand_tau=1', rps, 1., test_seed),
('RECT_rand_tau=1', rect, 1., test_seed),
)
def test_qre_exploitability_of_rand(self, payoff_tensor, temperature,
seed=None):
trials = 100
random = np.random.RandomState(seed)
num_strategies = payoff_tensor[0].shape
total_num_strategies = sum(num_strategies)
pseudo_dists = random.rand(trials, total_num_strategies)
exploitable = []
for pseudo_dist in pseudo_dists:
# first split and normalize pseudo_dist into strat for each player
pseudo_dist_i = np.split(pseudo_dist, np.cumsum(num_strategies)[:-1])
dist = [pdi / pdi.sum() for pdi in pseudo_dist_i]
exp = exploitability.qre_exploitability(dist, payoff_tensor, temperature,
np.max)
exploitable.append(exp > 0.)
perc = 100 * np.mean(exploitable)
logging.info('rand strat exploitable rate out of %d is %f', trials, perc)
self.assertEqual(perc, 100., 'found rand strat that was nash')
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/nonsymmetric/exploitability_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exploitability measurement utils for general (sym and non-sym) games."""
from absl import logging # pylint:disable=unused-import
import numpy as np
from scipy import special
from open_spiel.python.algorithms.adidas_utils.helpers import misc
def unreg_exploitability(dist, payoff_tensor, aggregate=np.mean):
"""Compute (avg, max) exploitability of dist for non-symmetric game.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
payoff_tensor: (n x A1 x ... x An) np.array, payoffs for each joint action
can also be list of (A1 x ... x An) np.arrays
aggregate: function to reduce individual exp_is to scalar, e.g., mean or max
Returns:
exploitability (float): avg_i payoff_i of best response_i - payoff_i of dist
"""
num_players = len(payoff_tensor)
exp_i = []
for i in range(num_players):
nabla_i = misc.pt_reduce(payoff_tensor[i], dist, [i])
u_i_br = np.max(nabla_i)
u_i_dist = nabla_i.dot(dist[i])
exp_i.append(u_i_br - u_i_dist)
return aggregate(exp_i)
def ate_exploitability(dist, payoff_tensor, p=1, aggregate=np.mean):
"""Compute Tsallis regularized exploitability of dist for non-symmetric game.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
payoff_tensor: (n x A1 x ... x An) np.array, payoffs for each joint action
assumed to be non-negative. can also be list of (A1 x ... x An) np.arrays
p: float in [0, 1], Tsallis entropy-regularization --> 0 as p --> 0
aggregate: function to reduce individual exp_is to scalar, e.g., mean or max
Returns:
exploitability (float): avg_i payoff_i of best response_i - payoff_i of dist
"""
if np.min(payoff_tensor) < 0.:
raise ValueError('payoff tensor must be non-negative')
num_players = len(payoff_tensor)
exp_i = []
for i in range(num_players):
nabla_i = misc.pt_reduce(payoff_tensor[i], dist, [i])
dist_i = dist[i]
if p > 0:
power = 1./p
s = np.linalg.norm(nabla_i, ord=power)
br_i = (nabla_i / np.linalg.norm(nabla_i, ord=power))**power
else:
power = np.inf
s = np.linalg.norm(nabla_i, ord=power)
br_i = np.zeros_like(dist_i)
maxima = (nabla_i == s)
br_i[maxima] = 1. / maxima.sum()
u_i_br = nabla_i.dot(br_i) + s / (p + 1) * (1 - np.sum(br_i**(p + 1)))
u_i_dist = nabla_i.dot(dist_i) + s / (p + 1) * (1 - np.sum(dist_i**(p + 1)))
exp_i.append(u_i_br - u_i_dist)
return aggregate(exp_i)
def qre_exploitability(dist, payoff_tensor, temperature=0., aggregate=np.mean):
"""Compute Shannon regularized exploitability of dist for non-symmetric game.
Args:
dist: list of 1-d np.arrays, current estimate of nash distribution
payoff_tensor: (n x A1 x ... x An) np.array, payoffs for each joint action
assumed to be non-negative. can also be list of (A1 x ... x An) np.arrays
temperature: non-negative float
aggregate: function to reduce individual exp_is to scalar, e.g., mean or max
Returns:
exploitability (float): avg_i payoff_i of best response_i - payoff_i of dist
"""
num_players = len(payoff_tensor)
exp_i = []
for i in range(num_players):
nabla_i = misc.pt_reduce(payoff_tensor[i], dist, [i])
dist_i = dist[i]
if temperature > 0:
br_i = special.softmax(nabla_i / temperature)
else:
br_i = np.zeros_like(dist_i)
maxima = (nabla_i == np.max(nabla_i))
br_i[maxima] = 1. / maxima.sum()
u_i_br = nabla_i.dot(br_i) + temperature * special.entr(br_i).sum()
u_i_dist = nabla_i.dot(dist_i) + temperature * special.entr(dist_i).sum()
exp_i.append(u_i_br - u_i_dist)
return aggregate(exp_i)
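# Illustrative sketch (not part of the original module): exploitability in an
# asymmetric 2-player game where the players have different action counts,
# matching the rectangular test case in exploitability_test.py.
if __name__ == '__main__':
  payoff_row = np.array([[2., 2.], [3., 0.], [0., 3.]])   # 3 actions
  payoff_col = np.array([[2., 1., 0.], [3., 0., 1.]]).T   # 2 actions
  payoff_tensor_example = [payoff_row, payoff_col]
  dist_example = [np.array([0., 1., 0.]), np.array([1., 0.])]
  print(unreg_exploitability(dist_example, payoff_tensor_example,
                             aggregate=np.max))  # 0.0, an unregularized Nash
  print(ate_exploitability(dist_example, payoff_tensor_example, p=1.,
                           aggregate=np.mean))   # 0.4, mean of [0.8, 0.0]
  print(qre_exploitability(dist_example, payoff_tensor_example,
                           temperature=1.))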
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/nonsymmetric/exploitability.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for computing gradient information: run games and record payoffs.
"""
import itertools
from absl import logging # pylint:disable=unused-import
import numpy as np
def construct_game_queries(base_profile, num_checkpts):
"""Constructs a list of checkpoint selection tuples to query value function.
Each query tuple (key, query) where key = (pi, pj) and query is
(p1's selected checkpt, ..., p7's selected checkpt) fixes the players in the
game of diplomacy to be played. It may be necessary to play several games with
the same players to form an accurate estimate of the value or payoff for each
player as checkpts contain stochastic policies.
Args:
base_profile: list of selected checkpts for each player, i.e.,
a sample from the player strategy profile ([x_i ~ p(x_i)])
num_checkpts: list of ints, number of strats (or ckpts) per player
Returns:
Set of query tuples containing a selected checkpoint index for each player.
"""
new_queries = set([])
num_players = len(base_profile)
for pi, pj in itertools.combinations(range(num_players), 2):
new_profile = list(base_profile)
for ai in range(num_checkpts[pi]):
new_profile[pi] = ai
for aj in range(num_checkpts[pj]):
new_profile[pj] = aj
query = tuple(new_profile)
pair = (pi, pj)
new_queries.update([(pair, query)])
return new_queries
def construct_game_queries_for_exp(base_profile, num_checkpts):
"""Constructs a list of checkpoint selection tuples to query value function.
Each query tuple (key, query) where key = (pi,) and query is
(p1's selected checkpt, ..., p7's selected checkpt) fixes the players in the
game of diplomacy to be played. It may be necessary to play several games with
the same players to form an accurate estimate of the value or payoff for each
player as checkpts contain stochastic policies.
Args:
base_profile: list of selected checkpts for each player, i.e.,
a sample from the player strategy profile ([x_i ~ p(x_i)])
num_checkpts: list of ints, number of strats (or ckpts) per player
Returns:
Set of query tuples containing a selected checkpoint index for each player.
"""
new_queries = set([])
num_players = len(base_profile)
for pi in range(num_players):
new_profile = list(base_profile)
for ai in range(num_checkpts[pi]):
new_profile[pi] = ai
query = tuple(new_profile)
new_queries.update([(pi, query)])
return new_queries
def run_games_and_record_payoffs(game_queries, evaluate_game, ckpt_to_policy):
"""Simulate games according to game queries and return results.
Args:
game_queries: set of tuples containing indices specifying each players strat
key_query = (agent_tuple, profile_tuple) format
evaluate_game: callable function that takes a list of policies as argument
ckpt_to_policy: list of maps from strat (or checkpoint) to a policy, one
map for each player
Returns:
dictionary: key=key_query, value=np.array of payoffs (1 for each player)
"""
game_results = {}
for key_query in game_queries:
_, query = key_query
policies = [ckpt_to_policy[pi][ckpt_i] for pi, ckpt_i in enumerate(query)]
payoffs = evaluate_game(policies)
game_results.update({key_query: payoffs})
return game_results
def form_payoff_matrices(game_results, num_checkpts):
"""Packages dictionary of game results into a payoff tensor.
Args:
game_results: dictionary of payoffs for each game evaluated, keys are
(pair, profile) where pair is a tuple of the two agents played against
each other and profile indicates pure joint action played by all agents
num_checkpts: list of ints, number of strats (or ckpts) per player
Returns:
payoff_matrices: dict of np.arrays (2 x num_checkpts x num_checkpts) with
payoffs for two players. keys are pairs above with lowest index agent
first
"""
num_players = len(num_checkpts)
payoff_matrices = {}
for pi, pj in itertools.combinations(range(num_players), 2):
key = (pi, pj)
payoff_matrices[key] = np.zeros((2, num_checkpts[pi], num_checkpts[pj]))
for key_profile, payoffs in game_results.items():
key, profile = key_profile
i, j = key
ai = profile[i]
aj = profile[j]
payoff_matrices[key][0, ai, aj] = payoffs[i]
payoff_matrices[key][1, ai, aj] = payoffs[j]
return payoff_matrices
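# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical walk-through of the helpers above: build the
# pairwise queries for a 3-player game with 2 checkpoints per player,
# evaluate them with a dummy value function, and package the results into
# payoff matrices. `_demo_game_runner`, `dummy_evaluate` and `dummy_policies`
# are illustrative names, not part of the library.
def _demo_game_runner():
  num_checkpts = [2, 2, 2]
  base_profile = [0, 0, 0]
  queries = construct_game_queries(base_profile, num_checkpts)
  # One placeholder "policy" object per (player, checkpoint) pair.
  dummy_policies = [{0: "p%d_ckpt0" % p, 1: "p%d_ckpt1" % p}
                    for p in range(3)]
  # A dummy evaluator that returns a zero payoff for every player.
  dummy_evaluate = lambda policies: np.zeros(len(policies))
  results = run_games_and_record_payoffs(queries, dummy_evaluate,
                                         dummy_policies)
  payoff_matrices = form_payoff_matrices(results, num_checkpts)
  # One (2 x 2 x 2) array per player pair: keys (0, 1), (0, 2), (1, 2).
  return payoff_matrices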
| open_spiel-master | open_spiel/python/algorithms/adidas_utils/helpers/nonsymmetric/game_runner.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export the model's Tensorflow graph as a protobuf."""
from absl import app
from absl import flags
from open_spiel.python.algorithms.alpha_zero import model as model_lib
import pyspiel
FLAGS = flags.FLAGS
flags.DEFINE_string("game", None, "Name of the game")
flags.DEFINE_string("path", None, "Directory to save graph")
flags.DEFINE_string("graph_def", None, "Filename for the graph")
flags.DEFINE_enum("nn_model", "resnet", model_lib.Model.valid_model_types,
"What type of model should be used?.")
flags.DEFINE_integer("nn_width", 2 ** 7, "How wide should the network be.")
flags.DEFINE_integer("nn_depth", 10, "How deep should the network be.")
flags.DEFINE_float("learning_rate", 0.0001, "Learning rate used for training")
flags.DEFINE_float("weight_decay", 0.0001, "L2 regularization strength.")
flags.DEFINE_bool("verbose", False, "Print information about the model.")
flags.mark_flag_as_required("game")
flags.mark_flag_as_required("path")
flags.mark_flag_as_required("graph_def")
def main(_):
game = pyspiel.load_game(FLAGS.game)
model = model_lib.Model.build_model(
FLAGS.nn_model, game.observation_tensor_shape(),
game.num_distinct_actions(), FLAGS.nn_width, FLAGS.nn_depth,
FLAGS.weight_decay, FLAGS.learning_rate, FLAGS.path)
model.write_graph(FLAGS.graph_def)
if FLAGS.verbose:
print("Game:", FLAGS.game)
print("Model type: %s(%s, %s)" % (FLAGS.nn_model, FLAGS.nn_width,
FLAGS.nn_depth))
print("Model size:", model.num_trainable_variables, "variables")
print("Variables:")
model.print_trainable_variables()
if __name__ == "__main__":
app.run(main)
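# Example invocation (illustrative; the paths and filename are placeholders):
#   python export_model.py --game=tic_tac_toe --path=/tmp/az \
#       --graph_def=graph.pb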
| open_spiel-master | open_spiel/python/algorithms/alpha_zero/export_model.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.alpha_zero.model."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python.algorithms.alpha_zero import model as model_lib
import pyspiel
solved = {}
def solve_game(state):
state_str = str(state)
if state_str in solved:
return solved[state_str].value
if state.is_terminal():
return state.returns()[0]
max_player = state.current_player() == 0
obs = state.observation_tensor()
act_mask = np.array(state.legal_actions_mask())
values = np.full(act_mask.shape, -2 if max_player else 2)
for action in state.legal_actions():
values[action] = solve_game(state.child(action))
value = values.max() if max_player else values.min()
best_actions = np.where((values == value) & act_mask)
policy = np.zeros_like(act_mask)
policy[best_actions[0][0]] = 1 # Choose the first for a deterministic policy.
solved[state_str] = model_lib.TrainInput(obs, act_mask, policy, value)
return value
def build_model(game, model_type):
return model_lib.Model.build_model(
model_type, game.observation_tensor_shape(), game.num_distinct_actions(),
nn_width=32, nn_depth=2, weight_decay=1e-4, learning_rate=0.01, path=None)
class ModelTest(parameterized.TestCase):
@parameterized.parameters(model_lib.Model.valid_model_types)
def test_model_learns_simple(self, model_type):
game = pyspiel.load_game("tic_tac_toe")
model = build_model(game, model_type)
print("Num variables:", model.num_trainable_variables)
model.print_trainable_variables()
train_inputs = []
state = game.new_initial_state()
while not state.is_terminal():
obs = state.observation_tensor()
act_mask = state.legal_actions_mask()
action = state.legal_actions()[0]
policy = np.zeros(len(act_mask), dtype=float)
policy[action] = 1
train_inputs.append(model_lib.TrainInput(obs, act_mask, policy, value=1))
state.apply_action(action)
value, policy = model.inference([obs], [act_mask])
self.assertLen(policy, 1)
self.assertLen(value, 1)
self.assertLen(policy[0], game.num_distinct_actions())
self.assertLen(value[0], 1)
losses = []
policy_loss_goal = 0.05
value_loss_goal = 0.05
for i in range(200):
loss = model.update(train_inputs)
print(i, loss)
losses.append(loss)
if loss.policy < policy_loss_goal and loss.value < value_loss_goal:
break
self.assertGreater(losses[0].total, losses[-1].total)
self.assertGreater(losses[0].policy, losses[-1].policy)
self.assertGreater(losses[0].value, losses[-1].value)
self.assertLess(losses[-1].value, value_loss_goal)
self.assertLess(losses[-1].policy, policy_loss_goal)
@parameterized.parameters(model_lib.Model.valid_model_types)
def test_model_learns_optimal(self, model_type):
game = pyspiel.load_game("tic_tac_toe")
solve_game(game.new_initial_state())
model = build_model(game, model_type)
print("Num variables:", model.num_trainable_variables)
model.print_trainable_variables()
train_inputs = list(solved.values())
print("states:", len(train_inputs))
losses = []
policy_loss_goal = 0.12
value_loss_goal = 0.12
for i in range(500):
loss = model.update(train_inputs)
print(i, loss)
losses.append(loss)
if loss.policy < policy_loss_goal and loss.value < value_loss_goal:
break
self.assertGreater(losses[0].policy, losses[-1].policy)
self.assertGreater(losses[0].value, losses[-1].value)
self.assertGreater(losses[0].total, losses[-1].total)
self.assertLess(losses[-1].value, value_loss_goal)
self.assertLess(losses[-1].policy, policy_loss_goal)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/alpha_zero/model_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Output the config and graphs for an experiment.
This reads the config.json and learner.jsonl from an alpha zero experiment.
"""
import datetime
import json
import math
import os
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from open_spiel.python.utils import gfile
X_AXIS = {
"step": "step",
"time": "time_rel_h",
"states": "total_states",
}
flags.DEFINE_string("path", None,
"Where to find config.json and learner.jsonl.")
flags.DEFINE_enum("x_axis", "step", list(X_AXIS.keys()),
"What should be on the x-axis.")
flags.mark_flag_as_required("path")
FLAGS = flags.FLAGS
MAX_WIDTH = int(os.getenv("COLUMNS", "200")) # Get your TTY width.
SMOOTHING_RATE = 10
SUBSAMPLING_MAX = 200
def print_columns(strings, max_width=MAX_WIDTH):
"""Prints a list of strings in columns."""
padding = 2
shortest = min(len(s) for s in strings)
max_columns = max(1, math.floor((max_width - 1) / (shortest + 2 * padding)))
for cols in range(max_columns, 0, -1):
rows = math.ceil(len(strings) / cols)
chunks = [strings[i:i + rows] for i in range(0, len(strings), rows)]
col_widths = [max(len(s) for s in chunk) for chunk in chunks]
if sum(col_widths) + 2 * padding * len(col_widths) <= max_width:
break
for r in range(rows):
for c in range(cols):
i = r + c * rows
if i < len(strings):
print(" " * padding + strings[i].ljust(col_widths[c] + padding), end="")
print()
def load_jsonl_data(filename):
with gfile.Open(filename) as f:
return [json.loads(l) for l in f.readlines()]
def sub_sample(data, count):
return data[::(max(1, len(data) // count))]
def smooth(data, count):
for k in data.keys():
if not isinstance(k, str) or not k.startswith("time_"):
data[k] = data[k].rolling(max(1, len(data) // count)).mean()
return data
def subselect(row, keys):
for key in keys:
row = row[key]
return row
def select(data, keys):
return [subselect(row, keys) for row in data]
def prepare(data, cols):
"""Given the dataset and a list of columns return a small pandas dataframe."""
for col in ["step", "total_states", "total_trajectories", "time_rel"]:
cols[col] = [col]
subdata = {key: select(data, col) for key, col in cols.items()}
# subdata = list(zip(*subdata)) # transpose
df = pd.DataFrame(subdata)
df = smooth(df, SMOOTHING_RATE)
df = sub_sample(df, SUBSAMPLING_MAX)
df["time_rel_h"] = df["time_rel"] / 3600
df["zero"] = 0
return df
def subplot(rows, cols, pos, *args, **kwargs):
ax = plt.subplot(rows, cols, pos, *args, **kwargs)
ax.tick_params(top=False, right=False) # Don't interfere with the titles.
return ax
def plot_avg_stddev(ax, x, data, data_col):
"""Plot stats produced by open_spiel::BasicStats::ToJson."""
cols = ["avg", "std_dev", "min", "max"]
df = prepare(data, {v: data_col + [v] for v in cols})
df.plot(ax=ax, x=x, y="avg", color="b")
plt.fill_between(
x=df[x], color="b", alpha=0.2, label="std dev",
y1=np.nanmax([df["min"], df["avg"] - df["std_dev"]], 0),
y2=np.nanmin([df["max"], df["avg"] + df["std_dev"]], 0))
plt.fill_between(
x=df[x], color="b", alpha=0.2, label="min/max",
y1=df["min"], y2=df["max"])
plot_zero(df, ax, x)
def plot_histogram_numbered(ax, x, data, data_col):
"""Plot stats produced by open_spiel::HistogramNumbered::ToJson."""
x_min, x_max = 0, data[-1][x]
y_min, y_max = 0, len(subselect(data, [0] + data_col))
z_min, z_max = 0, 1
z = np.array([subselect(row, data_col) for row in data], dtype=float)
z = np.concatenate((z, np.zeros((x_max, 1))), axis=1) # Don't cut off the top
# TODO(author7): smoothing
z = sub_sample(z, SUBSAMPLING_MAX).transpose()
p = np.percentile(z, 99)
if p > 0:
z /= p
z[z > 1] = 1
ax.grid(False)
ax.imshow(z, cmap="Reds", vmin=z_min, vmax=z_max,
extent=[x_min, x_max, y_min, y_max + 1],
interpolation="nearest", origin="lower", aspect="auto")
def plot_histogram_named(ax, x, data, data_col, normalized=True):
"""Plot stats produced by open_spiel::HistogramNamed::ToJson."""
names = subselect(data, [0] + data_col + ["names"])
df = prepare(data, {name: data_col + ["counts", i]
for i, name in enumerate(names)})
if normalized:
total = sum(df[n] for n in names)
for n in names:
df[n] /= total
df.plot.area(ax=ax, x=x, y=names)
def plot_zero(df, ax, x):
df.plot(ax=ax, x=x, y="zero", label="", visible=False)
def plot_data(config, data):
"""Plot a bunch of graphs from an alphazero experiment."""
num_rows, num_cols = 3, 4
x = X_AXIS[FLAGS.x_axis]
fig = plt.figure(figsize=(num_cols * 7, num_rows * 6))
fig.suptitle(
("Game: {}, Model: {}({}, {}), training time: {}, training steps: {}, "
"states: {}, games: {}").format(
config["game"], config["nn_model"], config["nn_width"],
config["nn_depth"],
datetime.timedelta(seconds=int(data[-1]["time_rel"])),
int(data[-1]["step"]), int(data[-1]["total_states"]),
int(data[-1]["total_trajectories"])))
cols = ["value", "policy", "l2reg", "sum"]
df = prepare(data, {v: ["loss", v] for v in cols})
ax = subplot(num_rows, num_cols, 1, title="Training loss")
for y in cols:
df.plot(ax=ax, x=x, y=y)
cols = list(range(len(data[0]["value_accuracy"])))
df = prepare(data, {i: ["value_accuracy", i, "avg"] for i in cols})
ax = subplot(num_rows, num_cols, 2, # ylim=(0, 1.05),
title="MCTS value prediction accuracy")
for y in cols:
df.plot(ax=ax, x=x, y=y)
cols = list(range(len(data[0]["value_prediction"])))
df = prepare(data, {i: ["value_prediction", i, "avg"] for i in cols})
ax = subplot(num_rows, num_cols, 3, # ylim=(0, 1.05),
title="MCTS absolute value prediction")
for y in cols:
df.plot(ax=ax, x=x, y=y)
cols = list(range(len(data[0]["eval"]["results"])))
df = prepare(data, {i: ["eval", "results", i] for i in cols})
ax = subplot(num_rows, num_cols, 4, ylim=(-1, 1),
title="Evaluation returns vs MCTS+Solver with x10^(n/2) sims")
ax.axhline(y=0, color="black")
for y in cols:
df.plot(ax=ax, x=x, y=y)
df = prepare(data, {"states_per_s": ["states_per_s"]})
ax = subplot(num_rows, num_cols, 5, title="Speed of actor state/s")
df.plot(ax=ax, x=x, y="states_per_s")
plot_zero(df, ax, x)
cols = ["requests_per_s", "misses_per_s"]
df = prepare(data, {v: ["cache", v] for v in cols})
ax = subplot(num_rows, num_cols, 6, title="Cache requests/s")
for y in cols:
df.plot(ax=ax, x=x, y=y)
plot_zero(df, ax, x)
cols = ["hit_rate", "usage"]
df = prepare(data, {v: ["cache", v] for v in cols})
ax = subplot(num_rows, num_cols, 7, title="Cache usage and hit rate.",
ylim=(0, 1.05))
for y in cols:
df.plot(ax=ax, x=x, y=y)
ax = subplot(num_rows, num_cols, 8, title="Outcomes", ylim=(0, 1))
plot_histogram_named(ax, x, data, ["outcomes"])
ax = subplot(num_rows, num_cols, 9,
title="Inference batch size + stddev + min/max")
plot_avg_stddev(ax, x, data, ["batch_size"])
ax = subplot(num_rows, num_cols, 10, title="Inference batch size")
plot_histogram_numbered(ax, x, data, ["batch_size_hist"])
ax = subplot(num_rows, num_cols, 11, title="Game length + stddev + min/max")
plot_avg_stddev(ax, x, data, ["game_length"])
ax = subplot(num_rows, num_cols, 12, title="Game length histogram")
plot_histogram_numbered(ax, x, data, ["game_length_hist"])
plt.show()
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
with gfile.Open(os.path.join(FLAGS.path, "config.json")) as f:
config = json.load(f)
data = load_jsonl_data(os.path.join(FLAGS.path, "learner.jsonl"))
print("config:")
print_columns(sorted("{}: {}".format(k, v) for k, v in config.items()))
print()
print("data keys:")
print_columns(sorted(data[0].keys()))
print()
print("training time:", datetime.timedelta(seconds=int(data[-1]["time_rel"])))
print("training steps: %d" % (data[-1]["step"]))
print("total states: %d" % (data[-1]["total_states"]))
print("total trajectories: %d" % (data[-1]["total_trajectories"]))
print()
try:
plot_data(config, data)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
app.run(main)
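# Example invocation (illustrative; the path is a placeholder):
#   python analysis.py --path=/tmp/az_experiment --x_axis=time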
| open_spiel-master | open_spiel/python/algorithms/alpha_zero/analysis.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic AlphaZero implementation.
This implements the AlphaZero training algorithm. It spawns N actors which feed
trajectories into a replay buffer that is consumed by a learner. The learner
generates new weights, saves a checkpoint, and tells the actors to update. There
are also M evaluators running games continuously against a standard MCTS+Solver,
though each at a different difficulty (i.e., the number of MCTS simulations).
Due to the multi-process nature of this algorithm, the logs are written to files,
one per process. The learner logs are also output to stdout. The checkpoints are
also written to the same directory.
Links to relevant articles/papers:
https://deepmind.com/blog/article/alphago-zero-starting-scratch has an open
access link to the AlphaGo Zero nature paper.
https://deepmind.com/blog/article/alphazero-shedding-new-light-grand-games-chess-shogi-and-go
has an open access link to the AlphaZero science paper.
"""
import collections
import datetime
import functools
import itertools
import json
import os
import random
import sys
import tempfile
import time
import traceback
import numpy as np
from open_spiel.python.algorithms import mcts
from open_spiel.python.algorithms.alpha_zero import evaluator as evaluator_lib
from open_spiel.python.algorithms.alpha_zero import model as model_lib
import pyspiel
from open_spiel.python.utils import data_logger
from open_spiel.python.utils import file_logger
from open_spiel.python.utils import spawn
from open_spiel.python.utils import stats
# Time to wait for processes to join.
JOIN_WAIT_DELAY = 0.001
class TrajectoryState(object):
"""A particular point along a trajectory."""
def __init__(self, observation, current_player, legals_mask, action, policy,
value):
self.observation = observation
self.current_player = current_player
self.legals_mask = legals_mask
self.action = action
self.policy = policy
self.value = value
class Trajectory(object):
"""A sequence of observations, actions and policies, and the outcomes."""
def __init__(self):
self.states = []
self.returns = None
def add(self, information_state, action, policy):
self.states.append((information_state, action, policy))
class Buffer(object):
"""A fixed size buffer that keeps the newest values."""
def __init__(self, max_size):
self.max_size = max_size
self.data = []
self.total_seen = 0 # The number of items that have passed through.
def __len__(self):
return len(self.data)
def __bool__(self):
return bool(self.data)
def append(self, val):
return self.extend([val])
def extend(self, batch):
batch = list(batch)
self.total_seen += len(batch)
self.data.extend(batch)
self.data[:-self.max_size] = []
def sample(self, count):
return random.sample(self.data, count)
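# Illustrative note: a Buffer(max_size=2) that sees extend([1, 2, 3]) keeps
# only the newest two items, so .data == [2, 3] while .total_seen == 3;
# sample(k) then draws k items uniformly without replacement from .data.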
class Config(collections.namedtuple(
"Config", [
"game",
"path",
"learning_rate",
"weight_decay",
"train_batch_size",
"replay_buffer_size",
"replay_buffer_reuse",
"max_steps",
"checkpoint_freq",
"actors",
"evaluators",
"evaluation_window",
"eval_levels",
"uct_c",
"max_simulations",
"policy_alpha",
"policy_epsilon",
"temperature",
"temperature_drop",
"nn_model",
"nn_width",
"nn_depth",
"observation_shape",
"output_size",
"quiet",
])):
"""A config for the model/experiment."""
pass
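# --- Illustrative sketch (not part of the original module) ---
# One way to fill in a Config by hand, e.g. for a quick local run on
# tic_tac_toe. All values below are assumptions chosen for illustration;
# alpha_zero() itself fills in observation_shape and output_size.
def _example_config():
  return Config(
      game="tic_tac_toe",
      path=None,
      learning_rate=0.001,
      weight_decay=1e-4,
      train_batch_size=128,
      replay_buffer_size=2**14,
      replay_buffer_reuse=4,
      max_steps=25,
      checkpoint_freq=5,
      actors=2,
      evaluators=1,
      evaluation_window=50,
      eval_levels=7,
      uct_c=1.4,
      max_simulations=20,
      policy_alpha=1.0,
      policy_epsilon=0.25,
      temperature=1.0,
      temperature_drop=4,
      nn_model="mlp",
      nn_width=64,
      nn_depth=2,
      observation_shape=None,
      output_size=None,
      quiet=True,
  )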
def _init_model_from_config(config):
return model_lib.Model.build_model(
config.nn_model,
config.observation_shape,
config.output_size,
config.nn_width,
config.nn_depth,
config.weight_decay,
config.learning_rate,
config.path)
def watcher(fn):
"""A decorator to fn/processes that gives a logger and logs exceptions."""
@functools.wraps(fn)
def _watcher(*, config, num=None, **kwargs):
"""Wrap the decorated function."""
name = fn.__name__
if num is not None:
name += "-" + str(num)
with file_logger.FileLogger(config.path, name, config.quiet) as logger:
print("{} started".format(name))
logger.print("{} started".format(name))
try:
return fn(config=config, logger=logger, **kwargs)
except Exception as e:
logger.print("\n".join([
"",
" Exception caught ".center(60, "="),
traceback.format_exc(),
"=" * 60,
]))
print("Exception caught in {}: {}".format(name, e))
raise
finally:
logger.print("{} exiting".format(name))
print("{} exiting".format(name))
return _watcher
def _init_bot(config, game, evaluator_, evaluation):
"""Initializes a bot."""
noise = None if evaluation else (config.policy_epsilon, config.policy_alpha)
return mcts.MCTSBot(
game,
config.uct_c,
config.max_simulations,
evaluator_,
solve=False,
dirichlet_noise=noise,
child_selection_fn=mcts.SearchNode.puct_value,
verbose=False,
dont_return_chance_node=True)
def _play_game(logger, game_num, game, bots, temperature, temperature_drop):
"""Play one game, return the trajectory."""
trajectory = Trajectory()
actions = []
state = game.new_initial_state()
random_state = np.random.RandomState()
logger.opt_print(" Starting game {} ".format(game_num).center(60, "-"))
logger.opt_print("Initial state:\n{}".format(state))
while not state.is_terminal():
if state.is_chance_node():
# For chance nodes, rollout according to chance node's probability
# distribution
outcomes = state.chance_outcomes()
action_list, prob_list = zip(*outcomes)
action = random_state.choice(action_list, p=prob_list)
state.apply_action(action)
else:
root = bots[state.current_player()].mcts_search(state)
policy = np.zeros(game.num_distinct_actions())
for c in root.children:
policy[c.action] = c.explore_count
policy = policy**(1 / temperature)
policy /= policy.sum()
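      # The normalized, temperature-softened visit counts form the training
      # target policy; after `temperature_drop` moves the search's best child
      # is played greedily instead of sampling from this distribution.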
if len(actions) >= temperature_drop:
action = root.best_child().action
else:
action = np.random.choice(len(policy), p=policy)
trajectory.states.append(
TrajectoryState(state.observation_tensor(), state.current_player(),
state.legal_actions_mask(), action, policy,
root.total_reward / root.explore_count))
action_str = state.action_to_string(state.current_player(), action)
actions.append(action_str)
logger.opt_print("Player {} sampled action: {}".format(
state.current_player(), action_str))
state.apply_action(action)
logger.opt_print("Next state:\n{}".format(state))
trajectory.returns = state.returns()
logger.print("Game {}: Returns: {}; Actions: {}".format(
game_num, " ".join(map(str, trajectory.returns)), " ".join(actions)))
return trajectory
def update_checkpoint(logger, queue, model, az_evaluator):
"""Read the queue for a checkpoint to load, or an exit signal."""
path = None
while True: # Get the last message, ignore intermediate ones.
try:
path = queue.get_nowait()
except spawn.Empty:
break
if path:
logger.print("Inference cache:", az_evaluator.cache_info())
logger.print("Loading checkpoint", path)
model.load_checkpoint(path)
az_evaluator.clear_cache()
elif path is not None: # Empty string means stop this process.
return False
return True
@watcher
def actor(*, config, game, logger, queue):
"""An actor process runner that generates games and returns trajectories."""
logger.print("Initializing model")
model = _init_model_from_config(config)
logger.print("Initializing bots")
az_evaluator = evaluator_lib.AlphaZeroEvaluator(game, model)
bots = [
_init_bot(config, game, az_evaluator, False),
_init_bot(config, game, az_evaluator, False),
]
for game_num in itertools.count():
if not update_checkpoint(logger, queue, model, az_evaluator):
return
queue.put(_play_game(logger, game_num, game, bots, config.temperature,
config.temperature_drop))
@watcher
def evaluator(*, game, config, logger, queue):
"""A process that plays the latest checkpoint vs standard MCTS."""
results = Buffer(config.evaluation_window)
logger.print("Initializing model")
model = _init_model_from_config(config)
logger.print("Initializing bots")
az_evaluator = evaluator_lib.AlphaZeroEvaluator(game, model)
random_evaluator = mcts.RandomRolloutEvaluator()
for game_num in itertools.count():
if not update_checkpoint(logger, queue, model, az_evaluator):
return
az_player = game_num % 2
difficulty = (game_num // 2) % config.eval_levels
max_simulations = int(config.max_simulations * (10 ** (difficulty / 2)))
bots = [
_init_bot(config, game, az_evaluator, True),
mcts.MCTSBot(
game,
config.uct_c,
max_simulations,
random_evaluator,
solve=True,
verbose=False,
dont_return_chance_node=True)
]
if az_player == 1:
bots = list(reversed(bots))
trajectory = _play_game(logger, game_num, game, bots, temperature=1,
temperature_drop=0)
results.append(trajectory.returns[az_player])
queue.put((difficulty, trajectory.returns[az_player]))
logger.print("AZ: {}, MCTS: {}, AZ avg/{}: {:.3f}".format(
trajectory.returns[az_player],
trajectory.returns[1 - az_player],
len(results), np.mean(results.data)))
@watcher
def learner(*, game, config, actors, evaluators, broadcast_fn, logger):
"""A learner that consumes the replay buffer and trains the network."""
logger.also_to_stdout = True
replay_buffer = Buffer(config.replay_buffer_size)
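  # Train once this many new states have arrived, so each state is reused
  # roughly `replay_buffer_reuse` times before it leaves the buffer.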
learn_rate = config.replay_buffer_size // config.replay_buffer_reuse
logger.print("Initializing model")
model = _init_model_from_config(config)
logger.print("Model type: %s(%s, %s)" % (config.nn_model, config.nn_width,
config.nn_depth))
logger.print("Model size:", model.num_trainable_variables, "variables")
save_path = model.save_checkpoint(0)
logger.print("Initial checkpoint:", save_path)
broadcast_fn(save_path)
data_log = data_logger.DataLoggerJsonLines(config.path, "learner", True)
stage_count = 7
value_accuracies = [stats.BasicStats() for _ in range(stage_count)]
value_predictions = [stats.BasicStats() for _ in range(stage_count)]
game_lengths = stats.BasicStats()
game_lengths_hist = stats.HistogramNumbered(game.max_game_length() + 1)
outcomes = stats.HistogramNamed(["Player1", "Player2", "Draw"])
evals = [Buffer(config.evaluation_window) for _ in range(config.eval_levels)]
total_trajectories = 0
def trajectory_generator():
"""Merge all the actor queues into a single generator."""
while True:
found = 0
for actor_process in actors:
try:
yield actor_process.queue.get_nowait()
except spawn.Empty:
pass
else:
found += 1
if found == 0:
time.sleep(0.01) # 10ms
def collect_trajectories():
"""Collects the trajectories from actors into the replay buffer."""
num_trajectories = 0
num_states = 0
for trajectory in trajectory_generator():
num_trajectories += 1
num_states += len(trajectory.states)
game_lengths.add(len(trajectory.states))
game_lengths_hist.add(len(trajectory.states))
p1_outcome = trajectory.returns[0]
if p1_outcome > 0:
outcomes.add(0)
elif p1_outcome < 0:
outcomes.add(1)
else:
outcomes.add(2)
replay_buffer.extend(
model_lib.TrainInput(
s.observation, s.legals_mask, s.policy, p1_outcome)
for s in trajectory.states)
for stage in range(stage_count):
# Scale for the length of the game
index = (len(trajectory.states) - 1) * stage // (stage_count - 1)
n = trajectory.states[index]
accurate = (n.value >= 0) == (trajectory.returns[n.current_player] >= 0)
value_accuracies[stage].add(1 if accurate else 0)
value_predictions[stage].add(abs(n.value))
if num_states >= learn_rate:
break
return num_trajectories, num_states
def learn(step):
"""Sample from the replay buffer, update weights and save a checkpoint."""
losses = []
for _ in range(len(replay_buffer) // config.train_batch_size):
data = replay_buffer.sample(config.train_batch_size)
losses.append(model.update(data))
# Always save a checkpoint, either for keeping or for loading the weights to
# the actors. It only allows numbers, so use -1 as "latest".
save_path = model.save_checkpoint(
step if step % config.checkpoint_freq == 0 else -1)
losses = sum(losses, model_lib.Losses(0, 0, 0)) / len(losses)
logger.print(losses)
logger.print("Checkpoint saved:", save_path)
return save_path, losses
last_time = time.time() - 60
for step in itertools.count(1):
for value_accuracy in value_accuracies:
value_accuracy.reset()
for value_prediction in value_predictions:
value_prediction.reset()
game_lengths.reset()
game_lengths_hist.reset()
outcomes.reset()
num_trajectories, num_states = collect_trajectories()
total_trajectories += num_trajectories
now = time.time()
seconds = now - last_time
last_time = now
logger.print("Step:", step)
logger.print(
("Collected {:5} states from {:3} games, {:.1f} states/s. "
"{:.1f} states/(s*actor), game length: {:.1f}").format(
num_states, num_trajectories, num_states / seconds,
num_states / (config.actors * seconds),
num_states / num_trajectories))
logger.print("Buffer size: {}. States seen: {}".format(
len(replay_buffer), replay_buffer.total_seen))
save_path, losses = learn(step)
for eval_process in evaluators:
while True:
try:
difficulty, outcome = eval_process.queue.get_nowait()
evals[difficulty].append(outcome)
except spawn.Empty:
break
batch_size_stats = stats.BasicStats() # Only makes sense in C++.
batch_size_stats.add(1)
data_log.write({
"step": step,
"total_states": replay_buffer.total_seen,
"states_per_s": num_states / seconds,
"states_per_s_actor": num_states / (config.actors * seconds),
"total_trajectories": total_trajectories,
"trajectories_per_s": num_trajectories / seconds,
"queue_size": 0, # Only available in C++.
"game_length": game_lengths.as_dict,
"game_length_hist": game_lengths_hist.data,
"outcomes": outcomes.data,
"value_accuracy": [v.as_dict for v in value_accuracies],
"value_prediction": [v.as_dict for v in value_predictions],
"eval": {
"count": evals[0].total_seen,
"results": [sum(e.data) / len(e) if e else 0 for e in evals],
},
"batch_size": batch_size_stats.as_dict,
"batch_size_hist": [0, 1],
"loss": {
"policy": losses.policy,
"value": losses.value,
"l2reg": losses.l2,
"sum": losses.total,
},
"cache": { # Null stats because it's hard to report between processes.
"size": 0,
"max_size": 0,
"usage": 0,
"requests": 0,
"requests_per_s": 0,
"hits": 0,
"misses": 0,
"misses_per_s": 0,
"hit_rate": 0,
},
})
logger.print()
if config.max_steps > 0 and step >= config.max_steps:
break
broadcast_fn(save_path)
def alpha_zero(config: Config):
"""Start all the worker processes for a full alphazero setup."""
game = pyspiel.load_game(config.game)
config = config._replace(
observation_shape=game.observation_tensor_shape(),
output_size=game.num_distinct_actions())
print("Starting game", config.game)
if game.num_players() != 2:
sys.exit("AlphaZero can only handle 2-player games.")
game_type = game.get_type()
if game_type.reward_model != pyspiel.GameType.RewardModel.TERMINAL:
raise ValueError("Game must have terminal rewards.")
if game_type.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("Game must have sequential turns.")
if game_type.chance_mode != pyspiel.GameType.ChanceMode.DETERMINISTIC:
raise ValueError("Game must be deterministic.")
path = config.path
if not path:
path = tempfile.mkdtemp(prefix="az-{}-{}-".format(
datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"), config.game))
config = config._replace(path=path)
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
sys.exit("{} isn't a directory".format(path))
print("Writing logs and checkpoints to:", path)
print("Model type: %s(%s, %s)" % (config.nn_model, config.nn_width,
config.nn_depth))
with open(os.path.join(config.path, "config.json"), "w") as fp:
fp.write(json.dumps(config._asdict(), indent=2, sort_keys=True) + "\n")
actors = [spawn.Process(actor, kwargs={"game": game, "config": config,
"num": i})
for i in range(config.actors)]
evaluators = [spawn.Process(evaluator, kwargs={"game": game, "config": config,
"num": i})
for i in range(config.evaluators)]
def broadcast(msg):
for proc in actors + evaluators:
proc.queue.put(msg)
try:
learner(game=game, config=config, actors=actors, # pylint: disable=missing-kwoa
evaluators=evaluators, broadcast_fn=broadcast)
except (KeyboardInterrupt, EOFError):
print("Caught a KeyboardInterrupt, stopping early.")
finally:
broadcast("")
# for actor processes to join we have to make sure that their q_in is empty,
# including backed up items
for proc in actors:
while proc.exitcode is None:
while not proc.queue.empty():
proc.queue.get_nowait()
proc.join(JOIN_WAIT_DELAY)
for proc in evaluators:
proc.join()
| open_spiel-master | open_spiel/python/algorithms/alpha_zero/alpha_zero.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.algorithms.alpha_zero.evaluator."""
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import mcts
from open_spiel.python.algorithms.alpha_zero import evaluator as evaluator_lib
from open_spiel.python.algorithms.alpha_zero import model as model_lib
import pyspiel
def build_model(game):
return model_lib.Model.build_model(
"mlp", game.observation_tensor_shape(), game.num_distinct_actions(),
nn_width=64, nn_depth=2, weight_decay=1e-4, learning_rate=0.01, path=None)
class EvaluatorTest(absltest.TestCase):
def test_evaluator_caching(self):
game = pyspiel.load_game("tic_tac_toe")
model = build_model(game)
evaluator = evaluator_lib.AlphaZeroEvaluator(game, model)
state = game.new_initial_state()
obs = state.observation_tensor()
act_mask = state.legal_actions_mask()
action = state.legal_actions()[0]
policy = np.zeros(len(act_mask), dtype=float)
policy[action] = 1
train_inputs = [model_lib.TrainInput(obs, act_mask, policy, value=1)]
value = evaluator.evaluate(state)
self.assertEqual(value[0], -value[1])
value = value[0]
value2 = evaluator.evaluate(state)[0]
self.assertEqual(value, value2)
prior = evaluator.prior(state)
prior2 = evaluator.prior(state)
np.testing.assert_array_equal(prior, prior2)
info = evaluator.cache_info()
self.assertEqual(info.misses, 1)
self.assertEqual(info.hits, 3)
for _ in range(20):
model.update(train_inputs)
# Still equal due to not clearing the cache
value3 = evaluator.evaluate(state)[0]
self.assertEqual(value, value3)
info = evaluator.cache_info()
self.assertEqual(info.misses, 1)
self.assertEqual(info.hits, 4)
evaluator.clear_cache()
info = evaluator.cache_info()
self.assertEqual(info.misses, 0)
self.assertEqual(info.hits, 0)
# Now they differ from before
value4 = evaluator.evaluate(state)[0]
value5 = evaluator.evaluate(state)[0]
self.assertNotEqual(value, value4)
self.assertEqual(value4, value5)
info = evaluator.cache_info()
self.assertEqual(info.misses, 1)
self.assertEqual(info.hits, 1)
value6 = evaluator.evaluate(game.new_initial_state())[0]
self.assertEqual(value4, value6)
info = evaluator.cache_info()
self.assertEqual(info.misses, 1)
self.assertEqual(info.hits, 2)
def test_works_with_mcts(self):
game = pyspiel.load_game("tic_tac_toe")
model = build_model(game)
evaluator = evaluator_lib.AlphaZeroEvaluator(game, model)
bot = mcts.MCTSBot(
game, 1., 20, evaluator, solve=False, dirichlet_noise=(0.25, 1.))
root = bot.mcts_search(game.new_initial_state())
self.assertEqual(root.explore_count, 20)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/algorithms/alpha_zero/evaluator_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/algorithms/alpha_zero/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An AlphaZero style model with a policy and value head."""
import collections
import functools
import os
from typing import Sequence
import numpy as np
import tensorflow.compat.v1 as tf
def cascade(x, fns):
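  """Applies each function in `fns` to `x` in turn and returns the result."""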
for fn in fns:
x = fn(x)
return x
tfkl = tf.keras.layers
conv_2d = functools.partial(tfkl.Conv2D, padding="same")
def batch_norm(training, updates, name):
"""A batch norm layer.
Args:
training: A placeholder of whether this is done in training or not.
updates: A list to be extended with this layer's updates.
name: Name of the layer.
Returns:
A function to apply to the previous layer.
"""
bn = tfkl.BatchNormalization(name=name)
def batch_norm_layer(x):
# This emits a warning that training is a placeholder instead of a concrete
# bool, but seems to work anyway.
applied = bn(x, training)
updates.extend(bn.updates)
return applied
return batch_norm_layer
def residual_layer(inputs, num_filters, kernel_size, training, updates, name):
return cascade(inputs, [
conv_2d(num_filters, kernel_size, name=f"{name}_res_conv1"),
batch_norm(training, updates, f"{name}_res_batch_norm1"),
tfkl.Activation("relu"),
conv_2d(num_filters, kernel_size, name=f"{name}_res_conv2"),
batch_norm(training, updates, f"{name}_res_batch_norm2"),
lambda x: tfkl.add([x, inputs]),
tfkl.Activation("relu"),
])
class TrainInput(collections.namedtuple(
"TrainInput", "observation legals_mask policy value")):
"""Inputs for training the Model."""
@staticmethod
def stack(train_inputs):
observation, legals_mask, policy, value = zip(*train_inputs)
return TrainInput(
np.array(observation, dtype=np.float32),
np.array(legals_mask, dtype=bool),
np.array(policy),
np.expand_dims(value, 1))
class Losses(collections.namedtuple("Losses", "policy value l2")):
"""Losses from a training step."""
@property
def total(self):
return self.policy + self.value + self.l2
def __str__(self):
return ("Losses(total: {:.3f}, policy: {:.3f}, value: {:.3f}, "
"l2: {:.3f})").format(self.total, self.policy, self.value, self.l2)
def __add__(self, other):
return Losses(self.policy + other.policy,
self.value + other.value,
self.l2 + other.l2)
def __truediv__(self, n):
return Losses(self.policy / n, self.value / n, self.l2 / n)
class Model(object):
"""An AlphaZero style model with a policy and value head.
This supports three types of models: mlp, conv2d and resnet.
All models have a shared torso stack with two output heads: policy and value.
  They have the same meaning as in the AlphaGo Zero and AlphaZero papers. The
  resnet model copies the one in that paper when set with width 256 and depth
  20. The conv2d model is the same as the resnet except that it uses a
  conv+batchnorm+relu block instead of the res blocks. The mlp model uses
  dense layers instead of conv, and drops batch norm.
Links to relevant articles/papers:
https://deepmind.com/blog/article/alphago-zero-starting-scratch has an open
access link to the AlphaGo Zero nature paper.
https://deepmind.com/blog/article/alphazero-shedding-new-light-grand-games-chess-shogi-and-go
has an open access link to the AlphaZero science paper.
All are parameterized by their input (observation) shape and output size
(number of actions), though the conv2d and resnet might only work with games
  that have spatial data (i.e. 3 non-batch dimensions; e.g. connect four would
work, but not poker).
The depth is the number of blocks in the torso, where the definition of a
block varies by model. For a resnet it's a resblock which is two conv2ds,
batch norms and relus, and an addition. For conv2d it's a conv2d, a batch norm
and a relu. For mlp it's a dense plus relu.
The width is the number of filters for any conv2d and the number of hidden
units for any dense layer.
Note that this uses an explicit graph so that it can be used for inference
and training from C++. It seems to also be 20%+ faster than using eager mode,
at least for the unit test.
"""
valid_model_types = ["mlp", "conv2d", "resnet"]
def __init__(self, session, saver, path):
"""Init a model. Use build_model, from_checkpoint or from_graph instead."""
self._session = session
self._saver = saver
self._path = path
def get_var(name):
return self._session.graph.get_tensor_by_name(name + ":0")
self._input = get_var("input")
self._legals_mask = get_var("legals_mask")
self._training = get_var("training")
self._value_out = get_var("value_out")
self._policy_softmax = get_var("policy_softmax")
self._policy_loss = get_var("policy_loss")
self._value_loss = get_var("value_loss")
self._l2_reg_loss = get_var("l2_reg_loss")
self._policy_targets = get_var("policy_targets")
self._value_targets = get_var("value_targets")
self._train = self._session.graph.get_operation_by_name("train")
@classmethod
def build_model(cls, model_type, input_shape, output_size, nn_width, nn_depth,
weight_decay, learning_rate, path):
"""Build a model with the specified params."""
if model_type not in cls.valid_model_types:
raise ValueError(f"Invalid model type: {model_type}, "
f"expected one of: {cls.valid_model_types}")
# The order of creating the graph, init, saver, and session is important!
# https://stackoverflow.com/a/40788998
g = tf.Graph() # Allow multiple independent models and graphs.
with g.as_default():
cls._define_graph(model_type, input_shape, output_size, nn_width,
nn_depth, weight_decay, learning_rate)
init = tf.variables_initializer(tf.global_variables(),
name="init_all_vars_op")
with tf.device("/cpu:0"): # Saver only works on CPU.
saver = tf.train.Saver(
max_to_keep=10000, sharded=False, name="saver")
session = tf.Session(graph=g)
session.__enter__()
session.run(init)
return cls(session, saver, path)
@classmethod
def from_checkpoint(cls, checkpoint, path=None):
"""Load a model from a checkpoint."""
model = cls.from_graph(checkpoint, path)
model.load_checkpoint(checkpoint)
return model
@classmethod
def from_graph(cls, metagraph, path=None):
"""Load only the model from a graph or checkpoint."""
if not os.path.exists(metagraph):
metagraph += ".meta"
if not path:
path = os.path.dirname(metagraph)
g = tf.Graph() # Allow multiple independent models and graphs.
with g.as_default():
saver = tf.train.import_meta_graph(metagraph)
session = tf.Session(graph=g)
session.__enter__()
session.run("init_all_vars_op")
return cls(session, saver, path)
def __del__(self):
if hasattr(self, "_session") and self._session:
self._session.close()
@staticmethod
def _define_graph(model_type, input_shape, output_size,
nn_width, nn_depth, weight_decay, learning_rate):
"""Define the model graph."""
# Inference inputs
input_size = int(np.prod(input_shape))
observations = tf.placeholder(tf.float32, [None, input_size], name="input")
legals_mask = tf.placeholder(tf.bool, [None, output_size],
name="legals_mask")
training = tf.placeholder(tf.bool, name="training")
bn_updates = []
# Main torso of the network
if model_type == "mlp":
torso = observations # Ignore the input shape, treat it as a flat array.
for i in range(nn_depth):
torso = cascade(torso, [
tfkl.Dense(nn_width, name=f"torso_{i}_dense"),
tfkl.Activation("relu"),
])
elif model_type == "conv2d":
torso = tfkl.Reshape(input_shape)(observations)
for i in range(nn_depth):
torso = cascade(torso, [
conv_2d(nn_width, 3, name=f"torso_{i}_conv"),
batch_norm(training, bn_updates, f"torso_{i}_batch_norm"),
tfkl.Activation("relu"),
])
elif model_type == "resnet":
torso = cascade(observations, [
tfkl.Reshape(input_shape),
conv_2d(nn_width, 3, name="torso_in_conv"),
batch_norm(training, bn_updates, "torso_in_batch_norm"),
tfkl.Activation("relu"),
])
for i in range(nn_depth):
torso = residual_layer(torso, nn_width, 3, training, bn_updates,
f"torso_{i}")
else:
raise ValueError("Unknown model type.")
# The policy head
if model_type == "mlp":
policy_head = cascade(torso, [
tfkl.Dense(nn_width, name="policy_dense"),
tfkl.Activation("relu"),
])
else:
policy_head = cascade(torso, [
conv_2d(filters=2, kernel_size=1, name="policy_conv"),
batch_norm(training, bn_updates, "policy_batch_norm"),
tfkl.Activation("relu"),
tfkl.Flatten(),
])
policy_logits = tfkl.Dense(output_size, name="policy")(policy_head)
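    # Mask illegal actions by forcing their logits to a very negative value
    # before the softmax, so they receive (numerically) zero probability.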
policy_logits = tf.where(legals_mask, policy_logits,
-1e32 * tf.ones_like(policy_logits))
unused_policy_softmax = tf.identity(tfkl.Softmax()(policy_logits),
name="policy_softmax")
policy_targets = tf.placeholder(
shape=[None, output_size], dtype=tf.float32, name="policy_targets")
policy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
logits=policy_logits, labels=policy_targets),
name="policy_loss")
# The value head
if model_type == "mlp":
value_head = torso # Nothing specific before the shared value head.
else:
value_head = cascade(torso, [
conv_2d(filters=1, kernel_size=1, name="value_conv"),
batch_norm(training, bn_updates, "value_batch_norm"),
tfkl.Activation("relu"),
tfkl.Flatten(),
])
value_out = cascade(value_head, [
tfkl.Dense(nn_width, name="value_dense"),
tfkl.Activation("relu"),
tfkl.Dense(1, name="value"),
tfkl.Activation("tanh"),
])
# Need the identity to name the single value output from the dense layer.
value_out = tf.identity(value_out, name="value_out")
value_targets = tf.placeholder(
shape=[None, 1], dtype=tf.float32, name="value_targets")
value_loss = tf.identity(tf.losses.mean_squared_error(
value_out, value_targets), name="value_loss")
l2_reg_loss = tf.add_n([
weight_decay * tf.nn.l2_loss(var)
for var in tf.trainable_variables()
if "/bias:" not in var.name
], name="l2_reg_loss")
total_loss = policy_loss + value_loss + l2_reg_loss
optimizer = tf.train.AdamOptimizer(learning_rate)
with tf.control_dependencies(bn_updates):
unused_train = optimizer.minimize(total_loss, name="train")
@property
def num_trainable_variables(self):
return sum(np.prod(v.shape) for v in tf.trainable_variables())
def print_trainable_variables(self):
for v in tf.trainable_variables():
print("{}: {}".format(v.name, v.shape))
def write_graph(self, filename):
full_path = os.path.join(self._path, filename)
tf.train.export_meta_graph(
graph_def=self._session.graph_def, saver_def=self._saver.saver_def,
filename=full_path, as_text=False)
return full_path
def inference(self, observation, legals_mask):
return self._session.run(
[self._value_out, self._policy_softmax],
feed_dict={self._input: np.array(observation, dtype=np.float32),
self._legals_mask: np.array(legals_mask, dtype=bool),
self._training: False})
def update(self, train_inputs: Sequence[TrainInput]):
"""Runs a training step."""
batch = TrainInput.stack(train_inputs)
# Run a training step and get the losses.
_, policy_loss, value_loss, l2_reg_loss = self._session.run(
[self._train, self._policy_loss, self._value_loss, self._l2_reg_loss],
feed_dict={self._input: batch.observation,
self._legals_mask: batch.legals_mask,
self._policy_targets: batch.policy,
self._value_targets: batch.value,
self._training: True})
return Losses(policy_loss, value_loss, l2_reg_loss)
def save_checkpoint(self, step):
return self._saver.save(
self._session,
os.path.join(self._path, "checkpoint"),
global_step=step)
def load_checkpoint(self, path):
return self._saver.restore(self._session, path)
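# --- Illustrative sketch (not part of the original module) ---
# Builds a small mlp model for tic_tac_toe and runs one inference on the
# initial state. The hyperparameters are arbitrary illustration values and
# `_demo_inference` is not part of the library.
def _demo_inference():
  import pyspiel  # Local import; this module itself does not depend on it.
  game = pyspiel.load_game("tic_tac_toe")
  model = Model.build_model(
      "mlp", game.observation_tensor_shape(), game.num_distinct_actions(),
      nn_width=64, nn_depth=2, weight_decay=1e-4, learning_rate=0.01,
      path=None)
  state = game.new_initial_state()
  value, policy = model.inference([state.observation_tensor()],
                                  [state.legal_actions_mask()])
  # value has shape (1, 1) in [-1, 1]; policy has shape (1, num_actions).
  return value[0, 0], policy[0]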
| open_spiel-master | open_spiel/python/algorithms/alpha_zero/model.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An MCTS Evaluator for an AlphaZero model."""
import numpy as np
from open_spiel.python.algorithms import mcts
import pyspiel
from open_spiel.python.utils import lru_cache
class AlphaZeroEvaluator(mcts.Evaluator):
"""An AlphaZero MCTS Evaluator."""
def __init__(self, game, model, cache_size=2**16):
"""An AlphaZero MCTS Evaluator."""
if game.num_players() != 2:
raise ValueError("Game must be for two players.")
game_type = game.get_type()
if game_type.reward_model != pyspiel.GameType.RewardModel.TERMINAL:
raise ValueError("Game must have terminal rewards.")
if game_type.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("Game must have sequential turns.")
self._model = model
self._cache = lru_cache.LRUCache(cache_size)
def cache_info(self):
return self._cache.info()
def clear_cache(self):
self._cache.clear()
def _inference(self, state):
# Make a singleton batch
obs = np.expand_dims(state.observation_tensor(), 0)
mask = np.expand_dims(state.legal_actions_mask(), 0)
# ndarray isn't hashable
cache_key = obs.tobytes() + mask.tobytes()
value, policy = self._cache.make(
cache_key, lambda: self._model.inference(obs, mask))
return value[0, 0], policy[0] # Unpack batch
def evaluate(self, state):
"""Returns a value for the given state."""
value, _ = self._inference(state)
return np.array([value, -value])
def prior(self, state):
if state.is_chance_node():
return state.chance_outcomes()
else:
# Returns the probabilities for all actions.
_, policy = self._inference(state)
return [(action, policy[action]) for action in state.legal_actions()]
| open_spiel-master | open_spiel/python/algorithms/alpha_zero/evaluator.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
import pickle
from absl import app
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import games # pylint: disable=unused-import
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.mfg import games as mfg_games # pylint:disable=unused-import
import pyspiel
from open_spiel.python.utils import file_utils
# TODO(author18): add predator_prey in the list of game tested
# Put a bound on length of game so test does not timeout.
MAX_ACTIONS_PER_GAME = 1000
# All games registered in the main spiel library.
SPIEL_GAMES_LIST = pyspiel.registered_games()
# All games loadable without parameter values.
SPIEL_LOADABLE_GAMES_LIST = [g for g in SPIEL_GAMES_LIST if g.default_loadable]
# A list of games to exclude from the general simulation tests. This should
# remain empty, but it is helpful to use while a game is under construction.
SPIEL_EXCLUDE_SIMS_TEST_GAMES_LIST = ["yacht"]
# TODO(b/141950198): Stop hard-coding the number of loadable games.
assert len(SPIEL_LOADABLE_GAMES_LIST) >= 38, len(SPIEL_LOADABLE_GAMES_LIST)
# All simultaneous games.
SPIEL_SIMULTANEOUS_GAMES_LIST = [
g for g in SPIEL_LOADABLE_GAMES_LIST
if g.dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS
]
assert len(SPIEL_SIMULTANEOUS_GAMES_LIST) >= 14, len(
SPIEL_SIMULTANEOUS_GAMES_LIST)
# All multiplayer games. This is a list of (game, num_players) pairs to test.
SPIEL_MULTIPLAYER_GAMES_LIST = [
# pylint: disable=g-complex-comprehension
(g, p)
for g in SPIEL_LOADABLE_GAMES_LIST
for p in range(max(g.min_num_players, 2), 1 + min(g.max_num_players, 6))
if g.max_num_players > 2 and g.max_num_players > g.min_num_players and
g.short_name != "tiny_hanabi" # default payoff only works for 2p
# cannot change the number of players without changing other parameters
and g.short_name != "universal_poker" and g.short_name != "scotland_yard"
]
assert len(SPIEL_MULTIPLAYER_GAMES_LIST) >= 35, len(
SPIEL_MULTIPLAYER_GAMES_LIST)
class GamesSimTest(parameterized.TestCase):
def apply_action(self, state, action):
if state.is_simultaneous_node():
assert isinstance(action, list)
state.apply_actions(action)
else:
state.apply_action(action)
def apply_action_test_clone(self, state, action):
"""Applies the action and tests the clone method if it's implemented."""
try:
state_clone = state.clone()
except Exception: # pylint: disable=broad-except
self.apply_action(state, action)
return
self.assertEqual(str(state), str(state_clone))
self.assertEqual(state.history(), state_clone.history())
self.apply_action(state, action)
self.apply_action(state_clone, action)
self.assertEqual(str(state), str(state_clone))
self.assertEqual(state.history(), state_clone.history())
def serialize_deserialize(self, game, state, check_pyspiel_serialization,
check_pickle_serialization):
# OpenSpiel native serialization
if check_pyspiel_serialization:
ser_str = pyspiel.serialize_game_and_state(game, state)
new_game, new_state = pyspiel.deserialize_game_and_state(ser_str)
self.assertEqual(str(game), str(new_game))
self.assertEqual(str(state), str(new_state))
if check_pickle_serialization:
# Pickle serialization + deserialization (of the state).
pickled_state = pickle.dumps(state)
unpickled_state = pickle.loads(pickled_state)
self.assertEqual(str(state), str(unpickled_state))
def sim_game(
self,
game,
check_pyspiel_serialization=True,
check_pickle_serialization=True,
make_distribution_fn=(
lambda states: ([1 / len(states)] * len(states) if states else []))
):
min_utility = game.min_utility()
max_utility = game.max_utility()
self.assertLess(min_utility, max_utility)
if check_pickle_serialization:
# Pickle serialization + deserialization (of the game).
pickled_game = pickle.dumps(game)
unpickled_game = pickle.loads(pickled_game)
self.assertEqual(str(game), str(unpickled_game))
# Pickle serialization + deserialization (of the game type).
pickled_game_type = pickle.dumps(game.get_type())
unpickled_game_type = pickle.loads(pickled_game_type)
self.assertEqual(game.get_type(), unpickled_game_type)
# Get a new state
for state in game.new_initial_states():
total_actions = 0
next_serialize_check = 1
while not state.is_terminal() and total_actions <= MAX_ACTIONS_PER_GAME:
total_actions += 1
# Serialize/Deserialize is costly. Only do it every power of 2 actions.
if total_actions >= next_serialize_check:
self.serialize_deserialize(game, state, check_pyspiel_serialization,
check_pickle_serialization)
next_serialize_check *= 2
        # The state can be one of four types: chance node,
# mean-field-game node, simultaneous node, or decision node
if state.is_chance_node():
# Chance node: sample an outcome
outcomes = state.chance_outcomes()
self.assertNotEmpty(outcomes)
action_list, prob_list = zip(*outcomes)
action = np.random.choice(action_list, p=prob_list)
state.apply_action(action)
elif state.is_simultaneous_node():
# Simultaneous node: sample actions for all players
chosen_actions = []
for pid in range(game.num_players()):
legal_actions = state.legal_actions(pid)
action = 0 if not legal_actions else np.random.choice(legal_actions)
chosen_actions.append(action)
# Apply the joint action and test cloning states.
self.apply_action_test_clone(state, chosen_actions)
elif state.is_mean_field_node():
self.assertEqual(game.get_type().dynamics,
pyspiel.GameType.Dynamics.MEAN_FIELD)
state.update_distribution(
make_distribution_fn(state.distribution_support()))
else:
self.assertTrue(state.is_player_node())
# Decision node: sample action for the single current player
action = np.random.choice(state.legal_actions(state.current_player()))
# Apply action and test state cloning.
self.apply_action_test_clone(state, action)
      # Make sure at least one action was taken.
self.assertGreater(total_actions, 0,
"No actions taken in sim of " + str(game))
      # Either the game is done, or the maximum number of actions was reached.
if state.is_terminal():
# Check there are no legal actions.
self.assertEmpty(state.legal_actions())
for player in range(game.num_players()):
self.assertEmpty(state.legal_actions(player))
# Print utilities for each player.
utilities = state.returns()
# Check that player returns are correct
for player in range(game.num_players()):
self.assertEqual(state.player_return(player), utilities[player])
# Check that each one is in range
for utility in utilities:
self.assertGreaterEqual(utility, game.min_utility())
self.assertLessEqual(utility, game.max_utility())
print("Sim of game {} terminated with {} total actions. Utilities: {}"
.format(game, total_actions, utilities))
else:
print("Sim of game {} terminated after maximum number of actions {}"
.format(game, MAX_ACTIONS_PER_GAME))
@parameterized.named_parameters((game_info.short_name, game_info)
for game_info in SPIEL_LOADABLE_GAMES_LIST)
def test_game_sim(self, game_info):
if game_info.short_name in SPIEL_EXCLUDE_SIMS_TEST_GAMES_LIST:
print(f"{game_info.short_name} is excluded from sim tests. Skipping.")
return
game = pyspiel.load_game(game_info.short_name)
self.assertLessEqual(game_info.min_num_players, game.num_players())
self.assertLessEqual(game.num_players(), game_info.max_num_players)
self.sim_game(game)
@parameterized.named_parameters(
(game_info.short_name, game_info)
for game_info in SPIEL_SIMULTANEOUS_GAMES_LIST)
def test_simultaneous_game_as_turn_based(self, game_info):
converted_game = pyspiel.load_game_as_turn_based(game_info.short_name)
self.sim_game(converted_game)
@parameterized.named_parameters((f"{p}p_{g.short_name}", g, p)
for g, p in SPIEL_MULTIPLAYER_GAMES_LIST)
def test_multiplayer_game(self, game_info, num_players):
if game_info.short_name == "python_mfg_predator_prey":
reward_matrix = np.ones((num_players, num_players))
# Construct an initial distribution matrix of suitable dimensions.
zero_mat = np.zeros((5, 5))
pop_1 = zero_mat.copy()
pop_1[0, 0] = 1.0
pop_1 = pop_1.tolist()
pop_2 = zero_mat.copy()
pop_2[0, -1] = 1.0
pop_2 = pop_2.tolist()
pop_3 = zero_mat.copy()
pop_3[-1, 0] = 1.0
pop_3 = pop_3.tolist()
pop_4 = zero_mat.copy()
pop_4[-1, -1] = 1.0
pop_4 = pop_4.tolist()
pops = [pop_1, pop_2, pop_3, pop_4]
init_distrib = []
for p in range(num_players):
init_distrib += pops[p%4]
init_distrib = np.array(init_distrib)
dict_args = {
"players": num_players,
"reward_matrix": " ".join(str(v) for v in reward_matrix.flatten()),
"init_distrib": " ".join(str(v) for v in init_distrib.flatten()),
}
else:
dict_args = {"players": num_players}
game = pyspiel.load_game(game_info.short_name, dict_args)
self.sim_game(game)
def test_breakthrough(self):
# make a smaller (6x6) board
game = pyspiel.load_game("breakthrough(rows=6,columns=6)")
self.sim_game(game)
def test_pig(self):
    # use a lower winning score to make the game smaller
game = pyspiel.load_game("pig(players=2,winscore=15)")
self.sim_game(game)
def test_efg_game(self):
game = pyspiel.load_efg_game(pyspiel.get_sample_efg_data())
# EFG games loaded directly by string cannot serialize because the game's
# data cannot be passed in via string parameter.
for _ in range(0, 100):
self.sim_game(
game,
check_pyspiel_serialization=False,
check_pickle_serialization=False)
game = pyspiel.load_efg_game(pyspiel.get_kuhn_poker_efg_data())
for _ in range(0, 100):
self.sim_game(
game,
check_pyspiel_serialization=False,
check_pickle_serialization=False)
# EFG games loaded by file should serialize properly:
filename = file_utils.find_file(
"third_party/open_spiel/games/efg/sample.efg", 2)
if filename is not None:
game = pyspiel.load_game("efg_game(filename=" + filename + ")")
for _ in range(0, 100):
self.sim_game(game)
def test_backgammon_checker_moves(self):
game = pyspiel.load_game("backgammon")
state = game.new_initial_state()
state.apply_action(0) # Roll 12 and X starts
action = state.legal_actions()[0] # First legal action
# X has player id 0.
checker_moves = state.spiel_move_to_checker_moves(0, action)
print("Checker moves:")
for i in range(2):
print("pos {}, num {}, hit? {}".format(checker_moves[i].pos,
checker_moves[i].num,
checker_moves[i].hit))
action2 = state.checker_moves_to_spiel_move(checker_moves)
self.assertEqual(action, action2)
action3 = state.translate_action(0, 0, True) # 0->2, 0->1
self.assertEqual(action3, 0)
def test_backgammon_checker_moves_with_hit_info(self):
game = pyspiel.load_game("backgammon")
state = game.new_initial_state()
while not state.is_terminal():
if state.is_chance_node():
outcomes_with_probs = state.chance_outcomes()
action_list, prob_list = zip(*outcomes_with_probs)
action = np.random.choice(action_list, p=prob_list)
state.apply_action(action)
else:
legal_actions = state.legal_actions()
player = state.current_player()
for action in legal_actions:
action_str = state.action_to_string(player, action)
checker_moves = (
state.augment_with_hit_info(
player, state.spiel_move_to_checker_moves(player, action)))
if checker_moves[0].hit or checker_moves[1].hit:
self.assertGreaterEqual(action_str.find("*"), 0)
else:
self.assertLess(action_str.find("*"), 0)
if action_str.find("*") > 0:
self.assertTrue(checker_moves[0].hit or checker_moves[1].hit)
else:
self.assertTrue(not checker_moves[0].hit and
not checker_moves[1].hit)
action = np.random.choice(legal_actions)
state.apply_action(action)
def test_leduc_get_and_set_private_cards(self):
game = pyspiel.load_game("leduc_poker")
state = game.new_initial_state()
state.apply_action(0) # give player 0 jack of first suit
state.apply_action(1) # give player 1 jack of second suit
# check that we can retrieve those cards
print(state)
private_cards = state.get_private_cards()
self.assertEqual(private_cards, [0, 1])
# now give them queens instead, get them again, and check that it worked
state.set_private_cards([2, 3])
print(state)
private_cards = state.get_private_cards()
self.assertEqual(private_cards, [2, 3])
def test_dots_and_boxes_with_notation(self):
game = pyspiel.load_game("dots_and_boxes")
state = game.new_initial_state()
state.apply_action(0) # horizontal 0, 0
state.apply_action(1) # horizontal 0, 1
    # check that we can retrieve the notation
dbn = state.dbn_string()
self.assertEqual(dbn, "110000000000")
@parameterized.parameters(
{"game_name": "blotto"},
{"game_name": "goofspiel"},
{"game_name": "kuhn_poker"},
{"game_name": "tiny_hanabi"},
{"game_name": "phantom_ttt"},
{"game_name": "matrix_rps"},
{"game_name": "kuhn_poker"},
)
def test_restricted_nash_response_test(self, game_name):
rnr_game = pyspiel.load_game(
f"restricted_nash_response(game={game_name}())")
for _ in range(10):
self.sim_game(rnr_game, check_pyspiel_serialization=False,
check_pickle_serialization=False)
# TODO(author18): find the list of games where it is reasonable to call
# get_all_states
@parameterized.parameters(
{"game_name": "python_mfg_crowd_modelling"},
{"game_name": "mfg_crowd_modelling"},
# {"game_name": "mfg_crowd_modelling_2d"},
{"game_name": "kuhn_poker"},
{"game_name": "leduc_poker"},
)
def test_has_at_least_an_action(self, game_name):
"""Check that all population's state have at least one action."""
game = pyspiel.load_game(game_name)
to_string = (
lambda s: s.observation_string(pyspiel.PlayerId.DEFAULT_PLAYER_ID))
states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False,
include_mean_field_states=False,
to_string=to_string)
for state in states.values():
self.assertNotEmpty(state.legal_actions())
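# Illustrative sketch of the OpenSpiel native serialization round trip that
# serialize_deserialize above relies on: game and state are rebuilt from a
# single string. The game and action are arbitrary examples; this helper is
# not called by the tests.
def _example_serialize_roundtrip():
  import pyspiel
  game = pyspiel.load_game("tic_tac_toe")
  state = game.new_initial_state()
  state.apply_action(4)
  ser_str = pyspiel.serialize_game_and_state(game, state)
  new_game, new_state = pyspiel.deserialize_game_and_state(ser_str)
  assert str(new_game) == str(game)
  assert new_state.history() == state.history()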
def main(_):
absltest.main()
if __name__ == "__main__":
# Necessary to run main via app.run for internal tests.
app.run(main)
| open_spiel-master | open_spiel/python/tests/games_sim_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the C++ matrix game utility methods exposed to Python."""
from absl.testing import absltest
import pyspiel
class TensorGamesUtilsTest(absltest.TestCase):
def test_extensive_to_tensor_game_type(self):
game = pyspiel.extensive_to_tensor_game(
pyspiel.load_game(
"turn_based_simultaneous_game(game=blotto(players=3,coins=5))"))
game_type = game.get_type()
self.assertEqual(game_type.dynamics, pyspiel.GameType.Dynamics.SIMULTANEOUS)
self.assertEqual(game_type.chance_mode,
pyspiel.GameType.ChanceMode.DETERMINISTIC)
self.assertEqual(game_type.information,
pyspiel.GameType.Information.ONE_SHOT)
self.assertEqual(game_type.utility, pyspiel.GameType.Utility.ZERO_SUM)
def test_extensive_to_tensor_game_payoff_tensor(self):
turn_based_game = pyspiel.load_game_as_turn_based(
"blotto(players=3,coins=5)")
tensor_game1 = pyspiel.extensive_to_tensor_game(turn_based_game)
tensor_game2 = pyspiel.load_tensor_game("blotto(players=3,coins=5)")
self.assertEqual(tensor_game1.shape(), tensor_game2.shape())
s0 = turn_based_game.new_initial_state()
self.assertEqual(tensor_game1.shape()[0], s0.num_distinct_actions())
for a0 in range(s0.num_distinct_actions()):
s1 = s0.child(a0)
self.assertEqual(tensor_game1.shape()[1], s1.num_distinct_actions())
for a1 in range(s1.num_distinct_actions()):
s2 = s1.child(a1)
self.assertEqual(tensor_game1.shape()[2], s2.num_distinct_actions())
for a2 in range(s2.num_distinct_actions()):
s3 = s2.child(a2)
self.assertTrue(s3.is_terminal())
for player in range(3):
self.assertEqual(
s3.returns()[player],
tensor_game1.player_utility(player, (a0, a1, a2)))
self.assertEqual(
s3.returns()[player],
tensor_game2.player_utility(player, (a0, a1, a2)))
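# Illustrative sketch of the conversion tested above: build the tensor form of
# a small turn-based game and read one joint-action payoff. The joint action
# (0, 0, 0) is an arbitrary example; this helper is not called by the tests.
def _example_tensor_game_lookup():
  turn_based_game = pyspiel.load_game_as_turn_based("blotto(players=3,coins=5)")
  tensor_game = pyspiel.extensive_to_tensor_game(turn_based_game)
  print("Shape:", tensor_game.shape())
  print("P0 payoff at (0, 0, 0):", tensor_game.player_utility(0, (0, 0, 0)))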
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/tensor_game_utils_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that Python and C++ bots can be called by a C++ algorithm."""
import os
from absl.testing import absltest
import numpy as np
from open_spiel.python.bots import uniform_random
import pyspiel
# Specify bot names in alphabetical order, to make it easier to read.
SPIEL_BOTS_LIST = [
# Chooses actions in a fixed order.
"fixed_action_preference",
"uniform_random",
]
class BotTest(absltest.TestCase):
def test_python_and_cpp_bot(self):
game = pyspiel.load_game("kuhn_poker")
bots = [
pyspiel.make_uniform_random_bot(0, 1234),
uniform_random.UniformRandomBot(1, np.random.RandomState(4321)),
]
results = np.array([
pyspiel.evaluate_bots(game.new_initial_state(), bots, iteration)
for iteration in range(10000)
])
average_results = np.mean(results, axis=0)
np.testing.assert_allclose(average_results, [0.125, -0.125], atol=0.1)
def test_registered_bots(self):
expected = SPIEL_BOTS_LIST[:]
if os.environ.get("OPEN_SPIEL_BUILD_WITH_ACPC", "OFF") == "ON":
expected.append("uniform_restricted_actions")
self.assertCountEqual(pyspiel.registered_bots(), expected)
def test_cpp_mcts_bot(self):
game = pyspiel.load_game("tic_tac_toe")
bots = [
pyspiel.MCTSBot(game, pyspiel.RandomRolloutEvaluator(1, 0), 2.0,
100, 100, False, 42, False)
] * 2
_ = np.array([
pyspiel.evaluate_bots(game.new_initial_state(), bots, iteration)
for iteration in range(10)
])
# Do a search directly, and inspect the values.
state = game.new_initial_state()
search_node = bots[0].mcts_search(state)
for child in search_node.children:
print(f"Child action {child.action}, total reward: {child.total_reward}" +
f", explore count: {child.explore_count}")
# Similar way to achieve the above.
print(f"Children string: {search_node.children_str(state)}")
print(f"Best child: {search_node.best_child().to_string(state)}")
def test_can_play_game(self):
game = pyspiel.load_game("kuhn_poker")
self.assertIn("uniform_random", pyspiel.bots_that_can_play_game(game))
def test_passing_params(self):
game = pyspiel.load_game("tic_tac_toe")
bots = [
pyspiel.load_bot(
"fixed_action_preference",
game,
player=0,
params={"actions": "0:1:2"}),
pyspiel.load_bot(
"fixed_action_preference",
game,
player=1,
params={"actions": "3:4"}),
]
result = pyspiel.evaluate_bots(game.new_initial_state(), bots, seed=0)
self.assertEqual(result, [1, -1]) # Player 0 wins.
def test_roshambo_bot(self):
if hasattr(pyspiel, "make_roshambo_bot"):
game = pyspiel.load_game("repeated_game(stage_game=matrix_rps()," +
"num_repetitions=" +
f"{pyspiel.ROSHAMBO_NUM_THROWS})")
num_players = 2
bots = [
pyspiel.make_roshambo_bot(0, "rotatebot",
pyspiel.ROSHAMBO_NUM_THROWS),
pyspiel.make_roshambo_bot(1, "copybot", pyspiel.ROSHAMBO_NUM_THROWS)
]
state = game.new_initial_state()
for i in range(pyspiel.ROSHAMBO_NUM_THROWS):
joint_action = [-1] * num_players
for p in range(num_players):
joint_action[p] = bots[p].step(state)
state.apply_actions(joint_action)
if i == 0:
# copybot wins the first round
self.assertListEqual(state.returns(), [-1, 1])
else:
# the rest are a draw
self.assertListEqual(state.rewards(), [0, 0])
self.assertTrue(state.is_terminal())
self.assertListEqual(state.returns(), [-1, 1])
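# Illustrative sketch of the bot-evaluation pattern from test_python_and_cpp_bot
# above: a C++ uniform-random bot plays player 0 and a Python one plays player
# 1 for a single episode. The seeds are arbitrary; this helper is not called by
# the tests.
def _example_evaluate_bots_once():
  game = pyspiel.load_game("kuhn_poker")
  bots = [
      pyspiel.make_uniform_random_bot(0, 1234),
      uniform_random.UniformRandomBot(1, np.random.RandomState(4321)),
  ]
  return pyspiel.evaluate_bots(game.new_initial_state(), bots, 0)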
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/bot_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.policy."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from open_spiel.python import games # pylint: disable=unused-import
from open_spiel.python import policy
from open_spiel.python.algorithms import get_all_states
import pyspiel
SEED = 187461917
_TIC_TAC_TOE_STATES = [
{
# ...
# xoo
# ..x
"state": "3, 4, 8, 5",
"legal_actions": (0, 1, 2, 6, 7)
},
{
# xo.
# oxx
# o..
"state": "4, 1, 0, 3, 5, 6",
"legal_actions": (2, 7, 8)
},
{
# ...
# ...
# ...
"state": "",
"legal_actions": (0, 1, 2, 3, 4, 5, 6, 7, 8)
}
]
class DerivedPolicyTest(absltest.TestCase):
def test_derive_from_policy(self):
class DerivedPolicy(pyspiel.Policy):
def action_probabilities(self, state):
return {0: 0.1, 1: 0.9}
def get_state_policy(self, infostate):
return {10: 0.9, 11: 0.1}
policy_obj = DerivedPolicy()
self.assertEqual(DerivedPolicy.__bases__, (pyspiel.Policy,))
self.assertIsInstance(policy_obj, pyspiel.Policy)
self.assertEqual(
{0: 0.1, 1: 0.9},
policy_obj.action_probabilities(
pyspiel.load_game("kuhn_poker").new_initial_state()
),
)
self.assertEqual(
{0: 0.1, 1: 0.9}, policy_obj.action_probabilities("some infostate")
)
self.assertEqual(
{10: 0.9, 11: 0.1}, policy_obj.get_state_policy("some infostate")
)
with self.assertRaises(RuntimeError):
policy_obj.serialize()
def test_cpp_policy_from_py(self):
class DerivedPolicy(pyspiel.Policy):
def action_probabilities(self, state):
return {0: 0.0, 1: 0.0}
def get_state_policy(self, infostate):
return [(2, 0.0), (3, 0.0)]
def get_state_policy_as_parallel_vectors(self, state):
if isinstance(state, str):
return [4, 5], [0, 0]
else:
return [6, 7], [0, 0]
def serialize(self, precision, delim):
return f"Serialized string, {precision=}, {delim=}"
policy_obj = DerivedPolicy()
self.assertEqual(
{0: 0.0, 1: 0.0},
pyspiel._policy_trampoline_testing.call_action_probabilities(
policy_obj, pyspiel.load_game("kuhn_poker").new_initial_state()
),
)
self.assertEqual(
{0: 0.0, 1: 0.0},
pyspiel._policy_trampoline_testing.call_action_probabilities(
policy_obj, "some infostate"),
)
self.assertEqual(
[(2, 0.0), (3, 0.0)],
pyspiel._policy_trampoline_testing.call_get_state_policy(
policy_obj, pyspiel.load_game("kuhn_poker").new_initial_state()
),
)
self.assertEqual(
[(2, 0.0), (3, 0.0)],
pyspiel._policy_trampoline_testing.call_get_state_policy(
policy_obj, "some infostate"),
)
self.assertEqual(
([4, 5], [0, 0]),
pyspiel._policy_trampoline_testing.call_get_state_policy_as_parallel_vectors(
policy_obj, "some infostate"),
)
self.assertEqual(
([6, 7], [0, 0]),
pyspiel._policy_trampoline_testing.call_get_state_policy_as_parallel_vectors(
policy_obj, pyspiel.load_game("kuhn_poker").new_initial_state()
),
)
self.assertEqual(
pyspiel._policy_trampoline_testing.call_serialize(policy_obj, 3, "!?"),
"Serialized string, precision=3, delim='!?'",
)
def test_policy_on_game(self, game, policy_object, player=-1):
"""Checks the policy conforms to the conventions.
Checks the Policy.action_probabilities contains only legal actions (but not
necessarily all).
Checks that the probabilities are positive and sum to 1.
Args:
self: The Test class. This methid targets as being used as a utility
function to test policies.
game: A `pyspiel.Game`, same as the one used in the policy.
policy_object: A `policy.Policy` object on `game`. to test.
player: Restrict testing policy to a player.
"""
all_states = get_all_states.get_all_states(
game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False,
to_string=lambda s: s.information_state_string())
for state in all_states.values():
legal_actions = set(state.legal_actions())
action_probabilities = policy_object.action_probabilities(state)
for action in action_probabilities.keys():
# We want a clearer error message to be able to debug.
actions_missing = set(legal_actions) - set(action_probabilities.keys())
illegal_actions = set(action_probabilities.keys()) - set(legal_actions)
self.assertIn(
action,
legal_actions,
msg="The action {} is present in the policy but is not a legal "
"actions (these are {})\n"
"Legal actions missing from policy: {}\n"
"Illegal actions present in policy: {}".format(
action, legal_actions, actions_missing, illegal_actions))
sum_ = 0
for prob in action_probabilities.values():
sum_ += prob
self.assertGreaterEqual(prob, 0)
if player < 0 or state.current_player() == player:
self.assertAlmostEqual(1, sum_)
else:
self.assertAlmostEqual(0, sum_)
_LEDUC_POKER = pyspiel.load_game("leduc_poker")
class CommonTest(parameterized.TestCase):
@parameterized.parameters([
policy.TabularPolicy(_LEDUC_POKER),
policy.UniformRandomPolicy(_LEDUC_POKER),
policy.FirstActionPolicy(_LEDUC_POKER),
])
def test_policy_on_leduc(self, policy_object):
test_policy_on_game(self, _LEDUC_POKER, policy_object)
@parameterized.named_parameters([
("pyspiel.UniformRandomPolicy",
pyspiel.UniformRandomPolicy(_LEDUC_POKER)),
("pyspiel.GetRandomPolicy",
pyspiel.GetRandomPolicy(_LEDUC_POKER, 1)),
("pyspiel.GetFlatDirichletPolicy",
pyspiel.GetFlatDirichletPolicy(_LEDUC_POKER, 1)),
("pyspiel.GetRandomDeterministicPolicy",
pyspiel.GetRandomDeterministicPolicy(_LEDUC_POKER, 1)),
])
def test_cpp_policies_on_leduc(self, policy_object):
test_policy_on_game(self, _LEDUC_POKER, policy_object)
@parameterized.named_parameters([
("pyspiel.GetRandomPolicy0",
pyspiel.GetRandomPolicy(_LEDUC_POKER, 1, 0), 0),
("pyspiel.GetFlatDirichletPolicy1",
pyspiel.GetFlatDirichletPolicy(_LEDUC_POKER, 1, 1), 1),
("pyspiel.GetRandomDeterministicPolicym1",
pyspiel.GetRandomDeterministicPolicy(_LEDUC_POKER, 1, -1), -1),
])
def test_cpp_player_policies_on_leduc(self, policy_object, player):
test_policy_on_game(self, _LEDUC_POKER, policy_object, player)
class TabularTicTacToePolicyTest(parameterized.TestCase):
# Enumerating all the states for tic tac toe is quite slow, so we do this
  # only once.
@classmethod
def setUpClass(cls):
super(TabularTicTacToePolicyTest, cls).setUpClass()
cls.game = pyspiel.load_game("tic_tac_toe")
cls.tabular_policy = policy.TabularPolicy(cls.game)
def test_policy_shape(self):
# Tic tac toe has 4520 decision states; ref
# https://pubs.acs.org/doi/pdf/10.1021/acs.jcim.5b00324
# There are 9 possible moves in the game (one per grid cell).
# However, the TabularPolicy uses InformationState as keys, which in the
# case of TicTacToe corresponds to the number of unique sequences (due to
    # perfect recall) required by several algorithms, e.g. CFR.
self.assertEqual(self.tabular_policy.action_probability_array.shape,
(294778, 9))
def test_policy_attributes(self):
# Verify the base class attributes of the policy
self.assertEqual(self.tabular_policy.player_ids, [0, 1])
@parameterized.parameters(*_TIC_TAC_TOE_STATES)
def test_policy_at_state(self, state, legal_actions):
index = self.tabular_policy.state_lookup[state]
prob = 1 / len(legal_actions)
np.testing.assert_array_equal(
self.tabular_policy.action_probability_array[index],
[prob if action in legal_actions else 0 for action in range(9)])
@parameterized.parameters(*_TIC_TAC_TOE_STATES)
def test_legal_actions_at_state(self, state, legal_actions):
index = self.tabular_policy.state_lookup[state]
np.testing.assert_array_equal(
self.tabular_policy.legal_actions_mask[index],
[1 if action in legal_actions else 0 for action in range(9)])
def test_call_for_state(self):
state = self.game.new_initial_state()
state.apply_action(3)
state.apply_action(4)
state.apply_action(5)
state.apply_action(6)
state.apply_action(7)
self.assertEqual(
self.tabular_policy.action_probabilities(state), {
0: 0.25,
1: 0.25,
2: 0.25,
8: 0.25
})
def test_states_ordered_by_player(self):
max_player0_index = max(
self.tabular_policy.state_lookup[state]
for state in self.tabular_policy.states_per_player[0])
min_player1_index = min(
self.tabular_policy.state_lookup[state]
for state in self.tabular_policy.states_per_player[1])
self.assertEqual(max_player0_index + 1, min_player1_index)
def test_state_in(self):
# Per state, we have 9 cells each with 3 possible states (o, x, empty)
# Tic tac toe has 4520 decision states, but the tabular policy indexes by
# InformationState, which leads to a larger number due to perfect recall
self.assertEqual(self.tabular_policy.state_in.shape, (294778, 27))
@parameterized.parameters(*_TIC_TAC_TOE_STATES)
def test_policy_for_state_string(self, state, legal_actions):
prob = 1 / len(legal_actions)
np.testing.assert_array_equal(
self.tabular_policy.policy_for_key(state),
[prob if action in legal_actions else 0 for action in range(9)])
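# Illustrative sketch of the TabularPolicy update pattern exercised by
# TabularPolicyTest below: policy_for_key returns a mutable view into the
# probability array, so assigning to it changes the policy in place. "0pb" is
# a Kuhn poker information-state key; this helper is not called by the tests.
def _example_tabular_policy_update():
  game = pyspiel.load_game("kuhn_poker")
  tabular_policy = policy.TabularPolicy(game)
  tabular_policy.policy_for_key("0pb")[:] = [0.9, 0.1]
  return tabular_policy.policy_for_key("0pb")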
class TabularPolicyTest(parameterized.TestCase):
def test_update_elementwise(self):
game = pyspiel.load_game("kuhn_poker")
tabular_policy = policy.TabularPolicy(game)
state = "0pb"
np.testing.assert_array_equal(
tabular_policy.policy_for_key(state), [0.5, 0.5])
tabular_policy.policy_for_key(state)[0] = 0.9
tabular_policy.policy_for_key(state)[1] = 0.1
np.testing.assert_array_equal(
tabular_policy.policy_for_key(state), [0.9, 0.1])
def test_update_slice(self):
game = pyspiel.load_game("kuhn_poker")
tabular_policy = policy.TabularPolicy(game)
state = "2b"
np.testing.assert_array_equal(
tabular_policy.policy_for_key(state), [0.5, 0.5])
tabular_policy.policy_for_key(state)[:] = [0.8, 0.2]
np.testing.assert_array_equal(
tabular_policy.policy_for_key(state), [0.8, 0.2])
def test_state_ordering_is_deterministic(self):
game = pyspiel.load_game("kuhn_poker")
tabular_policy = policy.TabularPolicy(game)
expected = {
"0": 0,
"0pb": 1,
"1": 2,
"1pb": 3,
"2": 4,
"2pb": 5,
"1p": 6,
"1b": 7,
"2p": 8,
"2b": 9,
"0p": 10,
"0b": 11,
}
self.assertEqual(expected, tabular_policy.state_lookup)
def test_partial_tabular_policy_empty_uniform(self):
"""Tests that a partial tabular policy works for an empty policy."""
game = pyspiel.load_game("kuhn_poker")
# python tabular policy is initialized to uniform
python_tabular_policy = policy.TabularPolicy(game)
partial_pyspiel_policy = pyspiel.PartialTabularPolicy()
self.assertNotEmpty(python_tabular_policy.state_lookup)
all_states = get_all_states.get_all_states(game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False,
include_mean_field_states=False)
self.assertNotEmpty(all_states)
for _, state in all_states.items():
tabular_probs = python_tabular_policy.action_probabilities(state)
state_policy = partial_pyspiel_policy.get_state_policy(state)
self.assertLen(state_policy, 2)
for a, p in state_policy:
self.assertAlmostEqual(p, tabular_probs[a])
def test_partial_tabular_policy_set_full(self):
"""Tests the partial tabular policy works for a complete policy."""
game = pyspiel.load_game("kuhn_poker")
# python tabular policy is initialized to uniform
python_tabular_policy = policy.TabularPolicy(game)
partial_pyspiel_policy = pyspiel.PartialTabularPolicy()
self.assertNotEmpty(python_tabular_policy.state_lookup)
all_states = get_all_states.get_all_states(game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False,
include_mean_field_states=False)
self.assertNotEmpty(all_states)
policy_dict = python_tabular_policy.to_dict()
partial_pyspiel_policy = pyspiel.PartialTabularPolicy(policy_dict)
for _, state in all_states.items():
tabular_probs = python_tabular_policy.action_probabilities(state)
state_policy = partial_pyspiel_policy.get_state_policy(state)
self.assertLen(state_policy, 2)
for a, p in state_policy:
self.assertAlmostEqual(p, tabular_probs[a])
def test_partial_tabular_policy_override_fallback(self):
"""Tests the partial tabular policy for a truly partial policy.
Specifically: assigns a full policy, overrides some entries, and
removes others. Checks that the overridden ones return correctly and that
the missing ones return the fallback.
"""
game = pyspiel.load_game("kuhn_poker")
# python tabular policy is initialized to uniform
python_tabular_policy = policy.TabularPolicy(game)
partial_pyspiel_policy = pyspiel.PartialTabularPolicy()
self.assertNotEmpty(python_tabular_policy.state_lookup)
all_states = get_all_states.get_all_states(game,
depth_limit=-1,
include_terminals=False,
include_chance_states=False,
include_mean_field_states=False)
self.assertNotEmpty(all_states)
policy_dict = python_tabular_policy.to_dict()
partial_pyspiel_policy = pyspiel.PartialTabularPolicy(policy_dict)
perturbed_policy_dict = {}
for key in policy_dict:
if np.random.uniform() < 0.5:
perturbed_policy_dict[key] = [(0, 1.0)]
partial_pyspiel_policy = pyspiel.PartialTabularPolicy(perturbed_policy_dict)
for _, state in all_states.items():
infostate_key = state.information_state_string()
state_policy = partial_pyspiel_policy.get_state_policy(state)
if infostate_key in perturbed_policy_dict:
self.assertLen(state_policy, 1)
self.assertAlmostEqual(state_policy[0][1], 1.0)
else:
tabular_probs = python_tabular_policy.action_probabilities(state)
for a, p in state_policy:
self.assertAlmostEqual(p, tabular_probs[a])
def test_states(self):
game = pyspiel.load_game("leduc_poker")
tabular_policy = policy.TabularPolicy(game)
i = 0
for state in tabular_policy.states:
self.assertEqual(i, tabular_policy.state_index(state))
i += 1
self.assertEqual(936, i)
@parameterized.parameters((policy.FirstActionPolicy, "kuhn_poker"),
(policy.UniformRandomPolicy, "kuhn_poker"),
(policy.FirstActionPolicy, "leduc_poker"),
(policy.UniformRandomPolicy, "leduc_poker"))
def test_can_turn_policy_into_tabular_policy(self, policy_class, game_name):
game = pyspiel.load_game(game_name)
realized_policy = policy_class(game)
tabular_policy = realized_policy.to_tabular()
for state in tabular_policy.states:
self.assertEqual(
realized_policy.action_probabilities(state),
tabular_policy.action_probabilities(state))
class TabularRockPaperScissorsPolicyTest(absltest.TestCase):
# Enumerating all the states for rock-paper-scissors is fast, but
# we initialize only once for consistency with slower games.
@classmethod
def setUpClass(cls):
super(TabularRockPaperScissorsPolicyTest, cls).setUpClass()
game = pyspiel.load_game_as_turn_based("matrix_rps")
cls.tabular_policy = policy.TabularPolicy(game)
def test_policy_attributes(self):
# Verify the base class attributes of the policy
self.assertEqual(self.tabular_policy.player_ids, [0, 1])
def test_tabular_policy(self):
# Test that the tabular policy is uniform random in each state.
np.testing.assert_array_equal(
self.tabular_policy.action_probability_array,
[[1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3]])
def test_states_lookup(self):
# Test that there are two valid states, indexed as 0 and 1.
game = pyspiel.load_game_as_turn_based("matrix_rps")
state = game.new_initial_state()
first_info_state = state.information_state_string()
state.apply_action(state.legal_actions()[0])
second_info_state = state.information_state_string()
self.assertCountEqual(self.tabular_policy.state_lookup,
[first_info_state, second_info_state])
self.assertCountEqual(self.tabular_policy.state_lookup.values(), [0, 1])
def test_legal_actions_mask(self):
# Test that all actions are valid in all states.
np.testing.assert_array_equal(self.tabular_policy.legal_actions_mask,
[[1, 1, 1], [1, 1, 1]])
class UniformRandomPolicyTest(absltest.TestCase):
def test_policy_attributes(self):
game = pyspiel.load_game("tiny_bridge_4p")
uniform_random_policy = policy.UniformRandomPolicy(game)
self.assertEqual(uniform_random_policy.player_ids, [0, 1, 2, 3])
def test_policy_at_state(self):
game = pyspiel.load_game("tic_tac_toe")
uniform_random_policy = policy.UniformRandomPolicy(game)
state = game.new_initial_state()
state.apply_action(2)
state.apply_action(4)
state.apply_action(6)
state.apply_action(8)
self.assertEqual(
uniform_random_policy.action_probabilities(state), {
0: 0.2,
1: 0.2,
3: 0.2,
5: 0.2,
7: 0.2
})
def test_players_have_different_legal_actions(self):
game = pyspiel.load_game("oshi_zumo")
uniform_random_policy = policy.UniformRandomPolicy(game)
state = game.new_initial_state()
state.apply_actions([46, 49])
# Started with 50 coins each, now have 4 and 1 respectively
self.assertEqual(
uniform_random_policy.action_probabilities(state, player_id=0), {
0: 0.2,
1: 0.2,
2: 0.2,
3: 0.2,
4: 0.2
})
self.assertEqual(
uniform_random_policy.action_probabilities(state, player_id=1), {
0: 0.5,
1: 0.5
})
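# Illustrative sketch of merging per-player TabularPolicy objects into a single
# joint policy, the operation exercised by MergeTabularPoliciesTest below. This
# helper is not called by the tests.
def _example_merge_tabular_policies():
  game = pyspiel.load_game("kuhn_poker")
  per_player = [policy.TabularPolicy(game, players=(p,)) for p in range(2)]
  merged = policy.merge_tabular_policies(per_player, game)
  return merged.action_probability_array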
class MergeTabularPoliciesTest(absltest.TestCase):
def test_identity(self):
num_players = 2
game = pyspiel.load_game("kuhn_poker", {"players": num_players})
tabular_policies = [ # Policy limited to player.
policy.TabularPolicy(game, players=(player,))
for player in range(num_players)
]
for player, tabular_policy in enumerate(tabular_policies):
tabular_policy.action_probability_array[:] = 0
tabular_policy.action_probability_array[:, player] = 1.0
merged_tabular_policy = policy.merge_tabular_policies(
tabular_policies, game)
self.assertIdentityPoliciesEqual(tabular_policies, merged_tabular_policy,
game)
def test_identity_redundant(self):
num_players = 2
game = pyspiel.load_game("kuhn_poker", {"players": num_players})
tabular_policies = [ # Policy for all players.
policy.TabularPolicy(game, players=None)
for player in range(num_players)
]
for player, tabular_policy in enumerate(tabular_policies):
tabular_policy.action_probability_array[:] = 0
tabular_policy.action_probability_array[:, player] = 1.0
merged_tabular_policy = policy.merge_tabular_policies(
tabular_policies, game)
self.assertIdentityPoliciesEqual(tabular_policies, merged_tabular_policy,
game)
def test_identity_missing(self):
num_players = 2
game = pyspiel.load_game("kuhn_poker", {"players": num_players})
tabular_policies = [ # Only first player (repeated).
policy.TabularPolicy(game, players=(0,))
for player in range(num_players)
]
for player, tabular_policy in enumerate(tabular_policies):
tabular_policy.action_probability_array[:] = 0
tabular_policy.action_probability_array[:, player] = 1.0
merged_tabular_policy = policy.merge_tabular_policies(
tabular_policies, game)
for player in range(game.num_players()):
if player == 0:
self.assertListEqual(tabular_policies[player].states_per_player[player],
merged_tabular_policy.states_per_player[player])
for p_state in merged_tabular_policy.states_per_player[player]:
to_index = merged_tabular_policy.state_lookup[p_state]
from_index = tabular_policies[player].state_lookup[p_state]
self.assertTrue(
np.allclose(
merged_tabular_policy.action_probability_array[to_index],
tabular_policies[player].action_probability_array[from_index])
)
self.assertTrue(
np.allclose(
merged_tabular_policy.action_probability_array[to_index,
player], 1))
else:
# Missing players have uniform policy.
self.assertEmpty(tabular_policies[player].states_per_player[player])
for p_state in merged_tabular_policy.states_per_player[player]:
to_index = merged_tabular_policy.state_lookup[p_state]
self.assertTrue(
np.allclose(
merged_tabular_policy.action_probability_array[to_index,
player], 0.5))
def assertIdentityPoliciesEqual(self, tabular_policies, merged_tabular_policy,
game):
for player in range(game.num_players()):
self.assertListEqual(tabular_policies[player].states_per_player[player],
merged_tabular_policy.states_per_player[player])
for p_state in merged_tabular_policy.states_per_player[player]:
to_index = merged_tabular_policy.state_lookup[p_state]
from_index = tabular_policies[player].state_lookup[p_state]
self.assertTrue(
np.allclose(
merged_tabular_policy.action_probability_array[to_index],
tabular_policies[player].action_probability_array[from_index]))
self.assertTrue(
np.allclose(
merged_tabular_policy.action_probability_array[to_index,
player], 1))
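# Illustrative sketch of policy.joint_action_probabilities on a simultaneous-
# move game, as tested below: it yields ((a0, a1), prob) pairs over the joint
# action space. This helper is not called by the tests.
def _example_joint_action_probabilities():
  game = pyspiel.load_game("python_iterated_prisoners_dilemma")
  uniform = policy.UniformRandomPolicy(game)
  return list(
      policy.joint_action_probabilities(game.new_initial_state(), uniform))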
class JointActionProbTest(absltest.TestCase):
def test_joint_action_probabilities(self):
"""Test expected behavior of joint_action_probabilities."""
game = pyspiel.load_game("python_iterated_prisoners_dilemma")
uniform_policy = policy.UniformRandomPolicy(game)
joint_action_probs = policy.joint_action_probabilities(
game.new_initial_state(), uniform_policy)
self.assertCountEqual(
list(joint_action_probs), [
((0, 0), 0.25),
((1, 1), 0.25),
((1, 0), 0.25),
((0, 1), 0.25),
])
def test_joint_action_probabilities_failure_on_seq_game(self):
"""Test failure of child on sequential games."""
game = pyspiel.load_game("kuhn_poker")
with self.assertRaises(AssertionError):
list(policy.joint_action_probabilities(
game.new_initial_state(), policy.UniformRandomPolicy(game)))
class ChildTest(absltest.TestCase):
def test_child_function_expected_behavior_for_seq_game(self):
"""Test expected behavior of child on sequential games."""
game = pyspiel.load_game("tic_tac_toe")
initial_state = game.new_initial_state()
action = 3
new_state = policy.child(initial_state, action)
self.assertNotEqual(new_state.history(), initial_state.history())
expected_new_state = initial_state.child(action)
self.assertNotEqual(new_state, expected_new_state)
self.assertEqual(new_state.history(), expected_new_state.history())
def test_child_function_expected_behavior_for_sim_game(self):
"""Test expected behavior of child on simultaneous games."""
game = pyspiel.load_game("python_iterated_prisoners_dilemma")
parameter_state = game.new_initial_state()
actions = [1, 1]
new_state = policy.child(parameter_state, actions)
self.assertEqual(str(new_state), ("p0:D p1:D"))
def test_child_function_failure_behavior_for_sim_game(self):
"""Test failure behavior of child on simultaneous games."""
game = pyspiel.load_game("python_iterated_prisoners_dilemma")
parameter_state = game.new_initial_state()
with self.assertRaises(AssertionError):
policy.child(parameter_state, 0)
if __name__ == "__main__":
np.random.seed(SEED)
absltest.main()
| open_spiel-master | open_spiel/python/tests/policy_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the game-specific functions for euchre."""
from absl.testing import absltest
import pyspiel
euchre = pyspiel.euchre
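# Illustrative sketch of the euchre card helpers checked below: card index 8 is
# the jack of clubs, which is promoted to rank 100 (the right bower) when clubs
# are trump. This helper is not called by the tests.
def _example_euchre_card_helpers():
  print(euchre.card_string(8))                    # 'CJ'
  print(euchre.card_suit(8))                      # Suit.CLUBS
  print(euchre.card_rank(8, euchre.Suit.CLUBS))   # 100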
class GamesEuchreTest(absltest.TestCase):
def test_bindings(self):
self.assertEqual(euchre.JACK_RANK, 2)
self.assertEqual(euchre.NUM_SUITS, 4)
self.assertEqual(euchre.NUM_CARDS_PER_SUIT, 6)
self.assertEqual(euchre.NUM_CARDS, 24)
self.assertEqual(euchre.PASS_ACTION, 24)
self.assertEqual(euchre.CLUBS_TRUMP_ACTION, 25)
self.assertEqual(euchre.DIAMONDS_TRUMP_ACTION, 26)
self.assertEqual(euchre.HEARTS_TRUMP_ACTION, 27)
self.assertEqual(euchre.SPADES_TRUMP_ACTION, 28)
self.assertEqual(euchre.GO_ALONE_ACTION, 29)
self.assertEqual(euchre.PLAY_WITH_PARTNER_ACTION, 30)
self.assertEqual(euchre.MAX_BIDS, 8)
self.assertEqual(euchre.NUM_TRICKS, 5)
self.assertEqual(euchre.FULL_HAND_SIZE, 5)
game = pyspiel.load_game('euchre')
state = game.new_initial_state()
self.assertEqual(state.num_cards_dealt(), 0)
self.assertEqual(state.num_cards_played(), 0)
self.assertEqual(state.num_passes(), 0)
self.assertEqual(state.upcard(), pyspiel.INVALID_ACTION)
self.assertEqual(state.discard(), pyspiel.INVALID_ACTION)
self.assertEqual(state.trump_suit(), pyspiel.INVALID_ACTION)
self.assertEqual(state.left_bower(), pyspiel.INVALID_ACTION)
self.assertEqual(state.right_bower(), pyspiel.INVALID_ACTION)
self.assertEqual(state.declarer(), pyspiel.PlayerId.INVALID)
self.assertEqual(state.declarer_partner(), pyspiel.PlayerId.INVALID)
self.assertEqual(state.first_defender(), pyspiel.PlayerId.INVALID)
self.assertEqual(state.second_defender(), pyspiel.PlayerId.INVALID)
self.assertIsNone(state.declarer_go_alone())
self.assertEqual(state.lone_defender(), pyspiel.PlayerId.INVALID)
self.assertEqual(state.active_players(), [True, True, True, True])
self.assertEqual(state.dealer(), pyspiel.INVALID_ACTION)
self.assertEqual(state.current_phase(), euchre.Phase.DEALER_SELECTION)
self.assertEqual(state.current_trick_index(), 0)
self.assertEqual(state.card_holder(), [None] * 24)
self.assertEqual(euchre.card_rank(8), euchre.JACK_RANK)
self.assertEqual(euchre.card_rank(8, euchre.Suit.CLUBS), 100)
self.assertEqual(euchre.card_suit(8), euchre.Suit.CLUBS)
self.assertEqual(euchre.card_suit(8, euchre.Suit.SPADES),
euchre.Suit.SPADES)
self.assertEqual(euchre.card_string(8), 'CJ')
trick = state.tricks()[state.current_trick_index()]
self.assertEqual(trick.winning_card(), pyspiel.INVALID_ACTION)
self.assertEqual(trick.led_suit(), euchre.Suit.INVALID_SUIT)
self.assertEqual(trick.trump_suit(), euchre.Suit.INVALID_SUIT)
self.assertFalse(trick.trump_played())
self.assertEqual(trick.leader(), pyspiel.PlayerId.INVALID)
self.assertEqual(trick.winner(), pyspiel.PlayerId.INVALID)
self.assertEqual(trick.cards(), [pyspiel.INVALID_ACTION])
trick = state.current_trick()
self.assertEqual(trick.led_suit(), euchre.Suit.INVALID_SUIT)
self.assertEqual(trick.trump_suit(), euchre.Suit.INVALID_SUIT)
self.assertFalse(trick.trump_played())
self.assertEqual(trick.leader(), pyspiel.PlayerId.INVALID)
self.assertEqual(trick.winner(), pyspiel.PlayerId.INVALID)
self.assertEqual(trick.cards(), [pyspiel.INVALID_ACTION])
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/tests/games_euchre_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the C++ matrix game utility methods exposed to Python."""
from absl.testing import absltest
from open_spiel.python.algorithms import lp_solver
import pyspiel
class MatrixGamesUtilsTest(absltest.TestCase):
def test_num_deterministic_policies(self):
# Kuhn poker has six information sets with two actions each (2^6 = 64).
game = pyspiel.load_game("kuhn_poker")
self.assertEqual(pyspiel.num_deterministic_policies(game, 0), 64)
self.assertEqual(pyspiel.num_deterministic_policies(game, 1), 64)
# Leduc poker has larger than 2^64 - 1, so -1 will be returned.
game = pyspiel.load_game("leduc_poker")
self.assertEqual(pyspiel.num_deterministic_policies(game, 0), -1)
self.assertEqual(pyspiel.num_deterministic_policies(game, 1), -1)
def test_extensive_to_matrix_game(self):
kuhn_game = pyspiel.load_game("kuhn_poker")
kuhn_matrix_game = pyspiel.extensive_to_matrix_game(kuhn_game)
unused_p0_strategy, unused_p1_strategy, p0_sol_val, p1_sol_val = (
lp_solver.solve_zero_sum_matrix_game(kuhn_matrix_game))
# value from Kuhn 1950 or https://en.wikipedia.org/wiki/Kuhn_poker
self.assertAlmostEqual(p0_sol_val, -1 / 18)
self.assertAlmostEqual(p1_sol_val, +1 / 18)
def test_extensive_to_matrix_game_type(self):
game = pyspiel.extensive_to_matrix_game(pyspiel.load_game("kuhn_poker"))
game_type = game.get_type()
self.assertEqual(game_type.dynamics, pyspiel.GameType.Dynamics.SIMULTANEOUS)
self.assertEqual(game_type.chance_mode,
pyspiel.GameType.ChanceMode.DETERMINISTIC)
self.assertEqual(game_type.information,
pyspiel.GameType.Information.ONE_SHOT)
self.assertEqual(game_type.utility, pyspiel.GameType.Utility.ZERO_SUM)
def test_extensive_to_matrix_game_payoff_matrix(self):
turn_based_game = pyspiel.load_game_as_turn_based("matrix_pd")
matrix_game = pyspiel.extensive_to_matrix_game(turn_based_game)
orig_game = pyspiel.load_matrix_game("matrix_pd")
for row in range(orig_game.num_rows()):
for col in range(orig_game.num_cols()):
for player in range(2):
self.assertEqual(
orig_game.player_utility(player, row, col),
matrix_game.player_utility(player, row, col))
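# Illustrative sketch of the extensive-to-matrix pipeline tested above: convert
# Kuhn poker to its normal form and solve the zero-sum matrix game for its
# value (-1/18 for player 0). This helper is not called by the tests.
def _example_solve_kuhn_matrix_game():
  kuhn_matrix_game = pyspiel.extensive_to_matrix_game(
      pyspiel.load_game("kuhn_poker"))
  _, _, p0_value, p1_value = lp_solver.solve_zero_sum_matrix_game(
      kuhn_matrix_game)
  return p0_value, p1_value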
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/matrix_game_utils_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.referee."""
import os
from absl import flags
from absl.testing import absltest
import pyspiel
flags.DEFINE_string("bot_dir",
os.path.dirname(__file__) + "/../bots",
"Path to python implementation of bots.")
FLAGS = flags.FLAGS
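# Illustrative sketch of the Referee API used below, assuming `bot_cmd` is a
# shell command for any bot speaking the referee protocol (the argument is a
# placeholder). This helper is not called by the tests.
def _example_play_one_match(bot_cmd):
  ref = pyspiel.Referee(
      "kuhn_poker", [bot_cmd, bot_cmd],
      settings=pyspiel.TournamentSettings(
          timeout_ready=2000, timeout_start=500))
  return ref.play_tournament(num_matches=1)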
class RefereeTest(absltest.TestCase):
def test_playing_tournament(self):
ref = pyspiel.Referee(
"kuhn_poker", [f"python {FLAGS.bot_dir}/higc_random_bot_test.py"] * 2,
settings=pyspiel.TournamentSettings(
timeout_ready=2000, timeout_start=500))
results = ref.play_tournament(num_matches=1)
self.assertLen(results.matches, 1)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/higc_referee_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/tests/__init__.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.pybind11.pyspiel."""
import os
from absl.testing import absltest
from open_spiel.python import games # pylint: disable=unused-import
from open_spiel.python import policy
from open_spiel.python.mfg import games as mfgs # pylint: disable=unused-import
import pyspiel
# Specify game names in alphabetical order, to make the test easier to read.
EXPECTED_GAMES = frozenset([
"2048",
"add_noise",
"amazons",
"backgammon",
"bargaining",
"battleship",
"blackjack",
"blotto",
"breakthrough",
"bridge",
"bridge_uncontested_bidding",
"catch",
"checkers",
"chess",
"cliff_walking",
"clobber",
"coin_game",
"colored_trails",
"connect_four",
"coop_box_pushing",
"coop_to_1p",
"coordinated_mp",
"crazy_eights",
"cursor_go",
"dark_chess",
"dark_hex",
"dark_hex_ir",
"deep_sea",
"dots_and_boxes",
"dou_dizhu",
"efg_game",
"euchre",
"first_sealed_auction",
"gin_rummy",
"go",
"goofspiel",
"havannah",
"hex",
"hearts",
"kriegspiel",
"kuhn_poker",
"laser_tag",
"lewis_signaling",
"leduc_poker",
"liars_dice",
"liars_dice_ir",
"maedn",
"mancala",
"markov_soccer",
"matching_pennies_3p",
"matrix_bos",
"matrix_brps",
"matrix_cd",
"matrix_coordination",
"matrix_mp",
"matrix_pd",
"matrix_rps",
"matrix_rpsw",
"matrix_sh",
"matrix_shapleys_game",
"mean_field_lin_quad",
"mfg_crowd_modelling",
"mfg_crowd_modelling_2d",
"mfg_dynamic_routing",
"mfg_garnet",
"misere",
"morpion_solitaire",
"negotiation",
"nfg_game",
"nim",
"nine_mens_morris",
"normal_form_extensive_game",
"oh_hell",
"oshi_zumo",
"othello",
"oware",
"pentago",
"pathfinding",
"phantom_go",
"phantom_ttt",
"phantom_ttt_ir",
"pig",
"python_block_dominoes",
"python_dynamic_routing",
"python_iterated_prisoners_dilemma",
"python_mfg_crowd_avoidance",
"python_mfg_crowd_modelling",
"python_mfg_dynamic_routing",
"python_mfg_periodic_aversion",
"python_mfg_predator_prey",
"python_kuhn_poker",
"python_tic_tac_toe",
"python_liars_poker",
"quoridor",
"repeated_game",
"rbc",
"restricted_nash_response",
"sheriff",
"skat",
"start_at",
"solitaire",
"stones_and_gems",
"tarok",
"tic_tac_toe",
"tiny_bridge_2p",
"tiny_bridge_4p",
"tiny_hanabi",
"trade_comm",
"turn_based_simultaneous_game",
"ultimate_tic_tac_toe",
"y",
"zerosum",
])
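# Illustrative sketch of the game-parameter helpers exercised below: a game
# string parses into a dict (with the game name under "name") and serializes
# back to a canonical string. The example string is arbitrary; this helper is
# not called by the tests.
def _example_game_parameter_roundtrip():
  params = pyspiel.game_parameters_from_string("goofspiel(num_cards=4)")
  # params == {"name": "goofspiel", "num_cards": 4}
  return pyspiel.game_parameters_to_string(params)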
class PyspielTest(absltest.TestCase):
def test_registered_names(self):
game_names = pyspiel.registered_names()
expected = list(EXPECTED_GAMES)
if (os.environ.get("OPEN_SPIEL_BUILD_WITH_HANABI", "OFF") == "ON" and
"hanabi" not in expected):
expected.append("hanabi")
if (os.environ.get("OPEN_SPIEL_BUILD_WITH_ACPC", "OFF") == "ON" and
"universal_poker" not in expected):
expected.append("universal_poker")
expected = sorted(expected)
self.assertCountEqual(game_names, expected)
  def test_default_loadable(self):
    # Games which cannot be loaded with default parameters will be skipped by
    # several standard tests. We make a list of such games here in order to
    # make implementors think twice about making new games non-default-loadable.
non_default_loadable = [
game.short_name
for game in pyspiel.registered_games()
if not game.default_loadable
]
expected = [
# Being non-default-loadable prevents various automated tests.
# Only add games here if there is no sensible default for a parameter.
"add_noise",
"efg_game",
"nfg_game",
"misere",
"turn_based_simultaneous_game",
"normal_form_extensive_game",
"repeated_game",
"restricted_nash_response",
"start_at",
"zerosum",
]
self.assertCountEqual(non_default_loadable, expected)
def test_registered_game_attributes(self):
game_list = {game.short_name: game for game in pyspiel.registered_games()}
self.assertEqual(game_list["kuhn_poker"].dynamics,
pyspiel.GameType.Dynamics.SEQUENTIAL)
self.assertEqual(game_list["kuhn_poker"].chance_mode,
pyspiel.GameType.ChanceMode.EXPLICIT_STOCHASTIC)
self.assertEqual(game_list["kuhn_poker"].information,
pyspiel.GameType.Information.IMPERFECT_INFORMATION)
self.assertEqual(game_list["kuhn_poker"].utility,
pyspiel.GameType.Utility.ZERO_SUM)
self.assertEqual(game_list["kuhn_poker"].min_num_players, 2)
def test_create_game(self):
game = pyspiel.load_game("kuhn_poker")
game_info = game.get_type()
self.assertEqual(game_info.information,
pyspiel.GameType.Information.IMPERFECT_INFORMATION)
self.assertEqual(game.num_players(), 2)
def test_play_kuhn_poker(self):
game = pyspiel.load_game("kuhn_poker")
state = game.new_initial_state()
self.assertEqual(state.is_chance_node(), True)
self.assertEqual(state.chance_outcomes(), [(0, 1 / 3), (1, 1 / 3),
(2, 1 / 3)])
state.apply_action(1)
self.assertEqual(state.is_chance_node(), True)
self.assertEqual(state.chance_outcomes(), [(0, 0.5), (2, 0.5)])
state.apply_action(2)
self.assertEqual(state.is_chance_node(), False)
self.assertEqual(state.legal_actions(), [0, 1])
sampler = pyspiel.UniformProbabilitySampler(0., 1.)
clone = state.resample_from_infostate(1, sampler)
self.assertEqual(
clone.information_state_string(1), state.information_state_string(1))
def test_othello(self):
game = pyspiel.load_game("othello")
state = game.new_initial_state()
self.assertFalse(state.is_chance_node())
self.assertFalse(state.is_terminal())
self.assertEqual(state.legal_actions(), [19, 26, 37, 44])
def test_tic_tac_toe(self):
game = pyspiel.load_game("tic_tac_toe")
state = game.new_initial_state()
self.assertFalse(state.is_chance_node())
self.assertFalse(state.is_terminal())
self.assertEqual(state.legal_actions(), [0, 1, 2, 3, 4, 5, 6, 7, 8])
def test_game_parameters_from_string_empty(self):
self.assertEqual(pyspiel.game_parameters_from_string(""), {})
def test_game_parameters_from_string_simple(self):
self.assertEqual(
pyspiel.game_parameters_from_string("foo"), {"name": "foo"})
def test_game_parameters_from_string_with_options(self):
self.assertEqual(
pyspiel.game_parameters_from_string("foo(x=2,y=true)"), {
"name": "foo",
"x": 2,
"y": True
})
def test_game_parameters_from_string_with_subgame(self):
self.assertEqual(
pyspiel.game_parameters_from_string(
"foo(x=2,y=true,subgame=bar(z=False))"), {
"name": "foo",
"x": 2,
"y": True,
"subgame": {
"name": "bar",
"z": False
}
})
def test_game_parameters_to_string_empty(self):
self.assertEqual(pyspiel.game_parameters_to_string({}), "")
def test_game_parameters_to_string_simple(self):
self.assertEqual(
pyspiel.game_parameters_to_string({"name": "foo"}), "foo()")
def test_game_parameters_to_string_with_options(self):
self.assertEqual(
pyspiel.game_parameters_to_string({
"name": "foo",
"x": 2,
"y": True
}), "foo(x=2,y=True)")
def test_game_parameters_to_string_with_subgame(self):
self.assertEqual(
pyspiel.game_parameters_to_string({
"name": "foo",
"x": 2,
"y": True,
"subgame": {
"name": "bar",
"z": False
}
}), "foo(subgame=bar(z=False),x=2,y=True)")
def test_game_type(self):
game_type = pyspiel.GameType(
"matrix_mp", "Matching Pennies", pyspiel.GameType.Dynamics.SIMULTANEOUS,
pyspiel.GameType.ChanceMode.DETERMINISTIC,
pyspiel.GameType.Information.PERFECT_INFORMATION,
pyspiel.GameType.Utility.ZERO_SUM,
pyspiel.GameType.RewardModel.TERMINAL, 2, 2, True, True, False, False,
dict())
self.assertEqual(game_type.chance_mode,
pyspiel.GameType.ChanceMode.DETERMINISTIC)
def test_error_handling(self):
with self.assertRaisesRegex(RuntimeError,
"Unknown game 'invalid_game_name'"):
unused_game = pyspiel.load_game("invalid_game_name")
def test_can_create_cpp_tabular_policy(self):
for game_name in ["kuhn_poker", "leduc_poker", "liars_dice"]:
game = pyspiel.load_game(game_name)
# We just test that we can create a tabular policy.
policy.python_policy_to_pyspiel_policy(policy.TabularPolicy(game))
def test_simultaneous_game_history(self):
game = pyspiel.load_game("coop_box_pushing")
state = game.new_initial_state()
state.apply_action(0)
state2 = game.new_initial_state()
state2.apply_actions([0] * game.num_players())
self.assertEqual(state.history(), state2.history())
def test_record_batched_trajectories(self):
for game_name in ["kuhn_poker", "leduc_poker", "liars_dice"]:
game = pyspiel.load_game(game_name)
python_policy = policy.TabularPolicy(game)
tabular_policy = policy.python_policy_to_pyspiel_policy(python_policy)
policies = [tabular_policy] * 2
# We test that we can create a batch of trajectories.
seed = 0
batch_size = 128
include_full_observations = False
pyspiel.record_batched_trajectories(game, policies,
python_policy.state_lookup,
batch_size, include_full_observations,
seed, -1)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/pyspiel_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for third_party.open_spiel.python.observation."""
import collections
import random
import time
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import get_all_states
from open_spiel.python.observation import INFO_STATE_OBS_TYPE
from open_spiel.python.observation import make_observation
import pyspiel
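# Illustrative sketch of the observation API exercised below: build an observer
# for a game, fill it from a state for a given player, and read either the flat
# tensor or the named sub-tensors. The game choice is arbitrary; this helper is
# not called by the tests.
def _example_observation_usage():
  game = pyspiel.load_game("leduc_poker")
  observation = make_observation(game)
  state = game.new_initial_state()
  while state.is_chance_node():
    state.apply_action(state.legal_actions()[0])
  observation.set_from(state, player=0)
  return observation.tensor, dict(observation.dict)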
class ObservationTest(absltest.TestCase):
def test_leduc_observation(self):
game = pyspiel.load_game("leduc_poker")
observation = make_observation(game)
state = game.new_initial_state()
state.apply_action(1) # Deal 1
state.apply_action(2) # Deal 2
state.apply_action(2) # Bet
state.apply_action(1) # Call
state.apply_action(3) # Deal 3
observation.set_from(state, player=0)
np.testing.assert_array_equal(
observation.tensor, [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 3, 3])
self.assertEqual(
list(observation.dict),
["player", "private_card", "community_card", "pot_contribution"])
np.testing.assert_array_equal(observation.dict["player"], [1, 0])
np.testing.assert_array_equal(observation.dict["private_card"],
[0, 1, 0, 0, 0, 0])
np.testing.assert_array_equal(observation.dict["community_card"],
[0, 0, 0, 1, 0, 0])
np.testing.assert_array_equal(observation.dict["pot_contribution"], [3, 3])
self.assertEqual(
observation.string_from(state, 0),
"[Observer: 0][Private: 1][Round 2][Player: 0][Pot: 6]"
"[Money: 97 97][Public: 3][Ante: 3 3]")
def test_leduc_info_state(self):
game = pyspiel.load_game("leduc_poker")
observation = make_observation(game, INFO_STATE_OBS_TYPE)
state = game.new_initial_state()
state.apply_action(1) # Deal 1
state.apply_action(2) # Deal 2
state.apply_action(2) # Bet
state.apply_action(1) # Call
state.apply_action(3) # Deal 3
observation.set_from(state, player=0)
np.testing.assert_array_equal(observation.tensor, [
1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0
])
self.assertEqual(
list(observation.dict),
["player", "private_card", "community_card", "betting"])
np.testing.assert_array_equal(observation.dict["player"], [1, 0])
np.testing.assert_array_equal(observation.dict["private_card"],
[0, 1, 0, 0, 0, 0])
np.testing.assert_array_equal(observation.dict["community_card"],
[0, 0, 0, 1, 0, 0])
np.testing.assert_array_equal(
observation.dict["betting"],
[
[[0, 1], [1, 0], [0, 0], [0, 0]], # First round
[[0, 0], [0, 0], [0, 0], [0, 0]], # Second round
])
self.assertEqual(
observation.string_from(state, 0),
"[Observer: 0][Private: 1][Round 2][Player: 0][Pot: 6]"
"[Money: 97 97][Public: 3][Round1: 2 1][Round2: ]")
def test_leduc_info_state_as_single_tensor(self):
game = pyspiel.load_game("leduc_poker")
observation = make_observation(
game, INFO_STATE_OBS_TYPE,
pyspiel.game_parameters_from_string("single_tensor"))
state = game.new_initial_state()
state.apply_action(1) # Deal 1
state.apply_action(2) # Deal 2
state.apply_action(2) # Bet
state.apply_action(1) # Call
state.apply_action(3) # Deal 3
observation.set_from(state, player=0)
self.assertEqual(list(observation.dict), ["info_state"])
np.testing.assert_array_equal(observation.dict["info_state"], [
1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0
])
def test_leduc_all_player_privates(self):
game = pyspiel.load_game("leduc_poker")
observation = make_observation(
game,
pyspiel.IIGObservationType(
perfect_recall=True,
private_info=pyspiel.PrivateInfoType.ALL_PLAYERS))
state = game.new_initial_state()
state.apply_action(1) # Deal 1
state.apply_action(2) # Deal 2
state.apply_action(2) # Bet
state.apply_action(1) # Call
state.apply_action(3) # Deal 3
observation.set_from(state, player=0)
np.testing.assert_array_equal(observation.dict["private_cards"], [
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
])
def test_benchmark_state_generation(self):
# Generate trajectories to test on
game = pyspiel.load_game("chess")
trajectories = []
for _ in range(20):
state = game.new_initial_state()
while not state.is_terminal():
state.apply_action(random.choice(state.legal_actions()))
trajectories.append(state.history())
# New API
total = 0
observation = make_observation(game)
start = time.time()
for trajectory in trajectories:
state = game.new_initial_state()
for action in trajectory:
state.apply_action(action)
observation.set_from(state, 0)
total += np.mean(observation.tensor)
end = time.time()
print("New API time per iteration "
f"{1000*(end-start)/len(trajectories)}msec")
# Old API
total = 0
start = time.time()
for trajectory in trajectories:
state = game.new_initial_state()
for action in trajectory:
state.apply_action(action)
obs = state.observation_tensor(0)
tensor = np.asarray(obs)
total += np.mean(tensor)
end = time.time()
print("Old API time per iteration "
f"{1000*(end-start)/len(trajectories)}msec")
def test_compression_binary(self):
# All infostates for leduc are binary, so we can compress them effectively.
game = pyspiel.load_game("leduc_poker")
obs1 = make_observation(game, INFO_STATE_OBS_TYPE)
obs2 = make_observation(game, INFO_STATE_OBS_TYPE)
self.assertLen(obs1.tensor, 30) # 30 floats = 120 bytes
for state in get_all_states.get_all_states(game).values():
for player in range(game.num_players()):
obs1.set_from(state, player)
compressed = obs1.compress()
self.assertEqual(type(compressed), bytes)
self.assertLen(compressed, 5)
obs2.decompress(compressed)
np.testing.assert_array_equal(obs1.tensor, obs2.tensor)
def test_compression_none(self):
# Most observations for leduc have non-binary data, so we can't
# currently compress them.
game = pyspiel.load_game("leduc_poker")
obs1 = make_observation(game)
obs2 = make_observation(game)
self.assertLen(obs1.tensor, 16) # 16 floats = 64 bytes
freq = collections.Counter()
for state in get_all_states.get_all_states(game).values():
for player in range(game.num_players()):
obs1.set_from(state, player)
compressed = obs1.compress()
self.assertEqual(type(compressed), bytes)
freq[len(compressed)] += 1
obs2.decompress(compressed)
np.testing.assert_array_equal(obs1.tensor, obs2.tensor)
expected_freq = {
3: 840, # Compressible states take 3 bytes
65: 17760, # Uncompressible states take 65 bytes
}
self.assertEqual(freq, expected_freq)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/observation_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the game-specific functions for chess."""
from absl.testing import absltest
import numpy as np
import pyspiel
chess = pyspiel.chess
class GamesChessTest(absltest.TestCase):
def test_bindings_sim(self):
game = pyspiel.load_game("chess")
state = game.new_initial_state()
while not state.is_terminal():
print(state)
player = state.current_player()
legal_actions = state.legal_actions()
board = state.board()
for action in legal_actions:
action_str = state.action_to_string(player, action)
move = chess.action_to_move(action, board)
move_from = move.from_square
move_to = move.to_square
decoded_from_to = (f"({move_from.x} {move_from.y}) -> " +
f"({move_to.x} {move_to.y})")
print(f"Legal action: {action_str} decoded from to {decoded_from_to}")
print(f"Move representations: {move.to_string()} | " +
f"{move.to_lan()} | {move.to_san(board)}")
action = np.random.choice(legal_actions)
state.apply_action(action)
print(board.to_unicode_string())
print(board.debug_string())
print("Moves history:")
print(" ".join([move.to_lan() for move in state.moves_history()]))
self.assertTrue(state.is_terminal())
if __name__ == "__main__":
np.random.seed(87375711)
absltest.main()
| open_spiel-master | open_spiel/python/tests/games_chess_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Python bindings for game transforms."""
from absl.testing import absltest
import pyspiel
class RepeatedGameTest(absltest.TestCase):
def test_create_repeated_game(self):
"""Test both create_repeated_game function signatures."""
repeated_game = pyspiel.create_repeated_game("matrix_rps",
{"num_repetitions": 10})
assert repeated_game.utility_sum() == 0
state = repeated_game.new_initial_state()
for _ in range(10):
state.apply_actions([0, 0])
assert state.is_terminal()
stage_game = pyspiel.load_game("matrix_mp")
repeated_game = pyspiel.create_repeated_game(stage_game,
{"num_repetitions": 5})
state = repeated_game.new_initial_state()
for _ in range(5):
state.apply_actions([0, 0])
assert state.is_terminal()
stage_game = pyspiel.load_game("matrix_pd")
repeated_game = pyspiel.create_repeated_game(stage_game,
{"num_repetitions": 5})
assert repeated_game.utility_sum() is None
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/game_transforms_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from absl.testing import absltest
from absl.testing import parameterized
from open_spiel.python import test_utils
import pyspiel
# All games with kSampledStochastic chance mode.
SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST = [
g for g in pyspiel.registered_games() if g.default_loadable and
g.chance_mode == pyspiel.GameType.ChanceMode.SAMPLED_STOCHASTIC
]
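# Note (added comment): the parameterized tests below assume at least two such
# games are registered; the assert makes that precondition explicit.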
assert len(SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST) >= 2
# We only do 2 runs as this is slow.
NUM_RUNS = 2
class SampledStochasticGamesTest(parameterized.TestCase):
@parameterized.parameters(*SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST)
def test_stateful_game_serialization(self, game_info):
game = pyspiel.load_game(game_info.short_name, {"rng_seed": 0})
for seed in range(NUM_RUNS):
# Mutate game's internal RNG state by doing a full playout.
test_utils.random_playout(game.new_initial_state(), seed)
deserialized_game = pickle.loads(pickle.dumps(game))
# Make sure initial states are the same after game deserialization.
state = test_utils.random_playout(game.new_initial_state(), seed)
deserialized_state = test_utils.random_playout(
deserialized_game.new_initial_state(), seed)
self.assertEqual(str(state), str(deserialized_state))
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/sampled_stochastic_games_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the C++ nfg_game methods exposed to Python."""
from absl.testing import absltest
import pyspiel
class NFGGameTest(absltest.TestCase):
def test_pd(self):
pd_nfg_string = ("""NFG 1 R "OpenSpiel export of matrix_pd()"
{ "Player 0" "Player 1" } { 2 2 }
5 5
10 0
0 10
1 1
""")
game = pyspiel.load_nfg_game(pd_nfg_string)
# First (row) player utilities (player, row, col)
self.assertEqual(game.player_utility(0, 0, 0), 5)
self.assertEqual(game.player_utility(0, 1, 0), 10)
self.assertEqual(game.player_utility(0, 0, 1), 0)
self.assertEqual(game.player_utility(0, 1, 1), 1)
# Now, second (column) player
self.assertEqual(game.player_utility(1, 0, 0), 5)
self.assertEqual(game.player_utility(1, 1, 0), 0)
self.assertEqual(game.player_utility(1, 0, 1), 10)
self.assertEqual(game.player_utility(1, 1, 1), 1)
def test_native_export_import(self):
"""Check that we can import games that we've exported.
We do not do any additional checking here, as these methods are already
being extensively tested in nfg_test.cc. The purpose of this test is only
to check that the python wrapping works.
"""
game_strings = [
"matrix_rps", "matrix_shapleys_game", "matrix_pd", "matrix_sh",
"blotto(players=2,coins=5,fields=3)",
"blotto(players=3,coins=5,fields=3)"
]
for game_string in game_strings:
game = pyspiel.load_game(game_string)
nfg_text = pyspiel.game_to_nfg_string(game)
nfg_game = pyspiel.load_nfg_game(nfg_text)
self.assertIsNotNone(nfg_game)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/nfg_game_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.pybind11.pyspiel."""
from absl.testing import absltest
from open_spiel.python import rl_environment
import pyspiel
class RLEnvironmentTest(absltest.TestCase):
def test_create_game(self):
env = rl_environment.Environment("tic_tac_toe")
self.assertEqual(env.is_turn_based, True)
self.assertEqual(env.num_players, 2)
def test_create_game_with_args(self):
env = rl_environment.Environment("kuhn_poker", **{"players": 3})
self.assertEqual(env.is_turn_based, True)
self.assertEqual(env.num_players, 3)
def test_create_env_from_game_instance(self):
game = pyspiel.load_game("tic_tac_toe")
env = rl_environment.Environment(game)
self.assertEqual(env.is_turn_based, True)
self.assertEqual(env.num_players, 2)
def test_reset(self):
env = rl_environment.Environment("kuhn_poker", **{"players": 3})
time_step = env.reset()
self.assertEqual(time_step.observations["current_player"], 0)
self.assertEmpty(time_step.observations["serialized_state"], 0)
self.assertLen(time_step.observations["info_state"], 3)
self.assertLen(time_step.observations["legal_actions"], 3)
self.assertIsNone(time_step.rewards)
self.assertIsNone(time_step.discounts)
self.assertEqual(time_step.step_type.first(), True)
def test_initial_info_state_is_decision_node(self):
env = rl_environment.Environment("kuhn_poker")
time_step = env.reset()
self.assertEqual(time_step.step_type.first(), True)
self.assertEqual(env.is_chance_node, False)
def test_full_game(self):
env = rl_environment.Environment("tic_tac_toe", include_full_state=True)
_ = env.reset()
time_step = env.step([0])
self.assertEqual(time_step.observations["current_player"], 1)
self.assertLen(time_step.observations["info_state"], 2)
self.assertLen(time_step.observations["legal_actions"], 2)
self.assertLen(time_step.rewards, 2)
self.assertLen(time_step.discounts, 2)
self.assertLen(time_step.observations, 4)
# O X O # Moves 0, 1, 2
# X O X # Moves 3, 4, 5
# O . . # Move 6, game over (player 0 wins).
for i in range(1, 7):
self.assertEqual(time_step.step_type.mid(), True)
time_step = env.step([i])
self.assertEqual(time_step.step_type.last(), True)
def test_spec_fields(self):
env = rl_environment.Environment("tic_tac_toe")
env_spec = env.observation_spec()
action_spec = env.action_spec()
ttt_max_actions = 9
ttt_normalized_info_set_shape = (27,)
self.assertEqual(action_spec["num_actions"], ttt_max_actions)
self.assertEqual(env_spec["info_state"], ttt_normalized_info_set_shape)
self.assertCountEqual(
env_spec.keys(),
["current_player", "info_state", "serialized_state", "legal_actions"])
self.assertCountEqual(action_spec.keys(),
["dtype", "max", "min", "num_actions"])
def test_full_game_simultaneous_move(self):
env = rl_environment.Environment("goofspiel")
_ = env.reset()
time_step = env.step([0, 0])
self.assertEqual(time_step.observations["current_player"],
rl_environment.SIMULTANEOUS_PLAYER_ID)
self.assertLen(time_step.observations["info_state"], 2)
self.assertLen(time_step.observations["legal_actions"], 2)
self.assertLen(time_step.rewards, 2)
self.assertLen(time_step.discounts, 2)
self.assertLen(time_step.observations, 4)
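    # Step the environment with each player's first legal action.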
actions = [act[0] for act in time_step.observations["legal_actions"]]
time_step = env.step(actions)
self.assertEqual(time_step.step_type.mid(), True)
while not time_step.last():
actions = [act[0] for act in time_step.observations["legal_actions"]]
time_step = env.step(actions)
def test_set_and_get_state(self):
env_ttt1 = rl_environment.Environment("tic_tac_toe")
env_ttt2 = rl_environment.Environment("tic_tac_toe")
env_kuhn1 = rl_environment.Environment("kuhn_poker", players=2)
env_kuhn2 = rl_environment.Environment("kuhn_poker", players=3)
env_ttt1.reset()
env_ttt2.reset()
env_kuhn1.reset()
env_kuhn2.reset()
    # Transferring states between identical games should work.
env_ttt1.set_state(env_ttt2.get_state)
env_ttt2.set_state(env_ttt1.get_state)
    # Transferring states between different games, or between games with
    # different parameters, should fail.
with self.assertRaises(AssertionError):
self.fail(env_ttt1.set_state(env_kuhn1.get_state))
with self.assertRaises(AssertionError):
self.fail(env_kuhn1.set_state(env_ttt1.get_state))
with self.assertRaises(AssertionError):
self.fail(env_kuhn1.set_state(env_kuhn2.get_state))
with self.assertRaises(AssertionError):
self.fail(env_kuhn2.set_state(env_kuhn1.get_state))
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/rl_environment_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the game-specific functions for bridge."""
import random
import timeit
from absl.testing import absltest
import numpy as np
import pyspiel
class GamesBridgeTest(absltest.TestCase):
def test_contract_names(self):
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
self.assertEqual(game.contract_string(0), 'Passed Out')
self.assertEqual(game.contract_string(38), '1SX N')
def test_possible_contracts(self):
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
state = game.new_initial_state()
for a in range(52):
state.apply_action(a)
state.apply_action(59) # 1NT - now South cannot declare notrump
state.apply_action(67) # 3H - now West cannot declare hearts
state.apply_action(86) # 7D
state.apply_action(53) # Dbl
possible_contracts = [
game.contract_string(i)
for i, v in enumerate(state.possible_contracts())
if v
]
self.assertCountEqual(possible_contracts, [
'7DX S', '7DXX S', '7H N', '7HX N', '7HXX N', '7H E', '7HX E', '7HXX E',
'7H S', '7HX S', '7HXX S', '7S N', '7SX N', '7SXX N', '7S E', '7SX E',
'7SXX E', '7S S', '7SX S', '7SXX S', '7S W', '7SX W', '7SXX W', '7N N',
'7NX N', '7NXX N', '7N E', '7NX E', '7NXX E', '7N W', '7NX W', '7NXX W'
])
def test_scoring(self):
game = pyspiel.load_game('bridge')
state = game.new_initial_state()
# S J9873
# H A7
# D KT74
# C KT
# S AKQT S 42
# H T852 H K63
# D AQ D 52
# C Q64 C A98732
# S 65
# H QJ94
# D J9863
# C J5
for a in [
7, 28, 37, 2, 45, 3, 25, 51, 27, 48, 5, 43, 23, 13, 12, 8, 22, 46, 38,
26, 9, 20, 36, 34, 32, 11, 29, 35, 44, 1, 10, 14, 39, 4, 19, 40, 50, 6,
17, 41, 33, 0, 42, 16, 21, 18, 30, 49, 31, 24, 15, 47
]:
state.apply_action(a)
score = {
game.contract_string(i): s
for i, s in enumerate(state.score_by_contract())
}
self.assertEqual(score['3N E'], 100)
self.assertEqual(score['3N W'], -460)
self.assertEqual(score['1N W'], -210)
self.assertEqual(score['3DX S'], -100)
self.assertEqual(score['1CXX E'], -830)
self.assertEqual(score['1CXX W'], -1030)
def test_score_single_contract(self):
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
state = game.new_initial_state()
# S T3
# H QT42
# D A82
# C A632
# S KJ5 S Q7
# H A965 H KJ8
# D Q43 D KJT5
# C T87 C Q954
# S A98642
# H 73
# D 976
# C KJ
for a in [
49, 45, 31, 5, 10, 40, 27, 47, 35, 38, 17, 14, 0, 33, 21, 39, 34, 12,
22, 41, 1, 13, 36, 9, 4, 46, 11, 32, 2, 37, 29, 30, 7, 8, 19, 24, 16,
43, 51, 15, 48, 23, 6, 20, 42, 26, 44, 50, 25, 28, 3, 18
]:
state.apply_action(a)
cid = {
game.contract_string(i): i for i in range(game.num_possible_contracts())
}
self.assertEqual(state.score_for_contracts(0, [cid['1H E']]), [-110])
self.assertEqual(
state.score_for_contracts(1, [cid['1H E'], cid['1H W']]), [110, 80])
self.assertEqual(
state.score_for_contracts(2, [cid['1H E'], cid['2H E'], cid['3H E']]),
[-110, -110, 50])
self.assertEqual(
state.score_for_contracts(3, [cid['1H W'], cid['3N W']]), [80, -50])
self.assertEqual(state.score_for_contracts(0, [cid['1DX N']]), [-300])
self.assertEqual(state.score_for_contracts(1, [cid['1CXX W']]), [430])
def test_benchmark_score_single(self):
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
state = game.new_initial_state()
for a in [
49, 45, 31, 5, 10, 40, 27, 47, 35, 38, 17, 14, 0, 33, 21, 39, 34, 12,
22, 41, 1, 13, 36, 9, 4, 46, 11, 32, 2, 37, 29, 30, 7, 8, 19, 24, 16,
43, 51, 15, 48, 23, 6, 20, 42, 26, 44, 50, 25, 28, 3, 18
]:
state.apply_action(a)
cid = {
game.contract_string(i): i for i in range(game.num_possible_contracts())
}
for contracts in (
['1H E'],
['1H E', '1H W'],
['1H E', '2H E', '3H E'],
['1H E', '1CXX W'],
list(cid),
):
cids = [cid[c] for c in contracts]
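      # Bind cids as a default argument so the closure captures this
      # iteration's value rather than the loop variable.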
def benchmark(cids=cids):
working_state = state.clone()
_ = working_state.score_for_contracts(0, cids)
repeat = 1
times = np.array(timeit.repeat(benchmark, number=1, repeat=repeat))
print(f'{contracts} mean {times.mean():.4}s, min {times.min():.4}s')
def test_public_observation(self):
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
state = game.new_initial_state()
for a in range(52):
state.apply_action(a)
state.apply_action(52) # Pass
state.apply_action(59) # 1NT
obs = state.public_observation_tensor()
self.assertLen(obs, game.public_observation_tensor_size())
def test_private_observation(self):
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
state = game.new_initial_state()
# S T3
# H QT42
# D A82
# C A632
# S KJ5 S Q7
# H A965 H KJ8
# D Q43 D KJT5
# C T87 C Q954
# S A98642
# H 73
# D 976
# C KJ
for a in [
49, 45, 31, 5, 10, 40, 27, 47, 35, 38, 17, 14, 0, 33, 21, 39, 34, 12,
22, 41, 1, 13, 36, 9, 4, 46, 11, 32, 2, 37, 29, 30, 7, 8, 19, 24, 16,
43, 51, 15, 48, 23, 6, 20, 42, 26, 44, 50, 25, 28, 3, 18
]:
state.apply_action(a)
obs = state.private_observation_tensor(0)
self.assertLen(obs, game.private_observation_tensor_size())
self.assertEqual(obs, [
1.0, 1.0, 1.0, 0.0, # C2, D2, H2
1.0, 0.0, 0.0, 1.0, # C3, S3
0.0, 0.0, 1.0, 0.0, # H4
0.0, 0.0, 0.0, 0.0, # No 5s
1.0, 0.0, 0.0, 0.0, # C6
0.0, 0.0, 0.0, 0.0, # No 7s
0.0, 1.0, 0.0, 0.0, # D8
0.0, 0.0, 0.0, 0.0, # No 9s
0.0, 0.0, 1.0, 1.0, # H10, S10
0.0, 0.0, 0.0, 0.0, # No Jacks
0.0, 0.0, 1.0, 0.0, # HQ
0.0, 0.0, 0.0, 0.0, # No kings
1.0, 1.0, 0.0, 0.0 # CA, DA
])
def test_benchmark_observation(self):
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
def make_state():
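      # Play up to 60 random actions, stopping early if the game ends.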
state = game.new_initial_state()
for _ in range(60):
a = random.choice(state.legal_actions())
state.apply_action(a)
        if state.is_terminal():
          break
return state
batch_size = 16
obs_shape = [batch_size] + game.observation_tensor_shape()
states = [make_state() for _ in range(batch_size)]
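    # Baseline: build each observation as a Python list, then copy it into the
    # batch array.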
def make_obs_copy():
inputs = np.zeros(obs_shape)
for i in range(batch_size):
inputs[i, :] = states[i].observation_tensor()
return inputs
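    # Alternative: write each observation directly into the preallocated
    # float32 buffer, avoiding the intermediate list.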
def make_obs_inplace():
inputs = np.zeros(obs_shape, np.float32)
for i in range(batch_size):
states[i].write_observation_tensor(inputs[i])
return inputs
repeat = 2
number = 2
times = np.array(timeit.repeat(make_obs_copy, number=number, repeat=repeat))
print(f'OpenSpiel {times.mean():.4}s, min {times.min():.4}s')
times = np.array(
timeit.repeat(make_obs_inplace, number=number, repeat=repeat))
print(f'In-place {times.mean():.4}s, min {times.min():.4}s')
np.testing.assert_array_equal(make_obs_copy(), make_obs_inplace())
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/tests/games_bridge_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the game-specific functions for gin rummy."""
from absl.testing import absltest
import pyspiel
gin_rummy = pyspiel.gin_rummy
class GamesGinRummyTest(absltest.TestCase):
def test_bindings(self):
# gin_rummy submodule attributes
self.assertEqual(gin_rummy.DEFAULT_NUM_RANKS, 13)
self.assertEqual(gin_rummy.DEFAULT_NUM_SUITS, 4)
self.assertEqual(gin_rummy.DEFAULT_NUM_CARDS, 52)
self.assertEqual(gin_rummy.NUM_PLAYERS, 2)
self.assertEqual(gin_rummy.MAX_POSSIBLE_DEADWOOD, 98)
self.assertEqual(gin_rummy.MAX_NUM_DRAW_UPCARD_ACTIONS, 50)
self.assertEqual(gin_rummy.DEFAULT_HAND_SIZE, 10)
self.assertEqual(gin_rummy.WALL_STOCK_SIZE, 2)
self.assertEqual(gin_rummy.DEFAULT_KNOCK_CARD, 10)
self.assertEqual(gin_rummy.DEFAULT_GIN_BONUS, 25)
self.assertEqual(gin_rummy.DEFAULT_UNDERCUT_BONUS, 25)
self.assertEqual(gin_rummy.DRAW_UPCARD_ACTION, 52)
self.assertEqual(gin_rummy.DRAW_STOCK_ACTION, 53)
self.assertEqual(gin_rummy.PASS_ACTION, 54)
self.assertEqual(gin_rummy.KNOCK_ACTION, 55)
self.assertEqual(gin_rummy.MELD_ACTION_BASE, 56)
self.assertEqual(gin_rummy.NUM_MELD_ACTIONS, 185)
self.assertEqual(gin_rummy.NUM_DISTINCT_ACTIONS, 241)
self.assertEqual(gin_rummy.OBSERVATION_TENSOR_SIZE, 644)
# Game bindings
game = pyspiel.load_game('gin_rummy')
self.assertFalse(game.oklahoma())
self.assertEqual(game.knock_card(), 10)
# State bindings
state = game.new_initial_state()
self.assertEqual(state.current_phase(), gin_rummy.Phase.DEAL)
self.assertEqual(state.current_player(), pyspiel.PlayerId.CHANCE)
self.assertIsNone(state.upcard())
self.assertEqual(state.stock_size(), 52)
self.assertEqual(state.hands(), [[], []])
self.assertEqual(state.discard_pile(), [])
self.assertEqual(state.deadwood(), [0, 0])
self.assertEqual(state.knocked(), [False, False])
self.assertEqual(state.pass_on_first_upcard(), [False, False])
self.assertEqual(state.layed_melds(), [[], []])
self.assertEqual(state.layoffs(), [])
self.assertFalse(state.finished_layoffs())
# Utils
utils = gin_rummy.GinRummyUtils(gin_rummy.DEFAULT_NUM_RANKS,
gin_rummy.DEFAULT_NUM_SUITS,
gin_rummy.DEFAULT_HAND_SIZE)
self.assertEqual(utils.card_string(0), 'As')
self.assertEqual(utils.hand_to_string([0, 1, 2]),
'+--------------------------+\n'
'|As2s3s |\n'
'| |\n'
'| |\n'
'| |\n'
'+--------------------------+\n')
self.assertEqual(utils.card_int('As'), 0)
self.assertEqual(utils.card_ints_to_card_strings([0, 1, 2]),
['As', '2s', '3s'])
self.assertEqual(utils.card_strings_to_card_ints(['As', '2s', '3s']),
[0, 1, 2])
self.assertEqual(utils.card_value(0), 1)
self.assertEqual(utils.total_card_value([50, 51]), 20)
self.assertEqual(utils.total_card_value([[0, 1], [50, 51]]), 23)
self.assertEqual(utils.card_rank(51), 12)
self.assertEqual(utils.card_suit(51), 3)
self.assertTrue(utils.is_consecutive([0, 1, 2]))
self.assertTrue(utils.is_rank_meld([0, 13, 26]))
self.assertTrue(utils.is_suit_meld([0, 1, 2]))
self.assertEqual(utils.rank_melds([0, 1, 13, 26]), [[0, 13, 26]])
self.assertEqual(utils.suit_melds([0, 5, 6, 7]), [[5, 6, 7]])
self.assertEqual(utils.all_melds([0, 5, 6, 7, 13, 26]),
[[0, 13, 26], [5, 6, 7]])
self.assertEqual(utils.all_meld_groups([0, 5, 6, 7, 13, 26]),
[[[0, 13, 26], [5, 6, 7]], [[5, 6, 7], [0, 13, 26]]])
self.assertEqual(utils.best_meld_group([0, 5, 6, 7, 13, 26]),
[[0, 13, 26], [5, 6, 7]])
self.assertEqual(utils.min_deadwood([0, 1, 2], 3), 0)
self.assertEqual(utils.min_deadwood([0, 1, 2]), 0)
self.assertEqual(utils.rank_meld_layoff([0, 13, 26]), 39)
self.assertEqual(utils.suit_meld_layoffs([0, 1, 2]), [3])
self.assertEqual(utils.legal_melds([0, 1, 2, 3], 10), [65, 66, 109])
self.assertEqual(utils.legal_discards([0, 1, 2], 10), [0, 1, 2])
self.assertEqual(utils.all_layoffs([65], [3]), [4])
self.assertEqual(utils.meld_to_int([0, 1, 2]), 65)
self.assertEqual(utils.int_to_meld[65], [0, 1, 2])
if __name__ == '__main__':
absltest.main()
| open_spiel-master | open_spiel/python/tests/games_gin_rummy_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the C++ nfg_writer methods exposed to Python."""
from absl.testing import absltest
import pyspiel
class NFGWriterTest(absltest.TestCase):
def test_rps(self):
expected_rps_nfg = ("""NFG 1 R "OpenSpiel export of matrix_rps()"
{ "Player 0" "Player 1" } { 3 3 }
0 0
1 -1
-1 1
-1 1
0 0
1 -1
1 -1
-1 1
0 0
""")
game = pyspiel.load_game("matrix_rps")
nfg_text = pyspiel.game_to_nfg_string(game)
self.assertEqual(nfg_text, expected_rps_nfg)
def test_pd(self):
expected_pd_nfg = ("""NFG 1 R "OpenSpiel export of matrix_pd()"
{ "Player 0" "Player 1" } { 2 2 }
5 5
10 0
0 10
1 1
""")
game = pyspiel.load_game("matrix_pd")
nfg_text = pyspiel.game_to_nfg_string(game)
self.assertEqual(nfg_text, expected_pd_nfg)
def test_mp3p(self):
expected_mp3p_nfg = ("""NFG 1 R "OpenSpiel export of matching_pennies_3p()"
{ "Player 0" "Player 1" "Player 2" } { 2 2 2 }
1 1 -1
-1 1 1
-1 -1 -1
1 -1 1
1 -1 1
-1 -1 -1
-1 1 1
1 1 -1
""")
game = pyspiel.load_game("matching_pennies_3p")
nfg_text = pyspiel.game_to_nfg_string(game)
self.assertEqual(nfg_text, expected_mp3p_nfg)
if __name__ == "__main__":
absltest.main()
| open_spiel-master | open_spiel/python/tests/nfg_writer_test.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| open_spiel-master | open_spiel/python/tests/mfg_implementation_test/__init__.py |